| // Copyright (c) 2012, the Dart project authors.  Please see the AUTHORS file | 
 | // for details. All rights reserved. Use of this source code is governed by a | 
 | // BSD-style license that can be found in the LICENSE file. | 
 |  | 
 | #include "vm/object.h" | 
 |  | 
 | #include <memory> | 
 |  | 
 | #include "include/dart_api.h" | 
 | #include "lib/integers.h" | 
 | #include "lib/stacktrace.h" | 
 | #include "platform/assert.h" | 
 | #include "platform/text_buffer.h" | 
 | #include "platform/unaligned.h" | 
 | #include "platform/unicode.h" | 
 | #include "vm/bit_vector.h" | 
 | #include "vm/bootstrap.h" | 
 | #include "vm/canonical_tables.h" | 
 | #include "vm/class_finalizer.h" | 
 | #include "vm/class_id.h" | 
 | #include "vm/closure_functions_cache.h" | 
 | #include "vm/code_comments.h" | 
 | #include "vm/code_descriptors.h" | 
 | #include "vm/code_observers.h" | 
 | #include "vm/compiler/assembler/disassembler.h" | 
#include "vm/compiler/jit/compiler.h"
#include "vm/compiler/method_recognizer.h"
 | #include "vm/compiler/runtime_api.h" | 
 | #include "vm/cpu.h" | 
 | #include "vm/dart.h" | 
 | #include "vm/dart_api_state.h" | 
 | #include "vm/dart_entry.h" | 
 | #include "vm/datastream.h" | 
 | #include "vm/debugger.h" | 
 | #include "vm/deopt_instructions.h" | 
 | #include "vm/double_conversion.h" | 
 | #include "vm/elf.h" | 
 | #include "vm/exceptions.h" | 
 | #include "vm/growable_array.h" | 
 | #include "vm/hash.h" | 
 | #include "vm/hash_table.h" | 
 | #include "vm/heap/become.h" | 
 | #include "vm/heap/heap.h" | 
 | #include "vm/heap/sampler.h" | 
 | #include "vm/heap/weak_code.h" | 
 | #include "vm/image_snapshot.h" | 
 | #include "vm/isolate_reload.h" | 
 | #include "vm/kernel.h" | 
 | #include "vm/kernel_binary.h" | 
 | #include "vm/kernel_isolate.h" | 
 | #include "vm/kernel_loader.h" | 
 | #include "vm/log.h" | 
 | #include "vm/native_symbol.h" | 
 | #include "vm/object_graph.h" | 
 | #include "vm/object_store.h" | 
 | #include "vm/os.h" | 
 | #include "vm/parser.h" | 
 | #include "vm/profiler.h" | 
 | #include "vm/regexp.h" | 
 | #include "vm/resolver.h" | 
 | #include "vm/reusable_handles.h" | 
 | #include "vm/reverse_pc_lookup_cache.h" | 
 | #include "vm/runtime_entry.h" | 
 | #include "vm/scopes.h" | 
 | #include "vm/stack_frame.h" | 
 | #include "vm/stub_code.h" | 
 | #include "vm/symbols.h" | 
 | #include "vm/tags.h" | 
 | #include "vm/thread_registry.h" | 
 | #include "vm/timeline.h" | 
 | #include "vm/type_testing_stubs.h" | 
 | #include "vm/zone_text_buffer.h" | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | #include "vm/compiler/aot/precompiler.h" | 
 | #include "vm/compiler/assembler/assembler.h" | 
 | #include "vm/compiler/backend/code_statistics.h" | 
 | #include "vm/compiler/compiler_state.h" | 
 | #include "vm/compiler/frontend/kernel_fingerprints.h" | 
 | #include "vm/compiler/frontend/kernel_translation_helper.h" | 
 | #include "vm/compiler/intrinsifier.h" | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | namespace dart { | 
 |  | 
 | DEFINE_FLAG(uint64_t, | 
 |             huge_method_cutoff_in_code_size, | 
 |             200000, | 
 |             "Huge method cutoff in unoptimized code size (in bytes)."); | 
 | DEFINE_FLAG( | 
 |     bool, | 
 |     show_internal_names, | 
 |     false, | 
 |     "Show names of internal classes (e.g. \"OneByteString\") in error messages " | 
 |     "instead of showing the corresponding interface names (e.g. \"String\"). " | 
 |     "Also show legacy nullability in type names."); | 
 |  | 
 | DEFINE_FLAG(bool, | 
 |             remove_script_timestamps_for_test, | 
 |             false, | 
 |             "Remove script timestamps to allow for deterministic testing."); | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | DEFINE_FLAG(bool, use_register_cc, true, "Use register calling conventions"); | 
 | #endif | 
 |  | 
 | DECLARE_FLAG(bool, intrinsify); | 
 | DECLARE_FLAG(bool, trace_deoptimization); | 
 | DECLARE_FLAG(bool, trace_deoptimization_verbose); | 
 | DECLARE_FLAG(bool, trace_reload); | 
 | DECLARE_FLAG(bool, write_protect_code); | 
 | DECLARE_FLAG(bool, precompiled_mode); | 
 | DECLARE_FLAG(int, max_polymorphic_checks); | 
 |  | 
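// For example, the getter for a field "foo" is mangled as "get:foo", its
// setter as "set:foo", and its initializer function as "init:foo".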
 | static const char* const kGetterPrefix = "get:"; | 
 | static const intptr_t kGetterPrefixLength = strlen(kGetterPrefix); | 
 | static const char* const kSetterPrefix = "set:"; | 
 | static const intptr_t kSetterPrefixLength = strlen(kSetterPrefix); | 
 | static const char* const kInitPrefix = "init:"; | 
 | static const intptr_t kInitPrefixLength = strlen(kInitPrefix); | 
 |  | 
// A cache of preinitialized empty ICData entry arrays allocated in the
// VM heap.
 | ArrayPtr ICData::cached_icdata_arrays_[kCachedICDataArrayCount]; | 
 |  | 
 | cpp_vtable Object::builtin_vtables_[kNumPredefinedCids] = {}; | 
 |  | 
 | // These are initialized to a value that will force an illegal memory access if | 
 | // they are being used. | 
 | #if defined(RAW_NULL) | 
 | #error RAW_NULL should not be defined. | 
 | #endif | 
 | #define RAW_NULL static_cast<uword>(kHeapObjectTag) | 
 |  | 
 | #define CHECK_ERROR(error)                                                     \ | 
 |   {                                                                            \ | 
 |     ErrorPtr err = (error);                                                    \ | 
 |     if (err != Error::null()) {                                                \ | 
 |       return err;                                                              \ | 
 |     }                                                                          \ | 
 |   } | 
 |  | 
 | #define DEFINE_SHARED_READONLY_HANDLE(Type, name)                              \ | 
 |   Type* Object::name##_ = nullptr; | 
 | SHARED_READONLY_HANDLES_LIST(DEFINE_SHARED_READONLY_HANDLE) | 
 | #undef DEFINE_SHARED_READONLY_HANDLE | 
 |  | 
 | ObjectPtr Object::null_ = static_cast<ObjectPtr>(RAW_NULL); | 
 | BoolPtr Object::true_ = static_cast<BoolPtr>(RAW_NULL); | 
 | BoolPtr Object::false_ = static_cast<BoolPtr>(RAW_NULL); | 
 | ClassPtr Object::class_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::dynamic_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::void_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::type_parameters_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::type_arguments_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::patch_class_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::function_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::closure_data_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::ffi_trampoline_data_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::field_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::script_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::library_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::namespace_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::kernel_program_info_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::code_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::instructions_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::instructions_section_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::instructions_table_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::object_pool_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::pc_descriptors_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::code_source_map_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::compressed_stackmaps_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::var_descriptors_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::exception_handlers_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::context_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::context_scope_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::sentinel_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::singletargetcache_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::unlinkedcall_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::monomorphicsmiablecall_class_ = | 
 |     static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::icdata_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::megamorphic_cache_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::subtypetestcache_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::loadingunit_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::api_error_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::language_error_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::unhandled_exception_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::unwind_error_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::weak_serialization_reference_class_ = | 
 |     static_cast<ClassPtr>(RAW_NULL); | 
 | ClassPtr Object::weak_array_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |  | 
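// Appends the 'len' characters of 'name' starting at index 'start_pos' to
// 'buffer'.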
 | static void AppendSubString(BaseTextBuffer* buffer, | 
 |                             const char* name, | 
 |                             intptr_t start_pos, | 
 |                             intptr_t len) { | 
 |   buffer->Printf("%.*s", static_cast<int>(len), &name[start_pos]); | 
 | } | 
 |  | 
 | // Used to define setters and getters for untagged object fields that are | 
 | // defined with the WSR_COMPRESSED_POINTER_FIELD macro. See | 
 | // PRECOMPILER_WSR_FIELD_DECLARATION in object.h for more information. | 
 | #if defined(DART_PRECOMPILER) | 
 | #define PRECOMPILER_WSR_FIELD_DEFINITION(Class, Type, Name)                    \ | 
 |   Type##Ptr Class::Name() const {                                              \ | 
 |     return Type::RawCast(WeakSerializationReference::Unwrap(untag()->Name())); \ | 
 |   } | 
 | #else | 
 | #define PRECOMPILER_WSR_FIELD_DEFINITION(Class, Type, Name)                    \ | 
 |   void Class::set_##Name(const Type& value) const {                            \ | 
 |     untag()->set_##Name(value.ptr());                                          \ | 
 |   } | 
 | #endif | 
 |  | 
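// For example, the first use below expands in the precompiler to a getter
// that unwraps a possible WeakSerializationReference:
//
//   FunctionPtr ClosureData::parent_function() const {
//     return Function::RawCast(
//         WeakSerializationReference::Unwrap(untag()->parent_function()));
//   }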
 | PRECOMPILER_WSR_FIELD_DEFINITION(ClosureData, Function, parent_function) | 
 | PRECOMPILER_WSR_FIELD_DEFINITION(Function, FunctionType, signature) | 
 |  | 
 | #undef PRECOMPILER_WSR_FIELD_DEFINITION | 
 |  | 
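// The GNU '##__VA_ARGS__' extension (which swallows the preceding comma when
// __VA_ARGS__ is empty) has historically been unsupported by MSVC, whose
// preprocessor drops the trailing comma on its own; hence the two variants.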
 | #if defined(_MSC_VER) | 
 | #define TRACE_TYPE_CHECKS_VERBOSE(format, ...)                                 \ | 
 |   if (FLAG_trace_type_checks_verbose) {                                        \ | 
 |     OS::PrintErr(format, __VA_ARGS__);                                         \ | 
 |   } | 
 | #else | 
 | #define TRACE_TYPE_CHECKS_VERBOSE(format, ...)                                 \ | 
 |   if (FLAG_trace_type_checks_verbose) {                                        \ | 
 |     OS::PrintErr(format, ##__VA_ARGS__);                                       \ | 
 |   } | 
 | #endif | 
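
// Usage sketch (illustrative): arguments are printf-style and are evaluated
// only when FLAG_trace_type_checks_verbose is set:
//
//   TRACE_TYPE_CHECKS_VERBOSE("  Result: %s\n", result.ToCString());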
 |  | 
 | // Remove private keys, but retain getter/setter/constructor/mixin manglings. | 
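// For example (illustrative): "_ReceivePortImpl@709387912" becomes
// "_ReceivePortImpl", and "get:_foo@123" keeps its prefix, becoming
// "get:_foo".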
 | StringPtr String::RemovePrivateKey(const String& name) { | 
 |   ASSERT(name.IsOneByteString()); | 
 |   GrowableArray<uint8_t> without_key(name.Length()); | 
 |   intptr_t i = 0; | 
 |   while (i < name.Length()) { | 
 |     while (i < name.Length()) { | 
 |       uint8_t c = name.CharAt(i++); | 
 |       if (c == '@') break; | 
 |       without_key.Add(c); | 
 |     } | 
 |     while (i < name.Length()) { | 
 |       uint8_t c = name.CharAt(i); | 
 |       if ((c < '0') || (c > '9')) break; | 
 |       i++; | 
 |     } | 
 |   } | 
 |  | 
 |   return String::FromLatin1(without_key.data(), without_key.length()); | 
 | } | 
 |  | 
// Takes a VM-internal name and makes it suitable for reporting to the user.
 | // | 
 | // Examples: | 
 | // | 
 | // Internal getter and setter prefixes are changed: | 
 | // | 
 | //   get:foo -> foo | 
 | //   set:foo -> foo= | 
 | // | 
 | // Private name mangling is removed, possibly multiple times: | 
 | // | 
 | //   _ReceivePortImpl@709387912 -> _ReceivePortImpl | 
 | //   _ReceivePortImpl@709387912._internal@709387912 -> | 
 | //      _ReceivePortImpl._internal | 
 | //   _C@6328321&_E@6328321&_F@6328321 -> _C&_E&_F | 
 | // | 
 | // The trailing . on the default constructor name is dropped: | 
 | // | 
 | //   List. -> List | 
 | // | 
 | // And so forth: | 
 | // | 
 | //   get:foo@6328321 -> foo | 
 | //   _MyClass@6328321. -> _MyClass | 
 | //   _MyClass@6328321.named -> _MyClass.named | 
 | // | 
// For extension methods, the following demangling is done:
 | //   ext|func -> ext.func (instance extension method) | 
 | //   ext|get#prop -> ext.prop (instance extension getter) | 
 | //   ext|set#prop -> ext.prop= (instance extension setter) | 
 | //   ext|sfunc -> ext.sfunc (static extension method) | 
 | //   get:ext|sprop -> ext.sprop (static extension getter) | 
 | //   set:ext|sprop -> ext.sprop= (static extension setter) | 
 | // | 
 | const char* String::ScrubName(const String& name, bool is_extension) { | 
 |   Thread* thread = Thread::Current(); | 
 |   NoSafepointScope no_safepoint(thread); | 
 |   Zone* zone = thread->zone(); | 
 |   ZoneTextBuffer printer(zone); | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (name.Equals(Symbols::TopLevel())) { | 
 |     // Name of invisible top-level class. | 
 |     return ""; | 
 |   } | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 |   const char* cname = name.ToCString(); | 
 |   ASSERT(strlen(cname) == static_cast<size_t>(name.Length())); | 
 |   const intptr_t name_len = name.Length(); | 
  // First remove all private name mangling, and if 'is_extension' is true,
  // substitute the first '|' character with '.'.
 |   intptr_t start_pos = 0; | 
 |   intptr_t sum_segment_len = 0; | 
 |   for (intptr_t i = 0; i < name_len; i++) { | 
 |     if ((cname[i] == '@') && ((i + 1) < name_len) && (cname[i + 1] >= '0') && | 
 |         (cname[i + 1] <= '9')) { | 
 |       // Append the current segment to the unmangled name. | 
 |       const intptr_t segment_len = i - start_pos; | 
 |       sum_segment_len += segment_len; | 
 |       AppendSubString(&printer, cname, start_pos, segment_len); | 
 |       // Advance until past the name mangling. The private keys are only | 
 |       // numbers so we skip until the first non-number. | 
 |       i++;  // Skip the '@'. | 
 |       while ((i < name.Length()) && (name.CharAt(i) >= '0') && | 
 |              (name.CharAt(i) <= '9')) { | 
 |         i++; | 
 |       } | 
 |       start_pos = i; | 
 |       i--;  // Account for for-loop increment. | 
 |     } else if (is_extension && cname[i] == '|') { | 
 |       // Append the current segment to the unmangled name. | 
 |       const intptr_t segment_len = i - start_pos; | 
 |       AppendSubString(&printer, cname, start_pos, segment_len); | 
 |       // Append the '.' character (replaces '|' with '.'). | 
 |       AppendSubString(&printer, ".", 0, 1); | 
 |       start_pos = i + 1; | 
 |       // Account for length of segments added so far. | 
 |       sum_segment_len += (segment_len + 1); | 
 |     } | 
 |   } | 
 |  | 
 |   const char* unmangled_name = nullptr; | 
 |   if (start_pos == 0) { | 
 |     // No name unmangling needed, reuse the name that was passed in. | 
 |     unmangled_name = cname; | 
 |     sum_segment_len = name_len; | 
 |   } else if (name.Length() != start_pos) { | 
 |     // Append the last segment. | 
 |     const intptr_t segment_len = name.Length() - start_pos; | 
 |     sum_segment_len += segment_len; | 
 |     AppendSubString(&printer, cname, start_pos, segment_len); | 
 |   } | 
 |   if (unmangled_name == nullptr) { | 
 |     // Merge unmangled_segments. | 
 |     unmangled_name = printer.buffer(); | 
 |   } | 
 |  | 
 |   printer.Clear(); | 
 |   intptr_t start = 0; | 
 |   intptr_t len = sum_segment_len; | 
 |   bool is_setter = false; | 
 |   if (is_extension) { | 
 |     // First scan till we see the '.' character. | 
 |     for (intptr_t i = 0; i < len; i++) { | 
 |       if (unmangled_name[i] == '.') { | 
 |         intptr_t slen = i + 1; | 
 |         intptr_t plen = slen - start; | 
 |         AppendSubString(&printer, unmangled_name, start, plen); | 
 |         unmangled_name += slen; | 
 |         len -= slen; | 
 |         break; | 
 |       } else if (unmangled_name[i] == ':') { | 
 |         if (start != 0) { | 
 |           // Reset and break. | 
 |           start = 0; | 
 |           is_setter = false; | 
 |           break; | 
 |         } | 
 |         if (unmangled_name[0] == 's') { | 
 |           is_setter = true; | 
 |         } | 
 |         start = i + 1; | 
 |       } | 
 |     } | 
 |   } | 
 |   intptr_t dot_pos = -1;  // Position of '.' in the name, if any. | 
 |   start = 0; | 
 |   for (intptr_t i = start; i < len; i++) { | 
 |     if (unmangled_name[i] == ':' || | 
 |         (is_extension && unmangled_name[i] == '#')) { | 
 |       if (start != 0) { | 
 |         // Reset and break. | 
 |         start = 0; | 
 |         dot_pos = -1; | 
 |         break; | 
 |       } | 
 |       ASSERT(start == 0);  // Only one : is possible in getters or setters. | 
 |       if (unmangled_name[0] == 's') { | 
 |         ASSERT(!is_setter); | 
 |         is_setter = true; | 
 |       } | 
 |       start = i + 1; | 
 |     } else if (unmangled_name[i] == '.') { | 
 |       if (dot_pos != -1) { | 
 |         // Reset and break. | 
 |         start = 0; | 
 |         dot_pos = -1; | 
 |         break; | 
 |       } | 
 |       ASSERT(dot_pos == -1);  // Only one dot is supported. | 
 |       dot_pos = i; | 
 |     } | 
 |   } | 
 |  | 
 |   if (!is_extension && (start == 0) && (dot_pos == -1)) { | 
 |     // This unmangled_name is fine as it is. | 
 |     return unmangled_name; | 
 |   } | 
 |  | 
 |   // Drop the trailing dot if needed. | 
 |   intptr_t end = ((dot_pos + 1) == len) ? dot_pos : len; | 
 |  | 
 |   intptr_t substr_len = end - start; | 
 |   AppendSubString(&printer, unmangled_name, start, substr_len); | 
 |   if (is_setter) { | 
 |     const char* equals = Symbols::Equals().ToCString(); | 
 |     const intptr_t equals_len = strlen(equals); | 
 |     AppendSubString(&printer, equals, 0, equals_len); | 
 |   } | 
 |  | 
 |   return printer.buffer(); | 
 | } | 
 |  | 
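// Like ScrubName, but retains any private key (e.g. "@123") in the result.
// For a setter, the '=' is inserted before the key: e.g. (illustrative)
// "set:_foo@123" becomes "_foo=@123".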
 | StringPtr String::ScrubNameRetainPrivate(const String& name, | 
 |                                          bool is_extension) { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   intptr_t len = name.Length(); | 
 |   intptr_t start = 0; | 
 |   intptr_t at_pos = -1;  // Position of '@' in the name, if any. | 
 |   bool is_setter = false; | 
 |  | 
 |   String& result = String::Handle(); | 
 |  | 
  // If this is an extension, strip out the leading prefix, e.g. "ext|func"
  // would strip out "ext|".
 |   if (is_extension) { | 
 |     // First scan till we see the '|' character. | 
 |     for (intptr_t i = 0; i < len; i++) { | 
 |       if (name.CharAt(i) == '|') { | 
 |         result = String::SubString(name, start, (i - start)); | 
 |         result = String::Concat(result, Symbols::Dot()); | 
 |         start = i + 1; | 
 |         break; | 
 |       } else if (name.CharAt(i) == ':') { | 
 |         if (start != 0) { | 
 |           // Reset and break. | 
 |           start = 0; | 
 |           is_setter = false; | 
 |           break; | 
 |         } | 
 |         if (name.CharAt(0) == 's') { | 
 |           is_setter = true; | 
 |         } | 
 |         start = i + 1; | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
 |   for (intptr_t i = start; i < len; i++) { | 
 |     if (name.CharAt(i) == ':' || (is_extension && name.CharAt(i) == '#')) { | 
 |       // Only one : is possible in getters or setters. | 
 |       ASSERT(is_extension || start == 0); | 
 |       if (name.CharAt(start) == 's') { | 
 |         is_setter = true; | 
 |       } | 
 |       start = i + 1; | 
 |     } else if (name.CharAt(i) == '@') { | 
 |       // Setters should have only one @ so we know where to put the =. | 
 |       ASSERT(!is_setter || (at_pos == -1)); | 
 |       at_pos = i; | 
 |     } | 
 |   } | 
 |  | 
 |   if (start == 0) { | 
    // The name is fine as it is.
 |     return name.ptr(); | 
 |   } | 
 |  | 
 |   if (is_extension) { | 
 |     const String& fname = | 
 |         String::Handle(String::SubString(name, start, (len - start))); | 
 |     result = String::Concat(result, fname); | 
 |   } else { | 
 |     result = String::SubString(name, start, (len - start)); | 
 |   } | 
 |  | 
 |   if (is_setter) { | 
 |     // Setters need to end with '='. | 
 |     if (at_pos == -1) { | 
 |       return String::Concat(result, Symbols::Equals()); | 
 |     } else { | 
 |       const String& pre_at = | 
 |           String::Handle(String::SubString(result, 0, at_pos - 4)); | 
 |       const String& post_at = | 
 |           String::Handle(String::SubString(name, at_pos, len - at_pos)); | 
 |       result = String::Concat(pre_at, Symbols::Equals()); | 
 |       result = String::Concat(result, post_at); | 
 |     } | 
 |   } | 
 |  | 
 |   return result.ptr(); | 
 | #endif                // !defined(DART_PRECOMPILED_RUNTIME) | 
 |   return name.ptr();  // In AOT, return argument unchanged. | 
 | } | 
 |  | 
 | template <typename type> | 
 | static bool IsSpecialCharacter(type value) { | 
 |   return ((value == '"') || (value == '\n') || (value == '\f') || | 
 |           (value == '\b') || (value == '\t') || (value == '\v') || | 
 |           (value == '\r') || (value == '\\') || (value == '$')); | 
 | } | 
 |  | 
 | static inline bool IsAsciiNonprintable(int32_t c) { | 
 |   return ((0 <= c) && (c < 32)) || (c == 127); | 
 | } | 
 |  | 
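// For example, '\n' is escaped as "\n" (one extra byte for the backslash),
// and 0x01 is escaped as "\x01" (three extra bytes).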
 | static int32_t EscapeOverhead(int32_t c) { | 
 |   if (IsSpecialCharacter(c)) { | 
 |     return 1;  // 1 additional byte for the backslash. | 
 |   } else if (IsAsciiNonprintable(c)) { | 
 |     return 3;  // 3 additional bytes to encode c as \x00. | 
 |   } | 
 |   return 0; | 
 | } | 
 |  | 
 | template <typename type> | 
 | static type SpecialCharacter(type value) { | 
 |   if (value == '"') { | 
 |     return '"'; | 
 |   } else if (value == '\n') { | 
 |     return 'n'; | 
 |   } else if (value == '\f') { | 
 |     return 'f'; | 
 |   } else if (value == '\b') { | 
 |     return 'b'; | 
 |   } else if (value == '\t') { | 
 |     return 't'; | 
 |   } else if (value == '\v') { | 
 |     return 'v'; | 
 |   } else if (value == '\r') { | 
 |     return 'r'; | 
 |   } else if (value == '\\') { | 
 |     return '\\'; | 
 |   } else if (value == '$') { | 
 |     return '$'; | 
 |   } | 
 |   UNREACHABLE(); | 
 |   return '\0'; | 
 | } | 
 |  | 
 | void Object::InitNullAndBool(IsolateGroup* isolate_group) { | 
 |   // Should only be run by the vm isolate. | 
 |   ASSERT(isolate_group == Dart::vm_isolate_group()); | 
 |   Thread* thread = Thread::Current(); | 
 |   auto heap = isolate_group->heap(); | 
 |  | 
 |   // TODO(iposva): NoSafepointScope needs to be added here. | 
 |   ASSERT(class_class() == null_); | 
 |  | 
 |   // Allocate and initialize the null instance. | 
 |   // 'null_' must be the first object allocated as it is used in allocation to | 
 |   // clear the pointer fields of objects. | 
 |   { | 
 |     uword address = | 
 |         heap->Allocate(thread, Instance::InstanceSize(), Heap::kOld); | 
 |     null_ = static_cast<InstancePtr>(address + kHeapObjectTag); | 
 |     InitializeObjectVariant<Instance>(address, kNullCid); | 
 |     null_->untag()->SetCanonical(); | 
 |   } | 
 |  | 
 |   // Allocate and initialize the bool instances. | 
  // These must be allocated such that the bit at kBoolValueBitPosition is 0
  // in the address of true and 1 in the address of false, and their
  // addresses are otherwise identical.
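  // As the ASSERTs below check, false_ then differs from true_ only in the
  // kBoolValueMask bit, which lets generated code select between the two
  // objects by OR-ing a single bit into true_'s address.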
 |   { | 
 |     // Allocate a dummy bool object to give true the desired alignment. | 
 |     uword address = heap->Allocate(thread, Bool::InstanceSize(), Heap::kOld); | 
 |     InitializeObject<Bool>(address); | 
 |     static_cast<BoolPtr>(address + kHeapObjectTag)->untag()->value_ = false; | 
 |   } | 
 |   { | 
 |     // Allocate true. | 
 |     uword address = heap->Allocate(thread, Bool::InstanceSize(), Heap::kOld); | 
 |     true_ = static_cast<BoolPtr>(address + kHeapObjectTag); | 
 |     InitializeObject<Bool>(address); | 
 |     true_->untag()->value_ = true; | 
 |     true_->untag()->SetCanonical(); | 
 |   } | 
 |   { | 
 |     // Allocate false. | 
 |     uword address = heap->Allocate(thread, Bool::InstanceSize(), Heap::kOld); | 
 |     false_ = static_cast<BoolPtr>(address + kHeapObjectTag); | 
 |     InitializeObject<Bool>(address); | 
 |     false_->untag()->value_ = false; | 
 |     false_->untag()->SetCanonical(); | 
 |   } | 
 |  | 
 |   // Check that the objects have been allocated at appropriate addresses. | 
 |   ASSERT(static_cast<uword>(true_) == | 
 |          static_cast<uword>(null_) + kTrueOffsetFromNull); | 
 |   ASSERT(static_cast<uword>(false_) == | 
 |          static_cast<uword>(null_) + kFalseOffsetFromNull); | 
 |   ASSERT((static_cast<uword>(true_) & kBoolValueMask) == 0); | 
 |   ASSERT((static_cast<uword>(false_) & kBoolValueMask) != 0); | 
 |   ASSERT(static_cast<uword>(false_) == | 
 |          (static_cast<uword>(true_) | kBoolValueMask)); | 
 |   ASSERT((static_cast<uword>(null_) & kBoolVsNullMask) == 0); | 
 |   ASSERT((static_cast<uword>(true_) & kBoolVsNullMask) != 0); | 
 |   ASSERT((static_cast<uword>(false_) & kBoolVsNullMask) != 0); | 
 | } | 
 |  | 
 | void Object::InitVtables() { | 
 |   { | 
 |     Object fake_handle; | 
 |     builtin_vtables_[kObjectCid] = fake_handle.vtable(); | 
 |   } | 
 |  | 
 | #define INIT_VTABLE(clazz)                                                     \ | 
 |   {                                                                            \ | 
 |     clazz fake_handle;                                                         \ | 
 |     builtin_vtables_[k##clazz##Cid] = fake_handle.vtable();                    \ | 
 |   } | 
 |   CLASS_LIST_NO_OBJECT_NOR_STRING_NOR_ARRAY_NOR_MAP(INIT_VTABLE) | 
 |   INIT_VTABLE(GrowableObjectArray) | 
 | #undef INIT_VTABLE | 
 |  | 
 | #define INIT_VTABLE(clazz)                                                     \ | 
 |   {                                                                            \ | 
 |     Map fake_handle;                                                           \ | 
 |     builtin_vtables_[k##clazz##Cid] = fake_handle.vtable();                    \ | 
 |   } | 
 |   CLASS_LIST_MAPS(INIT_VTABLE) | 
 | #undef INIT_VTABLE | 
 |  | 
 | #define INIT_VTABLE(clazz)                                                     \ | 
 |   {                                                                            \ | 
 |     Set fake_handle;                                                           \ | 
 |     builtin_vtables_[k##clazz##Cid] = fake_handle.vtable();                    \ | 
 |   } | 
 |   CLASS_LIST_SETS(INIT_VTABLE) | 
 | #undef INIT_VTABLE | 
 |  | 
 | #define INIT_VTABLE(clazz)                                                     \ | 
 |   {                                                                            \ | 
 |     Array fake_handle;                                                         \ | 
 |     builtin_vtables_[k##clazz##Cid] = fake_handle.vtable();                    \ | 
 |   } | 
 |   CLASS_LIST_FIXED_LENGTH_ARRAYS(INIT_VTABLE) | 
 | #undef INIT_VTABLE | 
 |  | 
 | #define INIT_VTABLE(clazz)                                                     \ | 
 |   {                                                                            \ | 
 |     String fake_handle;                                                        \ | 
 |     builtin_vtables_[k##clazz##Cid] = fake_handle.vtable();                    \ | 
 |   } | 
 |   CLASS_LIST_STRINGS(INIT_VTABLE) | 
 | #undef INIT_VTABLE | 
 |  | 
 |   { | 
 |     Instance fake_handle; | 
 |     builtin_vtables_[kFfiNativeTypeCid] = fake_handle.vtable(); | 
 |   } | 
 |  | 
 | #define INIT_VTABLE(clazz)                                                     \ | 
 |   {                                                                            \ | 
 |     Instance fake_handle;                                                      \ | 
 |     builtin_vtables_[kFfi##clazz##Cid] = fake_handle.vtable();                 \ | 
 |   } | 
 |   CLASS_LIST_FFI_TYPE_MARKER(INIT_VTABLE) | 
 | #undef INIT_VTABLE | 
 |  | 
 |   { | 
 |     Instance fake_handle; | 
 |     builtin_vtables_[kFfiNativeFunctionCid] = fake_handle.vtable(); | 
 |   } | 
 |  | 
 |   { | 
 |     Pointer fake_handle; | 
 |     builtin_vtables_[kPointerCid] = fake_handle.vtable(); | 
 |   } | 
 |  | 
 |   { | 
 |     DynamicLibrary fake_handle; | 
 |     builtin_vtables_[kDynamicLibraryCid] = fake_handle.vtable(); | 
 |   } | 
 |  | 
 | #define INIT_VTABLE(clazz)                                                     \ | 
 |   {                                                                            \ | 
 |     TypedData fake_internal_handle;                                            \ | 
 |     builtin_vtables_[kTypedData##clazz##Cid] = fake_internal_handle.vtable();  \ | 
 |     TypedDataView fake_view_handle;                                            \ | 
 |     builtin_vtables_[kTypedData##clazz##ViewCid] = fake_view_handle.vtable();  \ | 
 |     builtin_vtables_[kUnmodifiableTypedData##clazz##ViewCid] =                 \ | 
 |         fake_view_handle.vtable();                                             \ | 
 |     ExternalTypedData fake_external_handle;                                    \ | 
 |     builtin_vtables_[kExternalTypedData##clazz##Cid] =                         \ | 
 |         fake_external_handle.vtable();                                         \ | 
 |   } | 
 |   CLASS_LIST_TYPED_DATA(INIT_VTABLE) | 
 | #undef INIT_VTABLE | 
 |  | 
 |   { | 
 |     TypedDataView fake_handle; | 
 |     builtin_vtables_[kByteDataViewCid] = fake_handle.vtable(); | 
 |     builtin_vtables_[kUnmodifiableByteDataViewCid] = fake_handle.vtable(); | 
 |   } | 
 |  | 
 |   { | 
 |     Instance fake_handle; | 
 |     builtin_vtables_[kByteBufferCid] = fake_handle.vtable(); | 
 |     builtin_vtables_[kNullCid] = fake_handle.vtable(); | 
 |     builtin_vtables_[kDynamicCid] = fake_handle.vtable(); | 
 |     builtin_vtables_[kVoidCid] = fake_handle.vtable(); | 
 |     builtin_vtables_[kNeverCid] = fake_handle.vtable(); | 
 |   } | 
 | } | 
 |  | 
 | void Object::Init(IsolateGroup* isolate_group) { | 
 |   // Should only be run by the vm isolate. | 
 |   ASSERT(isolate_group == Dart::vm_isolate_group()); | 
 |   Heap* heap = isolate_group->heap(); | 
 |   Thread* thread = Thread::Current(); | 
 |   ASSERT(thread != nullptr); | 
 |   // Ensure lock checks in setters are happy. | 
 |   SafepointWriteRwLocker ml(thread, isolate_group->program_lock()); | 
 |  | 
 |   InitVtables(); | 
 |  | 
 | // Allocate the read only object handles here. | 
 | #define INITIALIZE_SHARED_READONLY_HANDLE(Type, name)                          \ | 
 |   name##_ = Type::ReadOnlyHandle(); | 
 |   SHARED_READONLY_HANDLES_LIST(INITIALIZE_SHARED_READONLY_HANDLE) | 
 | #undef INITIALIZE_SHARED_READONLY_HANDLE | 
 |  | 
 |   *null_object_ = Object::null(); | 
 |   *null_class_ = Class::null(); | 
 |   *null_array_ = Array::null(); | 
 |   *null_string_ = String::null(); | 
 |   *null_instance_ = Instance::null(); | 
 |   *null_function_ = Function::null(); | 
 |   *null_function_type_ = FunctionType::null(); | 
 |   *null_record_type_ = RecordType::null(); | 
 |   *null_type_arguments_ = TypeArguments::null(); | 
 |   *null_closure_ = Closure::null(); | 
 |   *empty_type_arguments_ = TypeArguments::null(); | 
 |   *null_abstract_type_ = AbstractType::null(); | 
 |   *null_compressed_stackmaps_ = CompressedStackMaps::null(); | 
 |   *bool_true_ = true_; | 
 |   *bool_false_ = false_; | 
 |  | 
  // Initialize the empty array handles to null_ in order to be able to check
  // whether they have been allocated yet (RAW_NULL is not available).
 |   *empty_array_ = Array::null(); | 
 |   *empty_instantiations_cache_array_ = Array::null(); | 
 |   *empty_subtype_test_cache_array_ = Array::null(); | 
 |  | 
 |   Class& cls = Class::Handle(); | 
 |  | 
 |   // Allocate and initialize the class class. | 
 |   { | 
 |     intptr_t size = Class::InstanceSize(); | 
 |     uword address = heap->Allocate(thread, size, Heap::kOld); | 
 |     class_class_ = static_cast<ClassPtr>(address + kHeapObjectTag); | 
 |     InitializeObject<Class>(address); | 
 |  | 
 |     Class fake; | 
 |     // Initialization from Class::New<Class>. | 
 |     // Directly set ptr_ to break a circular dependency: SetRaw will attempt | 
 |     // to lookup class class in the class table where it is not registered yet. | 
 |     cls.ptr_ = class_class_; | 
 |     ASSERT(builtin_vtables_[kClassCid] == fake.vtable()); | 
 |     cls.set_instance_size( | 
 |         Class::InstanceSize(), | 
 |         compiler::target::RoundedAllocationSize(RTN::Class::InstanceSize())); | 
 |     const intptr_t host_next_field_offset = Class::NextFieldOffset(); | 
 |     const intptr_t target_next_field_offset = RTN::Class::NextFieldOffset(); | 
 |     cls.set_next_field_offset(host_next_field_offset, target_next_field_offset); | 
 |     cls.set_id(Class::kClassId); | 
 |     cls.set_state_bits(0); | 
 |     cls.set_is_allocate_finalized(); | 
 |     cls.set_is_declaration_loaded(); | 
 |     cls.set_is_type_finalized(); | 
 |     cls.set_type_arguments_field_offset_in_words(Class::kNoTypeArguments, | 
 |                                                  RTN::Class::kNoTypeArguments); | 
 |     cls.set_num_type_arguments_unsafe(0); | 
 |     cls.set_num_native_fields(0); | 
 |     cls.InitEmptyFields(); | 
 |     isolate_group->class_table()->Register(cls); | 
 |   } | 
 |  | 
 |   // Allocate and initialize the null class. | 
 |   cls = Class::New<Instance, RTN::Instance>(kNullCid, isolate_group); | 
 |   cls.set_num_type_arguments_unsafe(0); | 
 |   isolate_group->object_store()->set_null_class(cls); | 
 |  | 
 |   // Allocate and initialize Never class. | 
 |   cls = Class::New<Instance, RTN::Instance>(kNeverCid, isolate_group); | 
 |   cls.set_num_type_arguments_unsafe(0); | 
 |   cls.set_is_allocate_finalized(); | 
 |   cls.set_is_declaration_loaded(); | 
 |   cls.set_is_type_finalized(); | 
 |   isolate_group->object_store()->set_never_class(cls); | 
 |  | 
 |   // Allocate and initialize the free list element class. | 
 |   cls = Class::New<FreeListElement::FakeInstance, | 
 |                    RTN::FreeListElement::FakeInstance>(kFreeListElement, | 
 |                                                        isolate_group); | 
 |   cls.set_num_type_arguments_unsafe(0); | 
 |   cls.set_is_allocate_finalized(); | 
 |   cls.set_is_declaration_loaded(); | 
 |   cls.set_is_type_finalized(); | 
 |  | 
 |   // Allocate and initialize the forwarding corpse class. | 
 |   cls = Class::New<ForwardingCorpse::FakeInstance, | 
 |                    RTN::ForwardingCorpse::FakeInstance>(kForwardingCorpse, | 
 |                                                         isolate_group); | 
 |   cls.set_num_type_arguments_unsafe(0); | 
 |   cls.set_is_allocate_finalized(); | 
 |   cls.set_is_declaration_loaded(); | 
 |   cls.set_is_type_finalized(); | 
 |  | 
 |   // Allocate and initialize Sentinel class. | 
 |   cls = Class::New<Sentinel, RTN::Sentinel>(isolate_group); | 
 |   sentinel_class_ = cls.ptr(); | 
 |  | 
 |   // Allocate and initialize the sentinel values. | 
 |   { | 
 |     *sentinel_ ^= Sentinel::New(); | 
 |     *transition_sentinel_ ^= Sentinel::New(); | 
 |   } | 
 |  | 
 |   // Allocate and initialize optimizing compiler constants. | 
 |   { | 
 |     *unknown_constant_ ^= Sentinel::New(); | 
 |     *non_constant_ ^= Sentinel::New(); | 
 |     *optimized_out_ ^= Sentinel::New(); | 
 |   } | 
 |  | 
 |   // Allocate the remaining VM internal classes. | 
 |   cls = Class::New<TypeParameters, RTN::TypeParameters>(isolate_group); | 
 |   type_parameters_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<TypeArguments, RTN::TypeArguments>(isolate_group); | 
 |   type_arguments_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<PatchClass, RTN::PatchClass>(isolate_group); | 
 |   patch_class_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<Function, RTN::Function>(isolate_group); | 
 |   function_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<ClosureData, RTN::ClosureData>(isolate_group); | 
 |   closure_data_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<FfiTrampolineData, RTN::FfiTrampolineData>(isolate_group); | 
 |   ffi_trampoline_data_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<Field, RTN::Field>(isolate_group); | 
 |   field_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<Script, RTN::Script>(isolate_group); | 
 |   script_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<Library, RTN::Library>(isolate_group); | 
 |   library_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<Namespace, RTN::Namespace>(isolate_group); | 
 |   namespace_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<KernelProgramInfo, RTN::KernelProgramInfo>(isolate_group); | 
 |   kernel_program_info_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<Code, RTN::Code>(isolate_group); | 
 |   code_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<Instructions, RTN::Instructions>(isolate_group); | 
 |   instructions_class_ = cls.ptr(); | 
 |  | 
 |   cls = | 
 |       Class::New<InstructionsSection, RTN::InstructionsSection>(isolate_group); | 
 |   instructions_section_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<InstructionsTable, RTN::InstructionsTable>(isolate_group); | 
 |   instructions_table_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<ObjectPool, RTN::ObjectPool>(isolate_group); | 
 |   object_pool_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<PcDescriptors, RTN::PcDescriptors>(isolate_group); | 
 |   pc_descriptors_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<CodeSourceMap, RTN::CodeSourceMap>(isolate_group); | 
 |   code_source_map_class_ = cls.ptr(); | 
 |  | 
 |   cls = | 
 |       Class::New<CompressedStackMaps, RTN::CompressedStackMaps>(isolate_group); | 
 |   compressed_stackmaps_class_ = cls.ptr(); | 
 |  | 
 |   cls = | 
 |       Class::New<LocalVarDescriptors, RTN::LocalVarDescriptors>(isolate_group); | 
 |   var_descriptors_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<ExceptionHandlers, RTN::ExceptionHandlers>(isolate_group); | 
 |   exception_handlers_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<Context, RTN::Context>(isolate_group); | 
 |   context_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<ContextScope, RTN::ContextScope>(isolate_group); | 
 |   context_scope_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<SingleTargetCache, RTN::SingleTargetCache>(isolate_group); | 
 |   singletargetcache_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<UnlinkedCall, RTN::UnlinkedCall>(isolate_group); | 
 |   unlinkedcall_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<MonomorphicSmiableCall, RTN::MonomorphicSmiableCall>( | 
 |       isolate_group); | 
 |   monomorphicsmiablecall_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<ICData, RTN::ICData>(isolate_group); | 
 |   icdata_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<MegamorphicCache, RTN::MegamorphicCache>(isolate_group); | 
 |   megamorphic_cache_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<SubtypeTestCache, RTN::SubtypeTestCache>(isolate_group); | 
 |   subtypetestcache_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<LoadingUnit, RTN::LoadingUnit>(isolate_group); | 
 |   loadingunit_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<ApiError, RTN::ApiError>(isolate_group); | 
 |   api_error_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<LanguageError, RTN::LanguageError>(isolate_group); | 
 |   language_error_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<UnhandledException, RTN::UnhandledException>(isolate_group); | 
 |   unhandled_exception_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<UnwindError, RTN::UnwindError>(isolate_group); | 
 |   unwind_error_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<WeakSerializationReference, RTN::WeakSerializationReference>( | 
 |       isolate_group); | 
 |   weak_serialization_reference_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<WeakArray, RTN::WeakArray>(isolate_group); | 
 |   weak_array_class_ = cls.ptr(); | 
 |  | 
 |   ASSERT(class_class() != null_); | 
 |  | 
  // Pre-allocate classes in the vm isolate so that we can, for example,
  // create a symbol table and populate it with some frequently used strings
  // as symbols.
 |   cls = Class::New<Array, RTN::Array>(isolate_group); | 
 |   isolate_group->object_store()->set_array_class(cls); | 
 |   cls.set_type_arguments_field_offset(Array::type_arguments_offset(), | 
 |                                       RTN::Array::type_arguments_offset()); | 
 |   cls.set_num_type_arguments_unsafe(1); | 
 |   cls = Class::New<Array, RTN::Array>(kImmutableArrayCid, isolate_group); | 
 |   isolate_group->object_store()->set_immutable_array_class(cls); | 
 |   cls.set_type_arguments_field_offset(Array::type_arguments_offset(), | 
 |                                       RTN::Array::type_arguments_offset()); | 
 |   cls.set_num_type_arguments_unsafe(1); | 
 |   // In order to be able to canonicalize arguments descriptors early. | 
 |   cls.set_is_prefinalized(); | 
 |   cls = | 
 |       Class::New<GrowableObjectArray, RTN::GrowableObjectArray>(isolate_group); | 
 |   isolate_group->object_store()->set_growable_object_array_class(cls); | 
 |   cls.set_type_arguments_field_offset( | 
 |       GrowableObjectArray::type_arguments_offset(), | 
 |       RTN::GrowableObjectArray::type_arguments_offset()); | 
 |   cls.set_num_type_arguments_unsafe(1); | 
 |   cls = Class::NewStringClass(kOneByteStringCid, isolate_group); | 
 |   isolate_group->object_store()->set_one_byte_string_class(cls); | 
 |   cls = Class::NewStringClass(kTwoByteStringCid, isolate_group); | 
 |   isolate_group->object_store()->set_two_byte_string_class(cls); | 
 |   cls = Class::New<Mint, RTN::Mint>(isolate_group); | 
 |   isolate_group->object_store()->set_mint_class(cls); | 
 |   cls = Class::New<Double, RTN::Double>(isolate_group); | 
 |   isolate_group->object_store()->set_double_class(cls); | 
 |   cls = Class::New<Float32x4, RTN::Float32x4>(isolate_group); | 
 |   isolate_group->object_store()->set_float32x4_class(cls); | 
 |   cls = Class::New<Float64x2, RTN::Float64x2>(isolate_group); | 
 |   isolate_group->object_store()->set_float64x2_class(cls); | 
 |   cls = Class::New<Int32x4, RTN::Int32x4>(isolate_group); | 
 |   isolate_group->object_store()->set_int32x4_class(cls); | 
 |  | 
 |   // Ensure that class kExternalTypedDataUint8ArrayCid is registered as we | 
 |   // need it when reading in the token stream of bootstrap classes in the VM | 
 |   // isolate. | 
 |   Class::NewExternalTypedDataClass(kExternalTypedDataUint8ArrayCid, | 
 |                                    isolate_group); | 
 |  | 
 |   // Needed for object pools of VM isolate stubs. | 
 |   Class::NewTypedDataClass(kTypedDataInt8ArrayCid, isolate_group); | 
 |  | 
 |   // Allocate and initialize the empty_array instance. | 
 |   { | 
 |     uword address = heap->Allocate(thread, Array::InstanceSize(0), Heap::kOld); | 
 |     InitializeObjectVariant<Array>(address, kImmutableArrayCid, 0); | 
 |     Array::initializeHandle(empty_array_, | 
 |                             static_cast<ArrayPtr>(address + kHeapObjectTag)); | 
 |     empty_array_->untag()->set_length(Smi::New(0)); | 
 |     empty_array_->SetCanonical(); | 
 |   } | 
 |  | 
 |   Smi& smi = Smi::Handle(); | 
 |   // Allocate and initialize the empty instantiations cache array instance, | 
 |   // which contains metadata as the first element and a sentinel value | 
 |   // at the start of the first entry. | 
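  // Layout sketch (mirroring the code below): the Smi 0 at kMetadataIndex
  // records that the cache has no occupied entries and is linear rather than
  // hash-based; the sentinel in the sole entry's kSentinelIndex slot marks
  // that entry as unoccupied.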
 |   { | 
 |     const intptr_t array_size = | 
 |         TypeArguments::Cache::kHeaderSize + TypeArguments::Cache::kEntrySize; | 
 |     uword address = | 
 |         heap->Allocate(thread, Array::InstanceSize(array_size), Heap::kOld); | 
 |     InitializeObjectVariant<Array>(address, kImmutableArrayCid, array_size); | 
 |     Array::initializeHandle(empty_instantiations_cache_array_, | 
 |                             static_cast<ArrayPtr>(address + kHeapObjectTag)); | 
 |     empty_instantiations_cache_array_->untag()->set_length( | 
 |         Smi::New(array_size)); | 
 |     // The empty cache has no occupied entries and is not a hash-based cache. | 
 |     smi = Smi::New(0); | 
 |     empty_instantiations_cache_array_->SetAt( | 
 |         TypeArguments::Cache::kMetadataIndex, smi); | 
 |     // Make the first (and only) entry unoccupied by setting its first element | 
 |     // to the sentinel value. | 
 |     smi = TypeArguments::Cache::Sentinel(); | 
 |     InstantiationsCacheTable table(*empty_instantiations_cache_array_); | 
 |     table.At(0).Set<TypeArguments::Cache::kSentinelIndex>(smi); | 
 |     // The other contents of the array are immaterial. | 
 |     empty_instantiations_cache_array_->SetCanonical(); | 
 |   } | 
 |  | 
 |   // Allocate and initialize the empty subtype test cache array instance, | 
 |   // which contains a single unoccupied entry. | 
 |   { | 
 |     const intptr_t array_size = SubtypeTestCache::kTestEntryLength; | 
 |     uword address = | 
 |         heap->Allocate(thread, Array::InstanceSize(array_size), Heap::kOld); | 
 |     InitializeObjectVariant<Array>(address, kImmutableArrayCid, array_size); | 
 |     Array::initializeHandle(empty_subtype_test_cache_array_, | 
 |                             static_cast<ArrayPtr>(address + kHeapObjectTag)); | 
 |     empty_subtype_test_cache_array_->untag()->set_length(Smi::New(array_size)); | 
    // Make the first (and only) entry unoccupied by setting its first element
    // to the null value.
    SubtypeTestCacheTable table(*empty_subtype_test_cache_array_);
    table.At(0).Set<SubtypeTestCache::kInstanceCidOrSignature>(
        Object::null_object());
 |     // The other contents of the array are immaterial. | 
 |     empty_subtype_test_cache_array_->SetCanonical(); | 
 |   } | 
 |  | 
 |   // Allocate and initialize the canonical empty context scope object. | 
 |   { | 
 |     uword address = | 
 |         heap->Allocate(thread, ContextScope::InstanceSize(0), Heap::kOld); | 
 |     InitializeObject<ContextScope>(address, 0); | 
 |     ContextScope::initializeHandle( | 
 |         empty_context_scope_, | 
 |         static_cast<ContextScopePtr>(address + kHeapObjectTag)); | 
 |     empty_context_scope_->StoreNonPointer( | 
 |         &empty_context_scope_->untag()->num_variables_, 0); | 
 |     empty_context_scope_->StoreNonPointer( | 
 |         &empty_context_scope_->untag()->is_implicit_, true); | 
 |     empty_context_scope_->SetCanonical(); | 
 |   } | 
 |  | 
 |   // Allocate and initialize the canonical empty object pool object. | 
 |   { | 
 |     uword address = | 
 |         heap->Allocate(thread, ObjectPool::InstanceSize(0), Heap::kOld); | 
 |     InitializeObject<ObjectPool>(address, 0); | 
 |     ObjectPool::initializeHandle( | 
 |         empty_object_pool_, | 
 |         static_cast<ObjectPoolPtr>(address + kHeapObjectTag)); | 
 |     empty_object_pool_->StoreNonPointer(&empty_object_pool_->untag()->length_, | 
 |                                         0); | 
 |     empty_object_pool_->SetCanonical(); | 
 |   } | 
 |  | 
 |   // Allocate and initialize the empty_compressed_stackmaps instance. | 
 |   { | 
 |     const intptr_t instance_size = CompressedStackMaps::InstanceSize(0); | 
 |     uword address = heap->Allocate(thread, instance_size, Heap::kOld); | 
 |     InitializeObject<CompressedStackMaps>(address, 0); | 
 |     CompressedStackMaps::initializeHandle( | 
 |         empty_compressed_stackmaps_, | 
 |         static_cast<CompressedStackMapsPtr>(address + kHeapObjectTag)); | 
 |     empty_compressed_stackmaps_->untag()->payload()->set_flags_and_size(0); | 
 |     empty_compressed_stackmaps_->SetCanonical(); | 
 |   } | 
 |  | 
 |   // Allocate and initialize the empty_descriptors instance. | 
 |   { | 
 |     uword address = | 
 |         heap->Allocate(thread, PcDescriptors::InstanceSize(0), Heap::kOld); | 
 |     InitializeObject<PcDescriptors>(address, 0); | 
 |     PcDescriptors::initializeHandle( | 
 |         empty_descriptors_, | 
 |         static_cast<PcDescriptorsPtr>(address + kHeapObjectTag)); | 
 |     empty_descriptors_->StoreNonPointer(&empty_descriptors_->untag()->length_, | 
 |                                         0); | 
 |     empty_descriptors_->SetCanonical(); | 
 |   } | 
 |  | 
 |   // Allocate and initialize the canonical empty variable descriptor object. | 
 |   { | 
 |     uword address = heap->Allocate(thread, LocalVarDescriptors::InstanceSize(0), | 
 |                                    Heap::kOld); | 
 |     InitializeObject<LocalVarDescriptors>(address, 0); | 
 |     LocalVarDescriptors::initializeHandle( | 
 |         empty_var_descriptors_, | 
 |         static_cast<LocalVarDescriptorsPtr>(address + kHeapObjectTag)); | 
 |     empty_var_descriptors_->StoreNonPointer( | 
 |         &empty_var_descriptors_->untag()->num_entries_, 0); | 
 |     empty_var_descriptors_->SetCanonical(); | 
 |   } | 
 |  | 
 |   // Allocate and initialize the canonical empty exception handler info object. | 
 |   // The vast majority of all functions do not contain an exception handler | 
 |   // and can share this canonical descriptor. | 
 |   { | 
 |     uword address = | 
 |         heap->Allocate(thread, ExceptionHandlers::InstanceSize(0), Heap::kOld); | 
 |     InitializeObject<ExceptionHandlers>(address, 0); | 
 |     ExceptionHandlers::initializeHandle( | 
 |         empty_exception_handlers_, | 
 |         static_cast<ExceptionHandlersPtr>(address + kHeapObjectTag)); | 
 |     empty_exception_handlers_->StoreNonPointer( | 
 |         &empty_exception_handlers_->untag()->packed_fields_, 0); | 
 |     empty_exception_handlers_->SetCanonical(); | 
 |   } | 
 |  | 
 |   // Empty exception handlers for async/async* functions. | 
 |   { | 
 |     uword address = | 
 |         heap->Allocate(thread, ExceptionHandlers::InstanceSize(0), Heap::kOld); | 
 |     InitializeObject<ExceptionHandlers>(address, 0); | 
 |     ExceptionHandlers::initializeHandle( | 
 |         empty_async_exception_handlers_, | 
 |         static_cast<ExceptionHandlersPtr>(address + kHeapObjectTag)); | 
 |     empty_async_exception_handlers_->StoreNonPointer( | 
 |         &empty_async_exception_handlers_->untag()->packed_fields_, | 
 |         UntaggedExceptionHandlers::AsyncHandlerBit::update(true, 0)); | 
 |     empty_async_exception_handlers_->SetCanonical(); | 
 |   } | 
 |  | 
 |   // Allocate and initialize the canonical empty type arguments object. | 
 |   { | 
 |     uword address = | 
 |         heap->Allocate(thread, TypeArguments::InstanceSize(0), Heap::kOld); | 
 |     InitializeObject<TypeArguments>(address, 0); | 
 |     TypeArguments::initializeHandle( | 
 |         empty_type_arguments_, | 
 |         static_cast<TypeArgumentsPtr>(address + kHeapObjectTag)); | 
 |     empty_type_arguments_->untag()->set_length(Smi::New(0)); | 
 |     empty_type_arguments_->untag()->set_hash(Smi::New(0)); | 
 |     empty_type_arguments_->ComputeHash(); | 
 |     empty_type_arguments_->SetCanonical(); | 
 |   } | 
 |  | 
 |   // The VM isolate snapshot object table is initialized to an empty array | 
 |   // as we do not have any VM isolate snapshot at this time. | 
 |   *vm_isolate_snapshot_object_table_ = Object::empty_array().ptr(); | 
 |  | 
 |   cls = Class::New<Instance, RTN::Instance>(kDynamicCid, isolate_group); | 
 |   cls.set_is_abstract(); | 
 |   cls.set_num_type_arguments_unsafe(0); | 
 |   cls.set_is_allocate_finalized(); | 
 |   cls.set_is_declaration_loaded(); | 
 |   cls.set_is_type_finalized(); | 
 |   dynamic_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<Instance, RTN::Instance>(kVoidCid, isolate_group); | 
 |   cls.set_num_type_arguments_unsafe(0); | 
 |   cls.set_is_allocate_finalized(); | 
 |   cls.set_is_declaration_loaded(); | 
 |   cls.set_is_type_finalized(); | 
 |   void_class_ = cls.ptr(); | 
 |  | 
 |   cls = Class::New<Type, RTN::Type>(isolate_group); | 
 |   cls.set_is_allocate_finalized(); | 
 |   cls.set_is_declaration_loaded(); | 
 |   cls.set_is_type_finalized(); | 
 |  | 
 |   cls = Class::New<FunctionType, RTN::FunctionType>(isolate_group); | 
 |   cls.set_is_allocate_finalized(); | 
 |   cls.set_is_declaration_loaded(); | 
 |   cls.set_is_type_finalized(); | 
 |  | 
 |   cls = Class::New<RecordType, RTN::RecordType>(isolate_group); | 
 |   cls.set_is_allocate_finalized(); | 
 |   cls.set_is_declaration_loaded(); | 
 |   cls.set_is_type_finalized(); | 
 |  | 
 |   cls = dynamic_class_; | 
 |   *dynamic_type_ = | 
 |       Type::New(cls, Object::null_type_arguments(), Nullability::kNullable); | 
 |   dynamic_type_->SetIsFinalized(); | 
 |   dynamic_type_->ComputeHash(); | 
 |   dynamic_type_->SetCanonical(); | 
 |  | 
 |   cls = void_class_; | 
 |   *void_type_ = | 
 |       Type::New(cls, Object::null_type_arguments(), Nullability::kNullable); | 
 |   void_type_->SetIsFinalized(); | 
 |   void_type_->ComputeHash(); | 
 |   void_type_->SetCanonical(); | 
 |  | 
 |   // Since TypeArguments objects are passed as function arguments, make them | 
 |   // behave as Dart instances, although they are just VM objects. | 
 |   // Note that we cannot set the super type to ObjectType, which does not live | 
 |   // in the vm isolate. See special handling in Class::SuperClass(). | 
 |   cls = type_arguments_class_; | 
 |   cls.set_interfaces(Object::empty_array()); | 
 |   cls.SetFields(Object::empty_array()); | 
 |   cls.SetFunctions(Object::empty_array()); | 
 |  | 
 |   cls = Class::New<Bool, RTN::Bool>(isolate_group); | 
 |   isolate_group->object_store()->set_bool_class(cls); | 
 |  | 
 |   *smi_illegal_cid_ = Smi::New(kIllegalCid); | 
 |   *smi_zero_ = Smi::New(0); | 
 |  | 
 |   String& error_str = String::Handle(); | 
 |   error_str = String::New( | 
 |       "Callbacks into the Dart VM are currently prohibited. Either there are " | 
 |       "outstanding pointers from Dart_TypedDataAcquireData that have not been " | 
 |       "released with Dart_TypedDataReleaseData, or a finalizer is running.", | 
 |       Heap::kOld); | 
 |   *no_callbacks_error_ = ApiError::New(error_str, Heap::kOld); | 
 |   error_str = String::New( | 
 |       "No api calls are allowed while unwind is in progress", Heap::kOld); | 
 |   *unwind_in_progress_error_ = UnwindError::New(error_str, Heap::kOld); | 
 |   error_str = String::New("SnapshotWriter Error", Heap::kOld); | 
 |   *snapshot_writer_error_ = | 
 |       LanguageError::New(error_str, Report::kError, Heap::kOld); | 
 |   error_str = String::New("Branch offset overflow", Heap::kOld); | 
 |   *branch_offset_error_ = | 
 |       LanguageError::New(error_str, Report::kBailout, Heap::kOld); | 
 |   error_str = String::New("Speculative inlining failed", Heap::kOld); | 
 |   *speculative_inlining_error_ = | 
 |       LanguageError::New(error_str, Report::kBailout, Heap::kOld); | 
 |   error_str = String::New("Background Compilation Failed", Heap::kOld); | 
 |   *background_compilation_error_ = | 
 |       LanguageError::New(error_str, Report::kBailout, Heap::kOld); | 
 |   error_str = String::New("Out of memory", Heap::kOld); | 
 |   *out_of_memory_error_ = | 
 |       LanguageError::New(error_str, Report::kError, Heap::kOld); | 
 |  | 
 |   // Allocate the parameter types and names for synthetic getters. | 
 |   *synthetic_getter_parameter_types_ = Array::New(1, Heap::kOld); | 
 |   synthetic_getter_parameter_types_->SetAt(0, Object::dynamic_type()); | 
 |   *synthetic_getter_parameter_names_ = Array::New(1, Heap::kOld); | 
  // Fill in synthetic_getter_parameter_names_ later, after symbols are
  // initialized (in Object::FinalizeVMIsolate). The
  // synthetic_getter_parameter_names_ object needs to be created earlier, as
  // the VM isolate snapshot reader references it before
  // Object::FinalizeVMIsolate.
 |  | 
|   // Some thread fields need to be reinitialized, as the null constants | 
|   // were not initialized until now. | 
 |   thread->ClearStickyError(); | 
 |  | 
 |   ASSERT(!null_object_->IsSmi()); | 
 |   ASSERT(!null_class_->IsSmi()); | 
 |   ASSERT(null_class_->IsClass()); | 
 |   ASSERT(!null_array_->IsSmi()); | 
 |   ASSERT(null_array_->IsArray()); | 
 |   ASSERT(!null_string_->IsSmi()); | 
 |   ASSERT(null_string_->IsString()); | 
 |   ASSERT(!null_instance_->IsSmi()); | 
 |   ASSERT(null_instance_->IsInstance()); | 
 |   ASSERT(!null_function_->IsSmi()); | 
 |   ASSERT(null_function_->IsFunction()); | 
 |   ASSERT(!null_function_type_->IsSmi()); | 
 |   ASSERT(null_function_type_->IsFunctionType()); | 
 |   ASSERT(!null_record_type_->IsSmi()); | 
 |   ASSERT(null_record_type_->IsRecordType()); | 
 |   ASSERT(!null_type_arguments_->IsSmi()); | 
 |   ASSERT(null_type_arguments_->IsTypeArguments()); | 
 |   ASSERT(!null_compressed_stackmaps_->IsSmi()); | 
 |   ASSERT(null_compressed_stackmaps_->IsCompressedStackMaps()); | 
 |   ASSERT(!empty_array_->IsSmi()); | 
 |   ASSERT(empty_array_->IsArray()); | 
 |   ASSERT(!empty_instantiations_cache_array_->IsSmi()); | 
 |   ASSERT(empty_instantiations_cache_array_->IsArray()); | 
 |   ASSERT(!empty_subtype_test_cache_array_->IsSmi()); | 
 |   ASSERT(empty_subtype_test_cache_array_->IsArray()); | 
 |   ASSERT(!empty_type_arguments_->IsSmi()); | 
 |   ASSERT(empty_type_arguments_->IsTypeArguments()); | 
 |   ASSERT(!empty_context_scope_->IsSmi()); | 
 |   ASSERT(empty_context_scope_->IsContextScope()); | 
 |   ASSERT(!empty_compressed_stackmaps_->IsSmi()); | 
 |   ASSERT(empty_compressed_stackmaps_->IsCompressedStackMaps()); | 
 |   ASSERT(!empty_descriptors_->IsSmi()); | 
 |   ASSERT(empty_descriptors_->IsPcDescriptors()); | 
 |   ASSERT(!empty_var_descriptors_->IsSmi()); | 
 |   ASSERT(empty_var_descriptors_->IsLocalVarDescriptors()); | 
 |   ASSERT(!empty_exception_handlers_->IsSmi()); | 
 |   ASSERT(empty_exception_handlers_->IsExceptionHandlers()); | 
 |   ASSERT(!empty_async_exception_handlers_->IsSmi()); | 
 |   ASSERT(empty_async_exception_handlers_->IsExceptionHandlers()); | 
 |   ASSERT(!sentinel_->IsSmi()); | 
 |   ASSERT(sentinel_->IsSentinel()); | 
 |   ASSERT(!transition_sentinel_->IsSmi()); | 
 |   ASSERT(transition_sentinel_->IsSentinel()); | 
 |   ASSERT(!unknown_constant_->IsSmi()); | 
 |   ASSERT(unknown_constant_->IsSentinel()); | 
 |   ASSERT(!non_constant_->IsSmi()); | 
 |   ASSERT(non_constant_->IsSentinel()); | 
 |   ASSERT(!optimized_out_->IsSmi()); | 
 |   ASSERT(optimized_out_->IsSentinel()); | 
 |   ASSERT(!bool_true_->IsSmi()); | 
 |   ASSERT(bool_true_->IsBool()); | 
 |   ASSERT(!bool_false_->IsSmi()); | 
 |   ASSERT(bool_false_->IsBool()); | 
 |   ASSERT(smi_illegal_cid_->IsSmi()); | 
 |   ASSERT(smi_zero_->IsSmi()); | 
 |   ASSERT(!no_callbacks_error_->IsSmi()); | 
 |   ASSERT(no_callbacks_error_->IsApiError()); | 
 |   ASSERT(!unwind_in_progress_error_->IsSmi()); | 
 |   ASSERT(unwind_in_progress_error_->IsUnwindError()); | 
 |   ASSERT(!snapshot_writer_error_->IsSmi()); | 
 |   ASSERT(snapshot_writer_error_->IsLanguageError()); | 
 |   ASSERT(!branch_offset_error_->IsSmi()); | 
 |   ASSERT(branch_offset_error_->IsLanguageError()); | 
 |   ASSERT(!speculative_inlining_error_->IsSmi()); | 
 |   ASSERT(speculative_inlining_error_->IsLanguageError()); | 
 |   ASSERT(!background_compilation_error_->IsSmi()); | 
 |   ASSERT(background_compilation_error_->IsLanguageError()); | 
 |   ASSERT(!out_of_memory_error_->IsSmi()); | 
 |   ASSERT(out_of_memory_error_->IsLanguageError()); | 
 |   ASSERT(!vm_isolate_snapshot_object_table_->IsSmi()); | 
 |   ASSERT(vm_isolate_snapshot_object_table_->IsArray()); | 
 |   ASSERT(!synthetic_getter_parameter_types_->IsSmi()); | 
 |   ASSERT(synthetic_getter_parameter_types_->IsArray()); | 
 |   ASSERT(!synthetic_getter_parameter_names_->IsSmi()); | 
 |   ASSERT(synthetic_getter_parameter_names_->IsArray()); | 
 | } | 
 |  | 
 | void Object::FinishInit(IsolateGroup* isolate_group) { | 
 |   // The type testing stubs we initialize in AbstractType objects for the | 
 |   // canonical type of kDynamicCid/kVoidCid need to be set in this | 
 |   // method, which is called after StubCode::InitOnce(). | 
 |   Code& code = Code::Handle(); | 
 |  | 
 |   code = TypeTestingStubGenerator::DefaultCodeForType(*dynamic_type_); | 
 |   dynamic_type_->InitializeTypeTestingStubNonAtomic(code); | 
 |  | 
 |   code = TypeTestingStubGenerator::DefaultCodeForType(*void_type_); | 
 |   void_type_->InitializeTypeTestingStubNonAtomic(code); | 
 | } | 
 |  | 
 | void Object::Cleanup() { | 
 |   null_ = static_cast<ObjectPtr>(RAW_NULL); | 
 |   true_ = static_cast<BoolPtr>(RAW_NULL); | 
 |   false_ = static_cast<BoolPtr>(RAW_NULL); | 
 |   class_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   dynamic_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   void_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   type_parameters_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   type_arguments_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   patch_class_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   function_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   closure_data_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   ffi_trampoline_data_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   field_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   script_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   library_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   namespace_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   kernel_program_info_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   code_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   instructions_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   instructions_section_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   instructions_table_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   object_pool_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   pc_descriptors_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   code_source_map_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   compressed_stackmaps_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   var_descriptors_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   exception_handlers_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   context_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   context_scope_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   singletargetcache_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   unlinkedcall_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   monomorphicsmiablecall_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   icdata_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   megamorphic_cache_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   subtypetestcache_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   loadingunit_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   api_error_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   language_error_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   unhandled_exception_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 |   unwind_error_class_ = static_cast<ClassPtr>(RAW_NULL); | 
 | } | 
 |  | 
| // An object visitor that marks all visited objects. This is used to premark | 
| // all objects in the vm_isolate_ heap. It also precomputes hash codes so | 
| // that the identity hash codes of objects in the read-only VM isolate can | 
| // be retrieved later. | 
 | class FinalizeVMIsolateVisitor : public ObjectVisitor { | 
 |  public: | 
 |   FinalizeVMIsolateVisitor() | 
 | #if defined(HASH_IN_OBJECT_HEADER) | 
 |       : counter_(1337) | 
 | #endif | 
 |   { | 
 |   } | 
 |  | 
 |   void VisitObject(ObjectPtr obj) { | 
 |     // Free list elements should never be marked. | 
 |     ASSERT(!obj->untag()->IsMarked()); | 
 |     // No forwarding corpses in the VM isolate. | 
 |     ASSERT(!obj->IsForwardingCorpse()); | 
 |     if (!obj->IsFreeListElement()) { | 
 |       obj->untag()->SetMarkBitUnsynchronized(); | 
 |       Object::FinalizeReadOnlyObject(obj); | 
 | #if defined(HASH_IN_OBJECT_HEADER) | 
|       // These objects end up in the read-only VM isolate, which is shared | 
|       // between isolates, so we have to prepopulate them with identity hash | 
|       // codes, since we can't add hash codes later. | 
|       if (Object::GetCachedHash(obj) == 0) { | 
|         // Some classes compute identity hash codes from their contents | 
|         // rather than storing one per object. | 
 |         ASSERT(!obj->IsStringInstance()); | 
 |         if (obj == Object::null()) { | 
 |           Object::SetCachedHashIfNotSet(obj, kNullIdentityHash); | 
 |         } else if (obj == Object::bool_true().ptr()) { | 
 |           Object::SetCachedHashIfNotSet(obj, kTrueIdentityHash); | 
 |         } else if (obj == Object::bool_false().ptr()) { | 
 |           Object::SetCachedHashIfNotSet(obj, kFalseIdentityHash); | 
 |         } else if (!obj->IsMint() && !obj->IsDouble()) { | 
 |           counter_ += 2011;  // The year Dart was announced and a prime. | 
 |           counter_ &= 0x3fffffff; | 
 |           if (counter_ == 0) counter_++; | 
 |           Object::SetCachedHashIfNotSet(obj, counter_); | 
 |         } | 
 |       } | 
 | #endif | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |       if (obj->IsClass()) { | 
|         // We won't be able to update read-only VM isolate classes if | 
|         // implementors are discovered later, so record the conservative | 
|         // kDynamicCid now. | 
 |         static_cast<ClassPtr>(obj)->untag()->implementor_cid_ = kDynamicCid; | 
 |       } | 
 | #endif | 
 |     } | 
 |   } | 
 |  | 
 |  private: | 
 | #if defined(HASH_IN_OBJECT_HEADER) | 
 |   int32_t counter_; | 
 | #endif | 
 | }; | 
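|  | 
| // A minimal sketch (not part of the VM; the helper name is hypothetical) of | 
| // the counter-based identity hash scheme used above: each eligible object | 
| // gets the next value of a stepped counter that is kept non-zero and within | 
| // 30 bits so it fits in the header's hash field. | 
| // | 
| //   int32_t counter = 1337; | 
| //   int32_t NextIdentityHash() { | 
| //     counter += 2011;               // prime step | 
| //     counter &= 0x3fffffff;         // stay within 30 bits | 
| //     if (counter == 0) counter++;   // a hash of 0 means "not set" | 
| //     return counter; | 
| //   } | 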
 |  | 
 | #define SET_CLASS_NAME(class_name, name)                                       \ | 
 |   cls = class_name##_class();                                                  \ | 
 |   cls.set_name(Symbols::name()); | 
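|  | 
| // For example, SET_CLASS_NAME(class, Class) expands to: | 
| //   cls = class_class(); | 
| //   cls.set_name(Symbols::Class()); | 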
 |  | 
 | void Object::FinalizeVMIsolate(IsolateGroup* isolate_group) { | 
 |   // Should only be run by the vm isolate. | 
 |   ASSERT(isolate_group == Dart::vm_isolate_group()); | 
 |  | 
|   // Finish the initialization of synthetic_getter_parameter_names_, which | 
|   // was started in Object::InitOnce(). | 
 |   synthetic_getter_parameter_names_->SetAt(0, Symbols::This()); | 
 |  | 
 |   // Set up names for all VM singleton classes. | 
 |   Class& cls = Class::Handle(); | 
 |  | 
 |   SET_CLASS_NAME(class, Class); | 
 |   SET_CLASS_NAME(dynamic, Dynamic); | 
 |   SET_CLASS_NAME(void, Void); | 
 |   SET_CLASS_NAME(type_parameters, TypeParameters); | 
 |   SET_CLASS_NAME(type_arguments, TypeArguments); | 
 |   SET_CLASS_NAME(patch_class, PatchClass); | 
 |   SET_CLASS_NAME(function, Function); | 
 |   SET_CLASS_NAME(closure_data, ClosureData); | 
 |   SET_CLASS_NAME(ffi_trampoline_data, FfiTrampolineData); | 
 |   SET_CLASS_NAME(field, Field); | 
 |   SET_CLASS_NAME(script, Script); | 
 |   SET_CLASS_NAME(library, LibraryClass); | 
 |   SET_CLASS_NAME(namespace, Namespace); | 
 |   SET_CLASS_NAME(kernel_program_info, KernelProgramInfo); | 
 |   SET_CLASS_NAME(weak_serialization_reference, WeakSerializationReference); | 
 |   SET_CLASS_NAME(weak_array, WeakArray); | 
 |   SET_CLASS_NAME(code, Code); | 
 |   SET_CLASS_NAME(instructions, Instructions); | 
 |   SET_CLASS_NAME(instructions_section, InstructionsSection); | 
 |   SET_CLASS_NAME(instructions_table, InstructionsTable); | 
 |   SET_CLASS_NAME(object_pool, ObjectPool); | 
 |   SET_CLASS_NAME(code_source_map, CodeSourceMap); | 
 |   SET_CLASS_NAME(pc_descriptors, PcDescriptors); | 
 |   SET_CLASS_NAME(compressed_stackmaps, CompressedStackMaps); | 
 |   SET_CLASS_NAME(var_descriptors, LocalVarDescriptors); | 
 |   SET_CLASS_NAME(exception_handlers, ExceptionHandlers); | 
 |   SET_CLASS_NAME(context, Context); | 
 |   SET_CLASS_NAME(context_scope, ContextScope); | 
 |   SET_CLASS_NAME(sentinel, Sentinel); | 
 |   SET_CLASS_NAME(singletargetcache, SingleTargetCache); | 
 |   SET_CLASS_NAME(unlinkedcall, UnlinkedCall); | 
 |   SET_CLASS_NAME(monomorphicsmiablecall, MonomorphicSmiableCall); | 
 |   SET_CLASS_NAME(icdata, ICData); | 
 |   SET_CLASS_NAME(megamorphic_cache, MegamorphicCache); | 
 |   SET_CLASS_NAME(subtypetestcache, SubtypeTestCache); | 
 |   SET_CLASS_NAME(loadingunit, LoadingUnit); | 
 |   SET_CLASS_NAME(api_error, ApiError); | 
 |   SET_CLASS_NAME(language_error, LanguageError); | 
 |   SET_CLASS_NAME(unhandled_exception, UnhandledException); | 
 |   SET_CLASS_NAME(unwind_error, UnwindError); | 
 |  | 
 |   // Set up names for classes which are also pre-allocated in the vm isolate. | 
 |   cls = isolate_group->object_store()->array_class(); | 
 |   cls.set_name(Symbols::_List()); | 
 |   cls = isolate_group->object_store()->one_byte_string_class(); | 
 |   cls.set_name(Symbols::OneByteString()); | 
 |   cls = isolate_group->object_store()->never_class(); | 
 |   cls.set_name(Symbols::Never()); | 
 |  | 
 |   // Set up names for the pseudo-classes for free list elements and forwarding | 
 |   // corpses. Mainly this makes VM debugging easier. | 
 |   cls = isolate_group->class_table()->At(kFreeListElement); | 
 |   cls.set_name(Symbols::FreeListElement()); | 
 |   cls = isolate_group->class_table()->At(kForwardingCorpse); | 
 |   cls.set_name(Symbols::ForwardingCorpse()); | 
 |  | 
 | #if defined(DART_PRECOMPILER) | 
 |   const auto& function = | 
 |       Function::Handle(StubCode::UnknownDartCode().function()); | 
 |   function.set_name(Symbols::OptimizedOut()); | 
 | #endif  // defined(DART_PRECOMPILER) | 
 |  | 
 |   { | 
 |     ASSERT(isolate_group == Dart::vm_isolate_group()); | 
 |     Thread* thread = Thread::Current(); | 
 |     WritableVMIsolateScope scope(thread); | 
 |     HeapIterationScope iteration(thread); | 
 |     FinalizeVMIsolateVisitor premarker; | 
 |     ASSERT(isolate_group->heap()->UsedInWords(Heap::kNew) == 0); | 
 |     iteration.IterateOldObjectsNoImagePages(&premarker); | 
 |     // Make the VM isolate read-only again after setting all objects as marked. | 
 |     // Note objects in image pages are already pre-marked. | 
 |   } | 
 | } | 
 |  | 
 | void Object::FinalizeReadOnlyObject(ObjectPtr object) { | 
 |   NoSafepointScope no_safepoint; | 
 |   intptr_t cid = object->GetClassId(); | 
 |   if (cid == kOneByteStringCid) { | 
 |     OneByteStringPtr str = static_cast<OneByteStringPtr>(object); | 
 |     if (String::GetCachedHash(str) == 0) { | 
 |       intptr_t hash = String::Hash(str); | 
 |       String::SetCachedHashIfNotSet(str, hash); | 
 |     } | 
 |     intptr_t size = OneByteString::UnroundedSize(str); | 
 |     ASSERT(size <= str->untag()->HeapSize()); | 
 |     memset(reinterpret_cast<void*>(UntaggedObject::ToAddr(str) + size), 0, | 
 |            str->untag()->HeapSize() - size); | 
 |   } else if (cid == kTwoByteStringCid) { | 
 |     TwoByteStringPtr str = static_cast<TwoByteStringPtr>(object); | 
 |     if (String::GetCachedHash(str) == 0) { | 
 |       intptr_t hash = String::Hash(str); | 
 |       String::SetCachedHashIfNotSet(str, hash); | 
 |     } | 
 |     ASSERT(String::GetCachedHash(str) != 0); | 
 |     intptr_t size = TwoByteString::UnroundedSize(str); | 
 |     ASSERT(size <= str->untag()->HeapSize()); | 
 |     memset(reinterpret_cast<void*>(UntaggedObject::ToAddr(str) + size), 0, | 
 |            str->untag()->HeapSize() - size); | 
 |   } else if (cid == kCodeSourceMapCid) { | 
 |     CodeSourceMapPtr map = CodeSourceMap::RawCast(object); | 
 |     intptr_t size = CodeSourceMap::UnroundedSize(map); | 
 |     ASSERT(size <= map->untag()->HeapSize()); | 
 |     memset(reinterpret_cast<void*>(UntaggedObject::ToAddr(map) + size), 0, | 
 |            map->untag()->HeapSize() - size); | 
 |   } else if (cid == kCompressedStackMapsCid) { | 
 |     CompressedStackMapsPtr maps = CompressedStackMaps::RawCast(object); | 
 |     intptr_t size = CompressedStackMaps::UnroundedSize(maps); | 
 |     ASSERT(size <= maps->untag()->HeapSize()); | 
 |     memset(reinterpret_cast<void*>(UntaggedObject::ToAddr(maps) + size), 0, | 
 |            maps->untag()->HeapSize() - size); | 
 |   } else if (cid == kPcDescriptorsCid) { | 
 |     PcDescriptorsPtr desc = PcDescriptors::RawCast(object); | 
 |     intptr_t size = PcDescriptors::UnroundedSize(desc); | 
 |     ASSERT(size <= desc->untag()->HeapSize()); | 
 |     memset(reinterpret_cast<void*>(UntaggedObject::ToAddr(desc) + size), 0, | 
 |            desc->untag()->HeapSize() - size); | 
 |   } | 
 | } | 
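|  | 
| // Each branch above follows the same pattern, sketched here with Klass | 
| // standing in for the concrete class (illustrative only): compute the | 
| // unrounded payload size, then zero the padding up to the rounded heap | 
| // size, presumably so the bytes of read-only objects are deterministic. | 
| // | 
| //   intptr_t size = Klass::UnroundedSize(ptr);  // end of the payload | 
| //   memset(reinterpret_cast<void*>(UntaggedObject::ToAddr(ptr) + size), 0, | 
| //          ptr->untag()->HeapSize() - size);    // clear the padding | 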
 |  | 
 | void Object::set_vm_isolate_snapshot_object_table(const Array& table) { | 
 |   ASSERT(Isolate::Current() == Dart::vm_isolate()); | 
 |   *vm_isolate_snapshot_object_table_ = table.ptr(); | 
 | } | 
 |  | 
| // Make unused space in an object whose type has been transformed safe | 
| // for traversing during GC. | 
| // The unused part of the transformed object is marked as a FreeListElement | 
| // object that is not inserted into the freelist. | 
 | void Object::MakeUnusedSpaceTraversable(const Object& obj, | 
 |                                         intptr_t original_size, | 
 |                                         intptr_t used_size) { | 
 |   ASSERT(Thread::Current()->no_safepoint_scope_depth() > 0); | 
 |   ASSERT(!obj.IsNull()); | 
 |   ASSERT(original_size >= used_size); | 
 |   if (original_size > used_size) { | 
 |     intptr_t leftover_size = original_size - used_size; | 
 |     uword addr = UntaggedObject::ToAddr(obj.ptr()) + used_size; | 
 |     if (obj.ptr()->IsNewObject()) { | 
 |       FreeListElement::AsElementNew(addr, leftover_size); | 
 |     } else { | 
 |       FreeListElement::AsElement(addr, leftover_size); | 
 |     } | 
 |     // On architectures with a relaxed memory model, the concurrent marker may | 
 |     // observe the write of the filler object's header before observing the | 
 |     // new array length, and so treat it as a pointer. Ensure it is a Smi so | 
 |     // the marker won't dereference it. | 
 |     ASSERT((*reinterpret_cast<uword*>(addr) & kSmiTagMask) == kSmiTag); | 
 |     ASSERT((*reinterpret_cast<uword*>(addr + kWordSize) & kSmiTagMask) == | 
 |            kSmiTag); | 
 |   } | 
 | } | 
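|  | 
| // Illustrative (hypothetical) call site: when an array is shrunk in place, | 
| // the freed tail becomes a filler object so heap iteration stays linear: | 
| // | 
| //   NoSafepointScope no_safepoint; | 
| //   Object::MakeUnusedSpaceTraversable(array, | 
| //                                      Array::InstanceSize(old_length), | 
| //                                      Array::InstanceSize(new_length)); | 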
 |  | 
 | void Object::VerifyBuiltinVtables() { | 
 | #if defined(DEBUG) | 
 |   ASSERT(builtin_vtables_[kIllegalCid] == 0); | 
 |   ASSERT(builtin_vtables_[kFreeListElement] == 0); | 
 |   ASSERT(builtin_vtables_[kForwardingCorpse] == 0); | 
 |   ClassTable* table = IsolateGroup::Current()->class_table(); | 
 |   for (intptr_t cid = kObjectCid; cid < kNumPredefinedCids; cid++) { | 
 |     if (table->HasValidClassAt(cid)) { | 
 |       ASSERT(builtin_vtables_[cid] != 0); | 
 |     } | 
 |   } | 
 | #endif | 
 | } | 
 |  | 
 | void Object::RegisterClass(const Class& cls, | 
 |                            const String& name, | 
 |                            const Library& lib) { | 
 |   ASSERT(name.Length() > 0); | 
 |   ASSERT(name.CharAt(0) != '_'); | 
 |   cls.set_name(name); | 
 |   lib.AddClass(cls); | 
 | } | 
 |  | 
 | void Object::RegisterPrivateClass(const Class& cls, | 
 |                                   const String& public_class_name, | 
 |                                   const Library& lib) { | 
 |   ASSERT(public_class_name.Length() > 0); | 
 |   ASSERT(public_class_name.CharAt(0) == '_'); | 
 |   String& str = String::Handle(); | 
 |   str = lib.PrivateName(public_class_name); | 
 |   cls.set_name(str); | 
 |   lib.AddClass(cls); | 
 | } | 
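|  | 
| // Note (an informal sketch of the behavior): lib.PrivateName() mangles the | 
| // given name with the library's private key, so a class registered as | 
| // "_List" is stored under something like "_List@<key>", keeping identically | 
| // named private classes in different libraries distinct. | 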
 |  | 
 | // Initialize a new isolate from source or from a snapshot. | 
 | // | 
 | // There are three possibilities: | 
 | //   1. Running a Kernel binary.  This function will bootstrap from the KERNEL | 
 | //      file. | 
 | //   2. There is no vm snapshot.  This function will bootstrap from source. | 
 | //   3. There is a vm snapshot.  The caller should initialize from the snapshot. | 
 | // | 
 | // A non-null kernel argument indicates (1). | 
 | // A nullptr kernel indicates (2) or (3). | 
 | ErrorPtr Object::Init(IsolateGroup* isolate_group, | 
 |                       const uint8_t* kernel_buffer, | 
 |                       intptr_t kernel_buffer_size) { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   ASSERT(isolate_group == thread->isolate_group()); | 
 |   TIMELINE_DURATION(thread, Isolate, "Object::Init"); | 
 |  | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   const bool bootstrapping = false; | 
 | #else | 
 |   const bool is_kernel = (kernel_buffer != nullptr); | 
 |   const bool bootstrapping = | 
 |       (Dart::vm_snapshot_kind() == Snapshot::kNone) || is_kernel; | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME). | 
 |  | 
 |   if (bootstrapping) { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |     // Object::Init version when we are bootstrapping from source or from a | 
 |     // Kernel binary. | 
|     // This will initialize the isolate group's object_store, which is | 
|     // shared by all isolates running in the isolate group. | 
 |     ObjectStore* object_store = isolate_group->object_store(); | 
 |     SafepointWriteRwLocker ml(thread, isolate_group->program_lock()); | 
 |  | 
 |     Class& cls = Class::Handle(zone); | 
 |     Type& type = Type::Handle(zone); | 
 |     Array& array = Array::Handle(zone); | 
 |     WeakArray& weak_array = WeakArray::Handle(zone); | 
 |     Library& lib = Library::Handle(zone); | 
 |     TypeArguments& type_args = TypeArguments::Handle(zone); | 
 |  | 
|     // All RawArray fields will be initialized to an empty array; therefore, | 
|     // initialize the array class first. | 
 |     cls = Class::New<Array, RTN::Array>(isolate_group); | 
 |     ASSERT(object_store->array_class() == Class::null()); | 
 |     object_store->set_array_class(cls); | 
 |  | 
|     // VM classes that are parameterized (Array, ImmutableArray, | 
|     // GrowableObjectArray, Map, ConstMap, Set, ConstSet) are also | 
|     // pre-finalized, so CalculateFieldOffsets() is not called for them; | 
|     // hence we need to set the offset of their type_arguments_ field here, | 
|     // which is explicitly declared in their respective Raw* classes. | 
 |     cls.set_type_arguments_field_offset(Array::type_arguments_offset(), | 
 |                                         RTN::Array::type_arguments_offset()); | 
 |     cls.set_num_type_arguments_unsafe(1); | 
 |  | 
|     // Set up the growable object array class (this has to be done after the | 
|     // array class is set up, as one of its fields is an array object). | 
 |     cls = Class::New<GrowableObjectArray, RTN::GrowableObjectArray>( | 
 |         isolate_group); | 
 |     object_store->set_growable_object_array_class(cls); | 
 |     cls.set_type_arguments_field_offset( | 
 |         GrowableObjectArray::type_arguments_offset(), | 
 |         RTN::GrowableObjectArray::type_arguments_offset()); | 
 |     cls.set_num_type_arguments_unsafe(1); | 
 |  | 
 |     // Initialize hash set for regexp_table_. | 
 |     const intptr_t kInitialCanonicalRegExpSize = 4; | 
 |     weak_array = HashTables::New<CanonicalRegExpSet>( | 
 |         kInitialCanonicalRegExpSize, Heap::kOld); | 
 |     object_store->set_regexp_table(weak_array); | 
 |  | 
 |     // Initialize hash set for canonical types. | 
 |     const intptr_t kInitialCanonicalTypeSize = 16; | 
 |     array = HashTables::New<CanonicalTypeSet>(kInitialCanonicalTypeSize, | 
 |                                               Heap::kOld); | 
 |     object_store->set_canonical_types(array); | 
 |  | 
 |     // Initialize hash set for canonical function types. | 
 |     const intptr_t kInitialCanonicalFunctionTypeSize = 16; | 
 |     array = HashTables::New<CanonicalFunctionTypeSet>( | 
 |         kInitialCanonicalFunctionTypeSize, Heap::kOld); | 
 |     object_store->set_canonical_function_types(array); | 
 |  | 
 |     // Initialize hash set for canonical record types. | 
 |     const intptr_t kInitialCanonicalRecordTypeSize = 16; | 
 |     array = HashTables::New<CanonicalRecordTypeSet>( | 
 |         kInitialCanonicalRecordTypeSize, Heap::kOld); | 
 |     object_store->set_canonical_record_types(array); | 
 |  | 
 |     // Initialize hash set for canonical type parameters. | 
 |     const intptr_t kInitialCanonicalTypeParameterSize = 4; | 
 |     array = HashTables::New<CanonicalTypeParameterSet>( | 
 |         kInitialCanonicalTypeParameterSize, Heap::kOld); | 
 |     object_store->set_canonical_type_parameters(array); | 
 |  | 
 |     // Initialize hash set for canonical_type_arguments_. | 
 |     const intptr_t kInitialCanonicalTypeArgumentsSize = 4; | 
 |     array = HashTables::New<CanonicalTypeArgumentsSet>( | 
 |         kInitialCanonicalTypeArgumentsSize, Heap::kOld); | 
 |     object_store->set_canonical_type_arguments(array); | 
 |  | 
|     // Set up the type classes early in the process. | 
 |     const Class& type_cls = | 
 |         Class::Handle(zone, Class::New<Type, RTN::Type>(isolate_group)); | 
 |     const Class& function_type_cls = Class::Handle( | 
 |         zone, Class::New<FunctionType, RTN::FunctionType>(isolate_group)); | 
 |     const Class& record_type_cls = Class::Handle( | 
 |         zone, Class::New<RecordType, RTN::RecordType>(isolate_group)); | 
 |     const Class& type_parameter_cls = Class::Handle( | 
 |         zone, Class::New<TypeParameter, RTN::TypeParameter>(isolate_group)); | 
 |     const Class& library_prefix_cls = Class::Handle( | 
 |         zone, Class::New<LibraryPrefix, RTN::LibraryPrefix>(isolate_group)); | 
 |  | 
 |     // Pre-allocate the OneByteString class needed by the symbol table. | 
 |     cls = Class::NewStringClass(kOneByteStringCid, isolate_group); | 
 |     object_store->set_one_byte_string_class(cls); | 
 |  | 
 |     // Pre-allocate the TwoByteString class needed by the symbol table. | 
 |     cls = Class::NewStringClass(kTwoByteStringCid, isolate_group); | 
 |     object_store->set_two_byte_string_class(cls); | 
 |  | 
|     // Set up the symbol table for the symbols created in the isolate. | 
 |     Symbols::SetupSymbolTable(isolate_group); | 
 |  | 
 |     // Set up the libraries array before initializing the core library. | 
 |     const GrowableObjectArray& libraries = | 
 |         GrowableObjectArray::Handle(zone, GrowableObjectArray::New(Heap::kOld)); | 
 |     object_store->set_libraries(libraries); | 
 |  | 
 |     // Pre-register the core library. | 
 |     Library::InitCoreLibrary(isolate_group); | 
 |  | 
|     // Basic infrastructure has been set up; initialize the class dictionary. | 
 |     const Library& core_lib = Library::Handle(zone, Library::CoreLibrary()); | 
 |     ASSERT(!core_lib.IsNull()); | 
 |  | 
 |     const GrowableObjectArray& pending_classes = | 
 |         GrowableObjectArray::Handle(zone, GrowableObjectArray::New()); | 
 |     object_store->set_pending_classes(pending_classes); | 
 |  | 
|     // Now that the symbol table is initialized and the core dictionary as | 
|     // well as the core implementation dictionary have been set up, | 
|     // preallocate the remaining classes and register them by name in the | 
|     // dictionaries. | 
 |     String& name = String::Handle(zone); | 
 |     cls = object_store->array_class();  // Was allocated above. | 
 |     RegisterPrivateClass(cls, Symbols::_List(), core_lib); | 
 |     pending_classes.Add(cls); | 
|     // We cannot use NewNonParameterizedType(), because Array is | 
|     // parameterized.  Warning: class _List has not been patched yet. Its | 
|     // declared number of type parameters is still 0. It will become 1 after | 
|     // patching. The array type allocated below represents the raw type | 
|     // _List and not _List<E> as one might expect. Use with caution. | 
 |     type = Type::New(Class::Handle(zone, cls.ptr()), | 
 |                      Object::null_type_arguments(), Nullability::kNonNullable); | 
 |     type.SetIsFinalized(); | 
 |     type ^= type.Canonicalize(thread); | 
 |     object_store->set_array_type(type); | 
 |  | 
 |     cls = object_store->growable_object_array_class();  // Was allocated above. | 
 |     RegisterPrivateClass(cls, Symbols::_GrowableList(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<Array, RTN::Array>(kImmutableArrayCid, isolate_group); | 
 |     object_store->set_immutable_array_class(cls); | 
 |     cls.set_type_arguments_field_offset(Array::type_arguments_offset(), | 
 |                                         RTN::Array::type_arguments_offset()); | 
 |     cls.set_num_type_arguments_unsafe(1); | 
 |     ASSERT(object_store->immutable_array_class() != | 
 |            object_store->array_class()); | 
 |     cls.set_is_prefinalized(); | 
 |     RegisterPrivateClass(cls, Symbols::_ImmutableList(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = object_store->one_byte_string_class();  // Was allocated above. | 
 |     RegisterPrivateClass(cls, Symbols::OneByteString(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = object_store->two_byte_string_class();  // Was allocated above. | 
 |     RegisterPrivateClass(cls, Symbols::TwoByteString(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     // Pre-register the isolate library so the native class implementations can | 
 |     // be hooked up before compiling it. | 
 |     Library& isolate_lib = Library::Handle( | 
 |         zone, Library::LookupLibrary(thread, Symbols::DartIsolate())); | 
 |     if (isolate_lib.IsNull()) { | 
 |       isolate_lib = Library::NewLibraryHelper(Symbols::DartIsolate(), true); | 
 |       isolate_lib.SetLoadRequested(); | 
 |       isolate_lib.Register(thread); | 
 |     } | 
 |     object_store->set_bootstrap_library(ObjectStore::kIsolate, isolate_lib); | 
 |     ASSERT(!isolate_lib.IsNull()); | 
 |     ASSERT(isolate_lib.ptr() == Library::IsolateLibrary()); | 
 |  | 
 |     cls = Class::New<Capability, RTN::Capability>(isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_Capability(), isolate_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<ReceivePort, RTN::ReceivePort>(isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_RawReceivePort(), isolate_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<SendPort, RTN::SendPort>(isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_SendPort(), isolate_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<TransferableTypedData, RTN::TransferableTypedData>( | 
 |         isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_TransferableTypedDataImpl(), | 
 |                          isolate_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     const Class& stacktrace_cls = Class::Handle( | 
 |         zone, Class::New<StackTrace, RTN::StackTrace>(isolate_group)); | 
 |     RegisterPrivateClass(stacktrace_cls, Symbols::_StackTrace(), core_lib); | 
 |     pending_classes.Add(stacktrace_cls); | 
 |     // Super type set below, after Object is allocated. | 
 |  | 
 |     cls = Class::New<RegExp, RTN::RegExp>(isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_RegExp(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     // Initialize the base interfaces used by the core VM classes. | 
 |  | 
|     // Allocate and initialize the pre-allocated classes in the core library. | 
|     // The script and token index of these pre-allocated classes are set up | 
|     // when the corelib script is compiled. | 
 |     cls = Class::New<Instance, RTN::Instance>(kInstanceCid, isolate_group); | 
 |     object_store->set_object_class(cls); | 
 |     cls.set_name(Symbols::Object()); | 
 |     cls.set_num_type_arguments_unsafe(0); | 
 |     cls.set_is_prefinalized(); | 
 |     cls.set_is_const(); | 
 |     core_lib.AddClass(cls); | 
 |     pending_classes.Add(cls); | 
 |     type = Type::NewNonParameterizedType(cls); | 
 |     ASSERT(type.IsCanonical()); | 
 |     object_store->set_object_type(type); | 
 |     type = type.ToNullability(Nullability::kNonNullable, Heap::kOld); | 
 |     ASSERT(type.IsCanonical()); | 
 |     object_store->set_non_nullable_object_type(type); | 
 |     type = type.ToNullability(Nullability::kNullable, Heap::kOld); | 
 |     ASSERT(type.IsCanonical()); | 
 |     object_store->set_nullable_object_type(type); | 
 |  | 
 |     cls = Class::New<Bool, RTN::Bool>(isolate_group); | 
 |     object_store->set_bool_class(cls); | 
 |     RegisterClass(cls, Symbols::Bool(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kNullCid, isolate_group); | 
 |     object_store->set_null_class(cls); | 
 |     cls.set_num_type_arguments_unsafe(0); | 
 |     cls.set_is_prefinalized(); | 
 |     RegisterClass(cls, Symbols::Null(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kNeverCid, isolate_group); | 
 |     cls.set_num_type_arguments_unsafe(0); | 
 |     cls.set_is_allocate_finalized(); | 
 |     cls.set_is_declaration_loaded(); | 
 |     cls.set_is_type_finalized(); | 
 |     cls.set_name(Symbols::Never()); | 
 |     object_store->set_never_class(cls); | 
 |  | 
 |     ASSERT(!library_prefix_cls.IsNull()); | 
 |     RegisterPrivateClass(library_prefix_cls, Symbols::_LibraryPrefix(), | 
 |                          core_lib); | 
 |     pending_classes.Add(library_prefix_cls); | 
 |  | 
 |     RegisterPrivateClass(type_cls, Symbols::_Type(), core_lib); | 
 |     pending_classes.Add(type_cls); | 
 |  | 
 |     RegisterPrivateClass(function_type_cls, Symbols::_FunctionType(), core_lib); | 
 |     pending_classes.Add(function_type_cls); | 
 |  | 
 |     RegisterPrivateClass(record_type_cls, Symbols::_RecordType(), core_lib); | 
 |     pending_classes.Add(record_type_cls); | 
 |  | 
 |     RegisterPrivateClass(type_parameter_cls, Symbols::_TypeParameter(), | 
 |                          core_lib); | 
 |     pending_classes.Add(type_parameter_cls); | 
 |  | 
 |     cls = Class::New<Integer, RTN::Integer>(isolate_group); | 
 |     object_store->set_integer_implementation_class(cls); | 
 |     RegisterPrivateClass(cls, Symbols::_IntegerImplementation(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<Smi, RTN::Smi>(isolate_group); | 
 |     object_store->set_smi_class(cls); | 
 |     RegisterPrivateClass(cls, Symbols::_Smi(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<Mint, RTN::Mint>(isolate_group); | 
 |     object_store->set_mint_class(cls); | 
 |     RegisterPrivateClass(cls, Symbols::_Mint(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<Double, RTN::Double>(isolate_group); | 
 |     object_store->set_double_class(cls); | 
 |     RegisterPrivateClass(cls, Symbols::_Double(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     // Class that represents the Dart class _Closure and C++ class Closure. | 
 |     cls = Class::New<Closure, RTN::Closure>(isolate_group); | 
 |     object_store->set_closure_class(cls); | 
 |     RegisterPrivateClass(cls, Symbols::_Closure(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<Record, RTN::Record>(isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_Record(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<WeakProperty, RTN::WeakProperty>(isolate_group); | 
 |     object_store->set_weak_property_class(cls); | 
 |     RegisterPrivateClass(cls, Symbols::_WeakProperty(), core_lib); | 
 |  | 
 |     cls = Class::New<WeakReference, RTN::WeakReference>(isolate_group); | 
 |     cls.set_type_arguments_field_offset( | 
 |         WeakReference::type_arguments_offset(), | 
 |         RTN::WeakReference::type_arguments_offset()); | 
 |     cls.set_num_type_arguments_unsafe(1); | 
 |     object_store->set_weak_reference_class(cls); | 
 |     RegisterPrivateClass(cls, Symbols::_WeakReference(), core_lib); | 
 |  | 
 |     // Pre-register the mirrors library so we can place the vm class | 
 |     // MirrorReference there rather than the core library. | 
 |     lib = Library::LookupLibrary(thread, Symbols::DartMirrors()); | 
 |     if (lib.IsNull()) { | 
 |       lib = Library::NewLibraryHelper(Symbols::DartMirrors(), true); | 
 |       lib.SetLoadRequested(); | 
 |       lib.Register(thread); | 
 |     } | 
 |     object_store->set_bootstrap_library(ObjectStore::kMirrors, lib); | 
 |     ASSERT(!lib.IsNull()); | 
 |     ASSERT(lib.ptr() == Library::MirrorsLibrary()); | 
 |  | 
 |     cls = Class::New<MirrorReference, RTN::MirrorReference>(isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_MirrorReference(), lib); | 
 |  | 
 |     // Pre-register the collection library so we can place the vm class | 
 |     // Map there rather than the core library. | 
 |     lib = Library::LookupLibrary(thread, Symbols::DartCollection()); | 
 |     if (lib.IsNull()) { | 
 |       lib = Library::NewLibraryHelper(Symbols::DartCollection(), true); | 
 |       lib.SetLoadRequested(); | 
 |       lib.Register(thread); | 
 |     } | 
 |     object_store->set_bootstrap_library(ObjectStore::kCollection, lib); | 
 |     ASSERT(!lib.IsNull()); | 
 |     ASSERT(lib.ptr() == Library::CollectionLibrary()); | 
 |     cls = Class::New<Map, RTN::Map>(isolate_group); | 
 |     object_store->set_map_impl_class(cls); | 
 |     cls.set_type_arguments_field_offset(Map::type_arguments_offset(), | 
 |                                         RTN::Map::type_arguments_offset()); | 
 |     cls.set_num_type_arguments_unsafe(2); | 
 |     RegisterPrivateClass(cls, Symbols::_Map(), lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<Map, RTN::Map>(kConstMapCid, isolate_group); | 
 |     object_store->set_const_map_impl_class(cls); | 
 |     cls.set_type_arguments_field_offset(Map::type_arguments_offset(), | 
 |                                         RTN::Map::type_arguments_offset()); | 
 |     cls.set_num_type_arguments_unsafe(2); | 
 |     cls.set_is_prefinalized(); | 
 |     RegisterPrivateClass(cls, Symbols::_ConstMap(), lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<Set, RTN::Set>(isolate_group); | 
 |     object_store->set_set_impl_class(cls); | 
 |     cls.set_type_arguments_field_offset(Set::type_arguments_offset(), | 
 |                                         RTN::Set::type_arguments_offset()); | 
 |     cls.set_num_type_arguments_unsafe(1); | 
 |     RegisterPrivateClass(cls, Symbols::_Set(), lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     cls = Class::New<Set, RTN::Set>(kConstSetCid, isolate_group); | 
 |     object_store->set_const_set_impl_class(cls); | 
 |     cls.set_type_arguments_field_offset(Set::type_arguments_offset(), | 
 |                                         RTN::Set::type_arguments_offset()); | 
 |     cls.set_num_type_arguments_unsafe(1); | 
 |     cls.set_is_prefinalized(); | 
 |     RegisterPrivateClass(cls, Symbols::_ConstSet(), lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     // Pre-register the async library so we can place the vm class | 
 |     // FutureOr there rather than the core library. | 
 |     lib = Library::LookupLibrary(thread, Symbols::DartAsync()); | 
 |     if (lib.IsNull()) { | 
 |       lib = Library::NewLibraryHelper(Symbols::DartAsync(), true); | 
 |       lib.SetLoadRequested(); | 
 |       lib.Register(thread); | 
 |     } | 
 |     object_store->set_bootstrap_library(ObjectStore::kAsync, lib); | 
 |     ASSERT(!lib.IsNull()); | 
 |     ASSERT(lib.ptr() == Library::AsyncLibrary()); | 
 |     cls = Class::New<FutureOr, RTN::FutureOr>(isolate_group); | 
 |     cls.set_type_arguments_field_offset(FutureOr::type_arguments_offset(), | 
 |                                         RTN::FutureOr::type_arguments_offset()); | 
 |     cls.set_num_type_arguments_unsafe(1); | 
 |     RegisterClass(cls, Symbols::FutureOr(), lib); | 
 |     pending_classes.Add(cls); | 
 |     object_store->set_future_or_class(cls); | 
 |  | 
 |     cls = Class::New<SuspendState, RTN::SuspendState>(isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_SuspendState(), lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     // Pre-register the developer library so we can place the vm class | 
 |     // UserTag there rather than the core library. | 
 |     lib = Library::LookupLibrary(thread, Symbols::DartDeveloper()); | 
 |     if (lib.IsNull()) { | 
 |       lib = Library::NewLibraryHelper(Symbols::DartDeveloper(), true); | 
 |       lib.SetLoadRequested(); | 
 |       lib.Register(thread); | 
 |     } | 
 |     object_store->set_bootstrap_library(ObjectStore::kDeveloper, lib); | 
 |     ASSERT(!lib.IsNull()); | 
 |     ASSERT(lib.ptr() == Library::DeveloperLibrary()); | 
 |     cls = Class::New<UserTag, RTN::UserTag>(isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_UserTag(), lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
|     // Set up some default native field classes, which can be extended for | 
|     // specifying native fields in Dart classes. | 
 |     Library::InitNativeWrappersLibrary(isolate_group, is_kernel); | 
 |     ASSERT(object_store->native_wrappers_library() != Library::null()); | 
 |  | 
 |     // Pre-register the typed_data library so the native class implementations | 
 |     // can be hooked up before compiling it. | 
 |     lib = Library::LookupLibrary(thread, Symbols::DartTypedData()); | 
 |     if (lib.IsNull()) { | 
 |       lib = Library::NewLibraryHelper(Symbols::DartTypedData(), true); | 
 |       lib.SetLoadRequested(); | 
 |       lib.Register(thread); | 
 |     } | 
 |     object_store->set_bootstrap_library(ObjectStore::kTypedData, lib); | 
 |     ASSERT(!lib.IsNull()); | 
 |     ASSERT(lib.ptr() == Library::TypedDataLibrary()); | 
 | #define REGISTER_TYPED_DATA_CLASS(clazz)                                       \ | 
 |   cls = Class::NewTypedDataClass(kTypedData##clazz##ArrayCid, isolate_group);  \ | 
 |   RegisterPrivateClass(cls, Symbols::_##clazz##List(), lib); | 
 |  | 
 |     DART_CLASS_LIST_TYPED_DATA(REGISTER_TYPED_DATA_CLASS); | 
 | #undef REGISTER_TYPED_DATA_CLASS | 
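|  | 
| // A sketch of one expansion of the macro above, assuming Int8 appears in | 
| // DART_CLASS_LIST_TYPED_DATA: | 
| //   cls = Class::NewTypedDataClass(kTypedDataInt8ArrayCid, isolate_group); | 
| //   RegisterPrivateClass(cls, Symbols::_Int8List(), lib); | 
|  | 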
 | #define REGISTER_TYPED_DATA_VIEW_CLASS(clazz)                                  \ | 
 |   cls =                                                                        \ | 
 |       Class::NewTypedDataViewClass(kTypedData##clazz##ViewCid, isolate_group); \ | 
 |   RegisterPrivateClass(cls, Symbols::_##clazz##View(), lib);                   \ | 
 |   pending_classes.Add(cls);                                                    \ | 
 |   cls = Class::NewUnmodifiableTypedDataViewClass(                              \ | 
 |       kUnmodifiableTypedData##clazz##ViewCid, isolate_group);                  \ | 
 |   RegisterPrivateClass(cls, Symbols::_Unmodifiable##clazz##View(), lib);       \ | 
 |   pending_classes.Add(cls); | 
 |  | 
 |     CLASS_LIST_TYPED_DATA(REGISTER_TYPED_DATA_VIEW_CLASS); | 
 |  | 
 |     cls = Class::NewTypedDataViewClass(kByteDataViewCid, isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_ByteDataView(), lib); | 
 |     pending_classes.Add(cls); | 
 |     cls = Class::NewUnmodifiableTypedDataViewClass(kUnmodifiableByteDataViewCid, | 
 |                                                    isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_UnmodifiableByteDataView(), lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 | #undef REGISTER_TYPED_DATA_VIEW_CLASS | 
 | #define REGISTER_EXT_TYPED_DATA_CLASS(clazz)                                   \ | 
 |   cls = Class::NewExternalTypedDataClass(kExternalTypedData##clazz##Cid,       \ | 
 |                                          isolate_group);                       \ | 
 |   RegisterPrivateClass(cls, Symbols::_External##clazz(), lib); | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kByteBufferCid, isolate_group, | 
 |                                               /*register_class=*/false); | 
 |     cls.set_instance_size(0, 0); | 
 |     cls.set_next_field_offset(-kWordSize, -compiler::target::kWordSize); | 
 |     isolate_group->class_table()->Register(cls); | 
 |     RegisterPrivateClass(cls, Symbols::_ByteBuffer(), lib); | 
 |     pending_classes.Add(cls); | 
 |  | 
 |     CLASS_LIST_TYPED_DATA(REGISTER_EXT_TYPED_DATA_CLASS); | 
 | #undef REGISTER_EXT_TYPED_DATA_CLASS | 
 |     // Register Float32x4, Int32x4, and Float64x2 in the object store. | 
 |     cls = Class::New<Float32x4, RTN::Float32x4>(isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_Float32x4(), lib); | 
 |     pending_classes.Add(cls); | 
 |     object_store->set_float32x4_class(cls); | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, | 
 |                                               /*register_class=*/true, | 
 |                                               /*is_abstract=*/true); | 
 |     RegisterClass(cls, Symbols::Float32x4(), lib); | 
 |     cls.set_num_type_arguments_unsafe(0); | 
 |     cls.set_is_prefinalized(); | 
 |     type = Type::NewNonParameterizedType(cls); | 
 |     object_store->set_float32x4_type(type); | 
 |  | 
 |     cls = Class::New<Int32x4, RTN::Int32x4>(isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_Int32x4(), lib); | 
 |     pending_classes.Add(cls); | 
 |     object_store->set_int32x4_class(cls); | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, | 
 |                                               /*register_class=*/true, | 
 |                                               /*is_abstract=*/true); | 
 |     RegisterClass(cls, Symbols::Int32x4(), lib); | 
 |     cls.set_num_type_arguments_unsafe(0); | 
 |     cls.set_is_prefinalized(); | 
 |     type = Type::NewNonParameterizedType(cls); | 
 |     object_store->set_int32x4_type(type); | 
 |  | 
 |     cls = Class::New<Float64x2, RTN::Float64x2>(isolate_group); | 
 |     RegisterPrivateClass(cls, Symbols::_Float64x2(), lib); | 
 |     pending_classes.Add(cls); | 
 |     object_store->set_float64x2_class(cls); | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, | 
 |                                               /*register_class=*/true, | 
 |                                               /*is_abstract=*/true); | 
 |     RegisterClass(cls, Symbols::Float64x2(), lib); | 
 |     cls.set_num_type_arguments_unsafe(0); | 
 |     cls.set_is_prefinalized(); | 
 |     type = Type::NewNonParameterizedType(cls); | 
 |     object_store->set_float64x2_type(type); | 
 |  | 
 |     // Set the super type of class StackTrace to Object type so that the | 
 |     // 'toString' method is implemented. | 
 |     type = object_store->object_type(); | 
 |     stacktrace_cls.set_super_type(type); | 
 |  | 
 |     // Abstract class that represents the Dart class Type. | 
 |     // Note that this class is implemented by Dart class _AbstractType. | 
 |     cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, | 
 |                                               /*register_class=*/true, | 
 |                                               /*is_abstract=*/true); | 
 |     cls.set_num_type_arguments_unsafe(0); | 
 |     cls.set_is_prefinalized(); | 
 |     RegisterClass(cls, Symbols::Type(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |     type = Type::NewNonParameterizedType(cls); | 
 |     object_store->set_type_type(type); | 
 |  | 
 |     // Abstract class that represents the Dart class Function. | 
 |     cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, | 
 |                                               /*register_class=*/true, | 
 |                                               /*is_abstract=*/true); | 
 |     cls.set_num_type_arguments_unsafe(0); | 
 |     cls.set_is_prefinalized(); | 
 |     RegisterClass(cls, Symbols::Function(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |     type = Type::NewNonParameterizedType(cls); | 
 |     object_store->set_function_type(type); | 
 |  | 
 |     // Abstract class that represents the Dart class Record. | 
 |     cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, | 
 |                                               /*register_class=*/true, | 
 |                                               /*is_abstract=*/true); | 
 |     RegisterClass(cls, Symbols::Record(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |     object_store->set_record_class(cls); | 
 |  | 
 |     cls = Class::New<Number, RTN::Number>(isolate_group); | 
 |     RegisterClass(cls, Symbols::Number(), core_lib); | 
 |     pending_classes.Add(cls); | 
 |     type = Type::NewNonParameterizedType(cls); | 
 |     object_store->set_number_type(type); | 
 |     type = type.ToNullability(Nullability::kNullable, Heap::kOld); | 
 |     object_store->set_nullable_number_type(type); | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, | 
 |                                               /*register_class=*/true, | 
 |                                               /*is_abstract=*/true); | 
 |     RegisterClass(cls, Symbols::Int(), core_lib); | 
 |     cls.set_num_type_arguments_unsafe(0); | 
 |     cls.set_is_prefinalized(); | 
 |     pending_classes.Add(cls); | 
 |     type = Type::NewNonParameterizedType(cls); | 
 |     object_store->set_int_type(type); | 
 |     type = type.ToNullability(Nullability::kNonNullable, Heap::kOld); | 
 |     object_store->set_non_nullable_int_type(type); | 
 |     type = type.ToNullability(Nullability::kNullable, Heap::kOld); | 
 |     object_store->set_nullable_int_type(type); | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, | 
 |                                               /*register_class=*/true, | 
 |                                               /*is_abstract=*/true); | 
 |     RegisterClass(cls, Symbols::Double(), core_lib); | 
 |     cls.set_num_type_arguments_unsafe(0); | 
 |     cls.set_is_prefinalized(); | 
 |     pending_classes.Add(cls); | 
 |     type = Type::NewNonParameterizedType(cls); | 
 |     object_store->set_double_type(type); | 
 |     type = type.ToNullability(Nullability::kNullable, Heap::kOld); | 
 |     object_store->set_nullable_double_type(type); | 
 |  | 
 |     name = Symbols::_String().ptr(); | 
 |     cls = Class::New<Instance, RTN::Instance>(kIllegalCid, isolate_group, | 
 |                                               /*register_class=*/true, | 
 |                                               /*is_abstract=*/true); | 
 |     RegisterClass(cls, name, core_lib); | 
 |     cls.set_num_type_arguments_unsafe(0); | 
 |     cls.set_is_prefinalized(); | 
 |     pending_classes.Add(cls); | 
 |     type = Type::NewNonParameterizedType(cls); | 
 |     object_store->set_string_type(type); | 
 |  | 
 |     cls = object_store->bool_class(); | 
 |     type = Type::NewNonParameterizedType(cls); | 
 |     object_store->set_bool_type(type); | 
 |  | 
 |     cls = object_store->smi_class(); | 
 |     type = Type::NewNonParameterizedType(cls); | 
 |     object_store->set_smi_type(type); | 
 |  | 
 |     cls = object_store->mint_class(); | 
 |     type = Type::NewNonParameterizedType(cls); | 
 |     object_store->set_mint_type(type); | 
 |  | 
 |     // The classes 'void' and 'dynamic' are phony classes to make type checking | 
 |     // more regular; they live in the VM isolate. The class 'void' is not | 
 |     // registered in the class dictionary because its name is a reserved word. | 
 |     // The class 'dynamic' is registered in the class dictionary because its | 
 |     // name is a built-in identifier (this is wrong).  The corresponding types | 
 |     // are stored in the object store. | 
 |     cls = object_store->null_class(); | 
 |     type = | 
 |         Type::New(cls, Object::null_type_arguments(), Nullability::kNullable); | 
 |     type.SetIsFinalized(); | 
 |     type ^= type.Canonicalize(thread); | 
 |     object_store->set_null_type(type); | 
 |     cls.set_declaration_type(type); | 
 |     ASSERT(type.IsNullable()); | 
 |  | 
 |     // Consider removing when/if Null becomes an ordinary class. | 
 |     type = object_store->object_type(); | 
 |     cls.set_super_type(type); | 
 |  | 
 |     cls = object_store->never_class(); | 
 |     type = Type::New(cls, Object::null_type_arguments(), | 
 |                      Nullability::kNonNullable); | 
 |     type.SetIsFinalized(); | 
 |     type ^= type.Canonicalize(thread); | 
 |     object_store->set_never_type(type); | 
 |     type_args = TypeArguments::New(1); | 
 |     type_args.SetTypeAt(0, type); | 
 |     type_args = type_args.Canonicalize(thread); | 
 |     object_store->set_type_argument_never(type_args); | 
 |  | 
 |     // Create and cache commonly used type arguments <int>, <double>, | 
 |     // <String>, <String, dynamic> and <String, String>. | 
 |     type_args = TypeArguments::New(1); | 
 |     type = object_store->int_type(); | 
 |     type_args.SetTypeAt(0, type); | 
 |     type_args = type_args.Canonicalize(thread); | 
 |     object_store->set_type_argument_int(type_args); | 
 |  | 
 |     type_args = TypeArguments::New(1); | 
 |     type = object_store->double_type(); | 
 |     type_args.SetTypeAt(0, type); | 
 |     type_args = type_args.Canonicalize(thread); | 
 |     object_store->set_type_argument_double(type_args); | 
 |  | 
 |     type_args = TypeArguments::New(1); | 
 |     type = object_store->string_type(); | 
 |     type_args.SetTypeAt(0, type); | 
 |     type_args = type_args.Canonicalize(thread); | 
 |     object_store->set_type_argument_string(type_args); | 
 |  | 
 |     type_args = TypeArguments::New(2); | 
 |     type = object_store->string_type(); | 
 |     type_args.SetTypeAt(0, type); | 
 |     type_args.SetTypeAt(1, Object::dynamic_type()); | 
 |     type_args = type_args.Canonicalize(thread); | 
 |     object_store->set_type_argument_string_dynamic(type_args); | 
 |  | 
 |     type_args = TypeArguments::New(2); | 
 |     type = object_store->string_type(); | 
 |     type_args.SetTypeAt(0, type); | 
 |     type_args.SetTypeAt(1, type); | 
 |     type_args = type_args.Canonicalize(thread); | 
 |     object_store->set_type_argument_string_string(type_args); | 
 |  | 
 |     lib = Library::LookupLibrary(thread, Symbols::DartFfi()); | 
 |     if (lib.IsNull()) { | 
 |       lib = Library::NewLibraryHelper(Symbols::DartFfi(), true); | 
 |       lib.SetLoadRequested(); | 
 |       lib.Register(thread); | 
 |     } | 
 |     object_store->set_bootstrap_library(ObjectStore::kFfi, lib); | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kFfiNativeTypeCid, isolate_group); | 
 |     cls.set_num_type_arguments_unsafe(0); | 
 |     cls.set_is_prefinalized(); | 
 |     pending_classes.Add(cls); | 
 |     object_store->set_ffi_native_type_class(cls); | 
 |     RegisterClass(cls, Symbols::FfiNativeType(), lib); | 
 |  | 
 | #define REGISTER_FFI_TYPE_MARKER(clazz)                                        \ | 
 |   cls = Class::New<Instance, RTN::Instance>(kFfi##clazz##Cid, isolate_group);  \ | 
 |   cls.set_num_type_arguments_unsafe(0);                                        \ | 
 |   cls.set_is_prefinalized();                                                   \ | 
 |   pending_classes.Add(cls);                                                    \ | 
 |   RegisterClass(cls, Symbols::Ffi##clazz(), lib); | 
 |     CLASS_LIST_FFI_TYPE_MARKER(REGISTER_FFI_TYPE_MARKER); | 
 | #undef REGISTER_FFI_TYPE_MARKER | 
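|  | 
| // A sketch of one expansion of REGISTER_FFI_TYPE_MARKER, assuming Int8 | 
| // appears in CLASS_LIST_FFI_TYPE_MARKER: | 
| //   cls = Class::New<Instance, RTN::Instance>(kFfiInt8Cid, isolate_group); | 
| //   cls.set_num_type_arguments_unsafe(0); | 
| //   cls.set_is_prefinalized(); | 
| //   pending_classes.Add(cls); | 
| //   RegisterClass(cls, Symbols::FfiInt8(), lib); | 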
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kFfiNativeFunctionCid, | 
 |                                               isolate_group); | 
 |     cls.set_type_arguments_field_offset(Instance::NextFieldOffset(), | 
 |                                         RTN::Instance::NextFieldOffset()); | 
 |     cls.set_num_type_arguments_unsafe(1); | 
 |     cls.set_is_prefinalized(); | 
 |     pending_classes.Add(cls); | 
 |     RegisterClass(cls, Symbols::FfiNativeFunction(), lib); | 
 |  | 
 |     cls = Class::NewPointerClass(kPointerCid, isolate_group); | 
 |     object_store->set_ffi_pointer_class(cls); | 
 |     pending_classes.Add(cls); | 
 |     RegisterClass(cls, Symbols::FfiPointer(), lib); | 
 |  | 
 |     cls = Class::New<DynamicLibrary, RTN::DynamicLibrary>(kDynamicLibraryCid, | 
 |                                                           isolate_group); | 
 |     cls.set_instance_size(DynamicLibrary::InstanceSize(), | 
 |                           compiler::target::RoundedAllocationSize( | 
 |                               RTN::DynamicLibrary::InstanceSize())); | 
 |     cls.set_is_prefinalized(); | 
 |     pending_classes.Add(cls); | 
 |     RegisterClass(cls, Symbols::FfiDynamicLibrary(), lib); | 
 |  | 
 |     cls = Class::New<NativeFinalizer, RTN::NativeFinalizer>(isolate_group); | 
 |     object_store->set_native_finalizer_class(cls); | 
 |     RegisterPrivateClass(cls, Symbols::_NativeFinalizer(), lib); | 
 |  | 
 |     cls = Class::New<Finalizer, RTN::Finalizer>(isolate_group); | 
 |     cls.set_type_arguments_field_offset( | 
 |         Finalizer::type_arguments_offset(), | 
 |         RTN::Finalizer::type_arguments_offset()); | 
 |     cls.set_num_type_arguments_unsafe(1); | 
 |     object_store->set_finalizer_class(cls); | 
 |     pending_classes.Add(cls); | 
 |     RegisterPrivateClass(cls, Symbols::_FinalizerImpl(), core_lib); | 
 |  | 
    // Pre-register the internal library so we can place the VM class
    // FinalizerEntry there rather than in the core library.
 |     lib = Library::LookupLibrary(thread, Symbols::DartInternal()); | 
 |     if (lib.IsNull()) { | 
 |       lib = Library::NewLibraryHelper(Symbols::DartInternal(), true); | 
 |       lib.SetLoadRequested(); | 
 |       lib.Register(thread); | 
 |     } | 
 |     object_store->set_bootstrap_library(ObjectStore::kInternal, lib); | 
 |     ASSERT(!lib.IsNull()); | 
 |     ASSERT(lib.ptr() == Library::InternalLibrary()); | 
 |  | 
 |     cls = Class::New<FinalizerEntry, RTN::FinalizerEntry>(isolate_group); | 
 |     object_store->set_finalizer_entry_class(cls); | 
 |     pending_classes.Add(cls); | 
 |     RegisterClass(cls, Symbols::FinalizerEntry(), lib); | 
 |  | 
 |     // Finish the initialization by compiling the bootstrap scripts containing | 
 |     // the base interfaces and the implementation of the internal classes. | 
 |     const Error& error = Error::Handle( | 
 |         zone, Bootstrap::DoBootstrapping(kernel_buffer, kernel_buffer_size)); | 
 |     if (!error.IsNull()) { | 
 |       return error.ptr(); | 
 |     } | 
 |  | 
 |     isolate_group->class_table()->CopySizesFromClassObjects(); | 
 |  | 
 |     ClassFinalizer::VerifyBootstrapClasses(); | 
 |  | 
 |     // Set up the intrinsic state of all functions (core, math and typed data). | 
 |     compiler::Intrinsifier::InitializeState(); | 
 |  | 
    // Add static const fields (class ids) to the class 'ClassID'.
 |     lib = Library::LookupLibrary(thread, Symbols::DartInternal()); | 
 |     ASSERT(!lib.IsNull()); | 
 |     cls = lib.LookupClassAllowPrivate(Symbols::ClassID()); | 
 |     ASSERT(!cls.IsNull()); | 
 |     const bool injected = cls.InjectCIDFields(); | 
 |     ASSERT(injected); | 
 |  | 
 |     // Set up recognized state of all functions (core, math and typed data). | 
 |     MethodRecognizer::InitializeState(); | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |   } else { | 
    // Object::Init version used when the Dart VM is running with a full
    // snapshot linked in and the isolate is initialized from that full
    // snapshot.
 |     ObjectStore* object_store = isolate_group->object_store(); | 
 |     SafepointWriteRwLocker ml(thread, isolate_group->program_lock()); | 
 |  | 
 |     Class& cls = Class::Handle(zone); | 
 |  | 
    // Set up empty classes in the object store; these will get initialized
    // correctly when we read from the snapshot. This is done to allow
    // bootstrapping of reading classes from the snapshot. Some classes are
    // not stored in the object store, but we still need to create their
    // Class objects so that they get put into the class_table (as a side
    // effect of Class::New()).
 |     cls = Class::New<Instance, RTN::Instance>(kInstanceCid, isolate_group); | 
 |     object_store->set_object_class(cls); | 
 |  | 
 |     cls = Class::New<LibraryPrefix, RTN::LibraryPrefix>(isolate_group); | 
 |     cls = Class::New<Type, RTN::Type>(isolate_group); | 
 |     cls = Class::New<FunctionType, RTN::FunctionType>(isolate_group); | 
 |     cls = Class::New<RecordType, RTN::RecordType>(isolate_group); | 
 |     cls = Class::New<TypeParameter, RTN::TypeParameter>(isolate_group); | 
 |  | 
 |     cls = Class::New<Array, RTN::Array>(isolate_group); | 
 |     object_store->set_array_class(cls); | 
 |  | 
 |     cls = Class::New<Array, RTN::Array>(kImmutableArrayCid, isolate_group); | 
 |     object_store->set_immutable_array_class(cls); | 
 |  | 
 |     cls = Class::New<GrowableObjectArray, RTN::GrowableObjectArray>( | 
 |         isolate_group); | 
 |     object_store->set_growable_object_array_class(cls); | 
 |  | 
 |     cls = Class::New<Map, RTN::Map>(isolate_group); | 
 |     object_store->set_map_impl_class(cls); | 
 |  | 
 |     cls = Class::New<Map, RTN::Map>(kConstMapCid, isolate_group); | 
 |     object_store->set_const_map_impl_class(cls); | 
 |  | 
 |     cls = Class::New<Set, RTN::Set>(isolate_group); | 
 |     object_store->set_set_impl_class(cls); | 
 |  | 
 |     cls = Class::New<Set, RTN::Set>(kConstSetCid, isolate_group); | 
 |     object_store->set_const_set_impl_class(cls); | 
 |  | 
 |     cls = Class::New<Float32x4, RTN::Float32x4>(isolate_group); | 
 |     object_store->set_float32x4_class(cls); | 
 |  | 
 |     cls = Class::New<Int32x4, RTN::Int32x4>(isolate_group); | 
 |     object_store->set_int32x4_class(cls); | 
 |  | 
 |     cls = Class::New<Float64x2, RTN::Float64x2>(isolate_group); | 
 |     object_store->set_float64x2_class(cls); | 
 |  | 
 | #define REGISTER_TYPED_DATA_CLASS(clazz)                                       \ | 
 |   cls = Class::NewTypedDataClass(kTypedData##clazz##Cid, isolate_group); | 
 |     CLASS_LIST_TYPED_DATA(REGISTER_TYPED_DATA_CLASS); | 
 | #undef REGISTER_TYPED_DATA_CLASS | 
 | #define REGISTER_TYPED_DATA_VIEW_CLASS(clazz)                                  \ | 
 |   cls =                                                                        \ | 
 |       Class::NewTypedDataViewClass(kTypedData##clazz##ViewCid, isolate_group); \ | 
 |   cls = Class::NewUnmodifiableTypedDataViewClass(                              \ | 
 |       kUnmodifiableTypedData##clazz##ViewCid, isolate_group); | 
 |     CLASS_LIST_TYPED_DATA(REGISTER_TYPED_DATA_VIEW_CLASS); | 
 | #undef REGISTER_TYPED_DATA_VIEW_CLASS | 
 |     cls = Class::NewTypedDataViewClass(kByteDataViewCid, isolate_group); | 
 |     cls = Class::NewUnmodifiableTypedDataViewClass(kUnmodifiableByteDataViewCid, | 
 |                                                    isolate_group); | 
 | #define REGISTER_EXT_TYPED_DATA_CLASS(clazz)                                   \ | 
 |   cls = Class::NewExternalTypedDataClass(kExternalTypedData##clazz##Cid,       \ | 
 |                                          isolate_group); | 
 |     CLASS_LIST_TYPED_DATA(REGISTER_EXT_TYPED_DATA_CLASS); | 
 | #undef REGISTER_EXT_TYPED_DATA_CLASS | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kFfiNativeTypeCid, isolate_group); | 
 |     object_store->set_ffi_native_type_class(cls); | 
 |  | 
 | #define REGISTER_FFI_CLASS(clazz)                                              \ | 
 |   cls = Class::New<Instance, RTN::Instance>(kFfi##clazz##Cid, isolate_group); | 
 |     CLASS_LIST_FFI_TYPE_MARKER(REGISTER_FFI_CLASS); | 
 | #undef REGISTER_FFI_CLASS | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kFfiNativeFunctionCid, | 
 |                                               isolate_group); | 
 |  | 
 |     cls = Class::NewPointerClass(kPointerCid, isolate_group); | 
 |     object_store->set_ffi_pointer_class(cls); | 
 |  | 
 |     cls = Class::New<DynamicLibrary, RTN::DynamicLibrary>(kDynamicLibraryCid, | 
 |                                                           isolate_group); | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kByteBufferCid, isolate_group, | 
 |                                               /*register_isolate_group=*/false); | 
 |     cls.set_instance_size_in_words(0, 0); | 
 |     isolate_group->class_table()->Register(cls); | 
 |  | 
 |     cls = Class::New<Integer, RTN::Integer>(isolate_group); | 
 |     object_store->set_integer_implementation_class(cls); | 
 |  | 
 |     cls = Class::New<Smi, RTN::Smi>(isolate_group); | 
 |     object_store->set_smi_class(cls); | 
 |  | 
 |     cls = Class::New<Mint, RTN::Mint>(isolate_group); | 
 |     object_store->set_mint_class(cls); | 
 |  | 
 |     cls = Class::New<Double, RTN::Double>(isolate_group); | 
 |     object_store->set_double_class(cls); | 
 |  | 
 |     cls = Class::New<Closure, RTN::Closure>(isolate_group); | 
 |     object_store->set_closure_class(cls); | 
 |  | 
 |     cls = Class::New<Record, RTN::Record>(isolate_group); | 
 |  | 
 |     cls = Class::NewStringClass(kOneByteStringCid, isolate_group); | 
 |     object_store->set_one_byte_string_class(cls); | 
 |  | 
 |     cls = Class::NewStringClass(kTwoByteStringCid, isolate_group); | 
 |     object_store->set_two_byte_string_class(cls); | 
 |  | 
 |     cls = Class::New<Bool, RTN::Bool>(isolate_group); | 
 |     object_store->set_bool_class(cls); | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kNullCid, isolate_group); | 
 |     object_store->set_null_class(cls); | 
 |  | 
 |     cls = Class::New<Instance, RTN::Instance>(kNeverCid, isolate_group); | 
 |     object_store->set_never_class(cls); | 
 |  | 
 |     cls = Class::New<Capability, RTN::Capability>(isolate_group); | 
 |     cls = Class::New<ReceivePort, RTN::ReceivePort>(isolate_group); | 
 |     cls = Class::New<SendPort, RTN::SendPort>(isolate_group); | 
 |     cls = Class::New<StackTrace, RTN::StackTrace>(isolate_group); | 
 |     cls = Class::New<SuspendState, RTN::SuspendState>(isolate_group); | 
 |     cls = Class::New<RegExp, RTN::RegExp>(isolate_group); | 
 |     cls = Class::New<Number, RTN::Number>(isolate_group); | 
 |  | 
 |     cls = Class::New<WeakProperty, RTN::WeakProperty>(isolate_group); | 
 |     object_store->set_weak_property_class(cls); | 
 |     cls = Class::New<WeakReference, RTN::WeakReference>(isolate_group); | 
 |     object_store->set_weak_reference_class(cls); | 
 |     cls = Class::New<Finalizer, RTN::Finalizer>(isolate_group); | 
 |     object_store->set_finalizer_class(cls); | 
 |     cls = Class::New<NativeFinalizer, RTN::NativeFinalizer>(isolate_group); | 
 |     object_store->set_native_finalizer_class(cls); | 
 |     cls = Class::New<FinalizerEntry, RTN::FinalizerEntry>(isolate_group); | 
 |     object_store->set_finalizer_entry_class(cls); | 
 |  | 
 |     cls = Class::New<MirrorReference, RTN::MirrorReference>(isolate_group); | 
 |     cls = Class::New<UserTag, RTN::UserTag>(isolate_group); | 
 |     cls = Class::New<FutureOr, RTN::FutureOr>(isolate_group); | 
 |     object_store->set_future_or_class(cls); | 
 |     cls = Class::New<TransferableTypedData, RTN::TransferableTypedData>( | 
 |         isolate_group); | 
 |   } | 
 |   return Error::null(); | 
 | } | 
 |  | 
 | #if defined(DEBUG) | 
 | bool Object::InVMIsolateHeap() const { | 
 |   return ptr()->untag()->InVMIsolateHeap(); | 
 | } | 
 | #endif  // DEBUG | 
 |  | 
 | void Object::Print() const { | 
 |   THR_Print("%s\n", ToCString()); | 
 | } | 
 |  | 
 | StringPtr Object::DictionaryName() const { | 
 |   return String::null(); | 
 | } | 
 |  | 
 | bool Object::ShouldHaveImmutabilityBitSet(classid_t class_id) { | 
 |   if (class_id < kNumPredefinedCids) { | 
 |     return ShouldHaveImmutabilityBitSetCid(class_id); | 
 |   } else { | 
 |     return Class::IsDeeplyImmutable( | 
 |         IsolateGroup::Current()->class_table()->At(class_id)); | 
 |   } | 
 | } | 
 |  | 
 | void Object::InitializeObject(uword address, | 
 |                               intptr_t class_id, | 
 |                               intptr_t size, | 
 |                               bool compressed, | 
 |                               uword ptr_field_start_offset, | 
 |                               uword ptr_field_end_offset) { | 
 |   // Note: we skip the header word here to avoid a racy read in the concurrent | 
 |   // marker from observing the null object when it reads into a heap page | 
 |   // allocated after marking started. | 
 |   uword cur = address + sizeof(UntaggedObject); | 
 |   uword ptr_field_start = address + ptr_field_start_offset; | 
 |   uword ptr_field_end = address + ptr_field_end_offset; | 
 |   uword end = address + size; | 
 |   // The start of pointer fields should always be past the object header, even | 
 |   // if there are no pointer fields (ptr_field_end < ptr_field_start). | 
 |   ASSERT(cur <= ptr_field_start); | 
 |   // The start of pointer fields can be at the end for empty payload objects. | 
 |   ASSERT(ptr_field_start <= end); | 
 |   // The end of pointer fields should always be before the end, as the end of | 
 |   // pointer fields is inclusive (the address of the last field to initialize). | 
 |   ASSERT(ptr_field_end < end); | 
 |   bool needs_init = true; | 
 |   if (IsTypedDataBaseClassId(class_id) || class_id == kArrayCid) { | 
 |     // If the size is greater than both kNewAllocatableSize and | 
 |     // kAllocatablePageSize, the object must have been allocated to a new | 
 |     // large page, which must already have been zero initialized by the OS. | 
 |     // Note that zero is a GC-safe value. | 
 |     // | 
 |     // For arrays, the caller will then initialize the fields to null with | 
 |     // safepoint checks to avoid blocking for the full duration of | 
 |     // initializing this array. | 
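    // In other words, initialization below is needed only when the object
    // was small enough to be allocated in new space or via the old-space
    // free lists.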
 |     needs_init = | 
 |         IsAllocatableInNewSpace(size) || IsAllocatableViaFreeLists(size); | 
 |   } | 
 |   if (needs_init) { | 
 |     // Initialize the memory prior to any pointer fields with 0. (This loop | 
 |     // and the next will be a no-op if the object has no pointer fields.) | 
 |     uword initial_value = 0; | 
 |     while (cur < ptr_field_start) { | 
 |       *reinterpret_cast<uword*>(cur) = initial_value; | 
 |       cur += kWordSize; | 
 |     } | 
 |     // Initialize any pointer fields with Object::null(). | 
 |     initial_value = static_cast<uword>(null_); | 
 | #if defined(DART_COMPRESSED_POINTERS) | 
 |     if (compressed) { | 
 |       initial_value &= 0xFFFFFFFF; | 
 |       initial_value |= initial_value << 32; | 
 |     } | 
 |     const bool has_pointer_fields = ptr_field_start <= ptr_field_end; | 
 |     // If there are compressed pointer fields and the first compressed pointer | 
 |     // field is not at a word start, then initialize it to Object::null(). | 
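    // E.g. with 64-bit words and 32-bit compressed pointers: if the first
    // pointer field starts at offset 12, the zeroing loop above has already
    // written a full zero word over offsets 8-15, so the compressed slot at
    // offset 12 is re-initialized to null here before the word-granularity
    // null loop below resumes at offset 16.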
 |     if (compressed && has_pointer_fields && | 
 |         (ptr_field_start % kWordSize != 0)) { | 
 |       *reinterpret_cast<compressed_uword*>(ptr_field_start) = initial_value; | 
 |     } | 
 | #endif | 
 |     while (cur <= ptr_field_end) { | 
 |       *reinterpret_cast<uword*>(cur) = initial_value; | 
 |       cur += kWordSize; | 
 |     } | 
 |     // Initialize the memory after any pointer fields with 0, unless this is | 
 |     // an instructions object in which case we use the break instruction. | 
 |     initial_value = class_id == kInstructionsCid ? kBreakInstructionFiller : 0; | 
 | #if defined(DART_COMPRESSED_POINTERS) | 
 |     // If there are compressed pointer fields and the last compressed pointer | 
 |     // field is the start of a word, then initialize the other part of the word | 
 |     // to the new initial value. | 
 |     // | 
 |     // (We're guaranteed there's always space in the object after the last | 
 |     // pointer field in this case since objects are allocated in multiples of | 
 |     // the word size.) | 
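    // E.g. if the last compressed pointer field occupies offsets 24-27, the
    // null loop above wrote a full word over offsets 24-31; this store
    // resets the non-pointer half at offsets 28-31 to the trailing fill
    // value.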
 |     if (compressed && has_pointer_fields && (ptr_field_end % kWordSize == 0)) { | 
 |       *reinterpret_cast<compressed_uword*>(ptr_field_end + | 
 |                                            kCompressedWordSize) = initial_value; | 
 |     } | 
 | #endif | 
 |     while (cur < end) { | 
 |       *reinterpret_cast<uword*>(cur) = initial_value; | 
 |       cur += kWordSize; | 
 |     } | 
 |   } else { | 
 |     // Check that MemorySanitizer understands this is initialized. | 
 |     MSAN_CHECK_INITIALIZED(reinterpret_cast<void*>(address), size); | 
 | #if defined(DEBUG) | 
 |     const uword initial_value = 0; | 
 |     while (cur < end) { | 
 |       ASSERT_EQUAL(*reinterpret_cast<uword*>(cur), initial_value); | 
 |       cur += kWordSize; | 
 |     } | 
 | #endif | 
 |   } | 
 |   uword tags = 0; | 
 |   ASSERT(class_id != kIllegalCid); | 
 |   tags = UntaggedObject::ClassIdTag::update(class_id, tags); | 
 |   tags = UntaggedObject::SizeTag::update(size, tags); | 
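  // Which space the object was allocated in is recoverable from the address
  // alignment: within the double-word object alignment, old-space objects
  // sit at kOldObjectAlignmentOffset and new-space objects at
  // kNewObjectAlignmentOffset.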
 |   const bool is_old = | 
 |       (address & kNewObjectAlignmentOffset) == kOldObjectAlignmentOffset; | 
 |   tags = UntaggedObject::AlwaysSetBit::update(true, tags); | 
 |   tags = UntaggedObject::NotMarkedBit::update(true, tags); | 
 |   tags = UntaggedObject::OldAndNotRememberedBit::update(is_old, tags); | 
 |   tags = UntaggedObject::NewOrEvacuationCandidateBit::update(!is_old, tags); | 
 |   tags = UntaggedObject::ImmutableBit::update( | 
 |       Object::ShouldHaveImmutabilityBitSet(class_id), tags); | 
 | #if defined(HASH_IN_OBJECT_HEADER) | 
 |   tags = UntaggedObject::HashTag::update(0, tags); | 
 | #endif | 
 |   reinterpret_cast<UntaggedObject*>(address)->tags_ = tags; | 
 | } | 
 |  | 
 | void Object::CheckHandle() const { | 
 | #if defined(DEBUG) | 
 |   if (ptr_ != Object::null()) { | 
 |     intptr_t cid = ptr_->GetClassIdMayBeSmi(); | 
 |     if (cid >= kNumPredefinedCids) { | 
 |       cid = kInstanceCid; | 
 |     } | 
 |     ASSERT(vtable() == builtin_vtables_[cid]); | 
 |   } | 
 | #endif | 
 | } | 
 |  | 
 | ObjectPtr Object::Allocate(intptr_t cls_id, | 
 |                            intptr_t size, | 
 |                            Heap::Space space, | 
 |                            bool compressed, | 
 |                            uword ptr_field_start_offset, | 
 |                            uword ptr_field_end_offset) { | 
 |   ASSERT(Utils::IsAligned(size, kObjectAlignment)); | 
 |   Thread* thread = Thread::Current(); | 
 |   ASSERT(thread->execution_state() == Thread::kThreadInVM); | 
 |   ASSERT(thread->no_safepoint_scope_depth() == 0); | 
 |   ASSERT(thread->no_callback_scope_depth() == 0); | 
 |   Heap* heap = thread->heap(); | 
 |  | 
 |   uword address = heap->Allocate(thread, size, space); | 
 |   if (UNLIKELY(address == 0)) { | 
 |     // SuspendLongJumpScope during Dart entry ensures that if a longjmp base is | 
 |     // available, it is the innermost error handler, so check for a longjmp base | 
 |     // before checking for an exit frame. | 
 |     if (thread->long_jump_base() != nullptr) { | 
 |       Report::LongJump(Object::out_of_memory_error()); | 
 |       UNREACHABLE(); | 
 |     } else if (thread->top_exit_frame_info() != 0) { | 
      // Use the preallocated out-of-memory exception to avoid calling
      // into Dart code or allocating any code.
 |       Exceptions::ThrowOOM(); | 
 |       UNREACHABLE(); | 
 |     } else { | 
 |       // Nowhere to propagate an exception to. | 
 |       OUT_OF_MEMORY(); | 
 |     } | 
 |   } | 
 |  | 
 |   ObjectPtr raw_obj; | 
 |   NoSafepointScope no_safepoint(thread); | 
 |   InitializeObject(address, cls_id, size, compressed, ptr_field_start_offset, | 
 |                    ptr_field_end_offset); | 
 |   raw_obj = static_cast<ObjectPtr>(address + kHeapObjectTag); | 
 |   ASSERT(cls_id == UntaggedObject::ClassIdTag::decode(raw_obj->untag()->tags_)); | 
 |   if (raw_obj->IsOldObject() && UNLIKELY(thread->is_marking())) { | 
 |     // Black allocation. Prevents a data race between the mutator and | 
 |     // concurrent marker on ARM and ARM64 (the marker may observe a | 
 |     // publishing store of this object before the stores that initialize its | 
 |     // slots), and helps the collection to finish sooner. | 
 |     // release: Setting the mark bit must not be ordered after a publishing | 
 |     // store of this object. Compare Scavenger::ScavengePointer. | 
 |     raw_obj->untag()->SetMarkBitRelease(); | 
 |     heap->old_space()->AllocateBlack(size); | 
 |   } | 
 |  | 
 | #if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER) | 
 |   HeapProfileSampler& heap_sampler = thread->heap_sampler(); | 
 |   if (heap_sampler.HasOutstandingSample()) { | 
 |     thread->IncrementNoCallbackScopeDepth(); | 
 |     void* data = heap_sampler.InvokeCallbackForLastSample(cls_id); | 
 |     heap->SetHeapSamplingData(raw_obj, data); | 
 |     thread->DecrementNoCallbackScopeDepth(); | 
 |   } | 
 | #endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER) | 
 |  | 
 | #if !defined(PRODUCT) | 
 |   auto class_table = thread->isolate_group()->class_table(); | 
 |   if (class_table->ShouldTraceAllocationFor(cls_id)) { | 
 |     uint32_t hash = | 
 |         HeapSnapshotWriter::GetHeapSnapshotIdentityHash(thread, raw_obj); | 
 |     Profiler::SampleAllocation(thread, cls_id, hash); | 
 |   } | 
 | #endif  // !defined(PRODUCT) | 
 |   return raw_obj; | 
 | } | 
 |  | 
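// Visitor that re-runs the write barrier checks (store buffer, marking) for
// all pointer fields of an old-space object whose fields were written
// without barriers, e.g. the raw copy performed by Object::Clone below.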
 | class WriteBarrierUpdateVisitor : public ObjectPointerVisitor { | 
 |  public: | 
 |   explicit WriteBarrierUpdateVisitor(Thread* thread, ObjectPtr obj) | 
 |       : ObjectPointerVisitor(thread->isolate_group()), | 
 |         thread_(thread), | 
 |         old_obj_(obj) { | 
 |     ASSERT(old_obj_->IsOldObject()); | 
 |   } | 
 |  | 
 |   void VisitPointers(ObjectPtr* from, ObjectPtr* to) override { | 
 |     if (old_obj_->IsArray()) { | 
 |       for (ObjectPtr* slot = from; slot <= to; ++slot) { | 
 |         ObjectPtr value = *slot; | 
 |         if (value->IsHeapObject()) { | 
 |           old_obj_->untag()->CheckArrayPointerStore(slot, value, thread_); | 
 |         } | 
 |       } | 
 |     } else { | 
 |       for (ObjectPtr* slot = from; slot <= to; ++slot) { | 
 |         ObjectPtr value = *slot; | 
 |         if (value->IsHeapObject()) { | 
 |           old_obj_->untag()->CheckHeapPointerStore(value, thread_); | 
 |         } | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
 | #if defined(DART_COMPRESSED_POINTERS) | 
 |   void VisitCompressedPointers(uword heap_base, | 
 |                                CompressedObjectPtr* from, | 
 |                                CompressedObjectPtr* to) override { | 
 |     if (old_obj_->IsArray()) { | 
 |       for (CompressedObjectPtr* slot = from; slot <= to; ++slot) { | 
 |         ObjectPtr value = slot->Decompress(heap_base); | 
 |         if (value->IsHeapObject()) { | 
 |           old_obj_->untag()->CheckArrayPointerStore(slot, value, thread_); | 
 |         } | 
 |       } | 
 |     } else { | 
 |       for (CompressedObjectPtr* slot = from; slot <= to; ++slot) { | 
 |         ObjectPtr value = slot->Decompress(heap_base); | 
 |         if (value->IsHeapObject()) { | 
 |           old_obj_->untag()->CheckHeapPointerStore(value, thread_); | 
 |         } | 
 |       } | 
 |     } | 
 |   } | 
 | #endif | 
 |  | 
 |  private: | 
 |   Thread* thread_; | 
 |   ObjectPtr old_obj_; | 
 |  | 
 |   DISALLOW_COPY_AND_ASSIGN(WriteBarrierUpdateVisitor); | 
 | }; | 
 |  | 
 | #if defined(DEBUG) | 
 | bool Object::IsZoneHandle() const { | 
 |   return VMHandles::IsZoneHandle(reinterpret_cast<uword>(this)); | 
 | } | 
 |  | 
 | bool Object::IsReadOnlyHandle() const { | 
 |   return Dart::IsReadOnlyHandle(reinterpret_cast<uword>(this)); | 
 | } | 
 |  | 
 | bool Object::IsNotTemporaryScopedHandle() const { | 
 |   return (IsZoneHandle() || IsReadOnlyHandle()); | 
 | } | 
 | #endif | 
 |  | 
 | ObjectPtr Object::Clone(const Object& orig, | 
 |                         Heap::Space space, | 
 |                         bool load_with_relaxed_atomics) { | 
 |   // Generic function types should be cloned with FunctionType::Clone. | 
 |   ASSERT(!orig.IsFunctionType() || !FunctionType::Cast(orig).IsGeneric()); | 
 |   const Class& cls = Class::Handle(orig.clazz()); | 
 |   intptr_t size = orig.ptr()->untag()->HeapSize(); | 
 |   // All fields (including non-SmiPtr fields) will be initialized with Smi 0, | 
 |   // but the contents of the original object are copied over before the thread | 
 |   // is allowed to reach a safepoint. | 
 |   ObjectPtr raw_clone = | 
 |       Object::Allocate(cls.id(), size, space, cls.HasCompressedPointers(), | 
 |                        from_offset<Object>(), to_offset<Object>()); | 
 |   NoSafepointScope no_safepoint; | 
 |   // Copy the body of the original into the clone. | 
 |   uword orig_addr = UntaggedObject::ToAddr(orig.ptr()); | 
 |   uword clone_addr = UntaggedObject::ToAddr(raw_clone); | 
 |   const intptr_t kHeaderSizeInBytes = sizeof(UntaggedObject); | 
 |   if (load_with_relaxed_atomics) { | 
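    // Copy word-by-word with relaxed loads so that a concurrent writer
    // mutating the original object does not race with this copy.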
 |     auto orig_atomics_ptr = reinterpret_cast<std::atomic<uword>*>(orig_addr); | 
 |     auto clone_ptr = reinterpret_cast<uword*>(clone_addr); | 
 |     for (intptr_t i = kHeaderSizeInBytes / kWordSize; i < size / kWordSize; | 
 |          i++) { | 
 |       *(clone_ptr + i) = | 
 |           (orig_atomics_ptr + i)->load(std::memory_order_relaxed); | 
 |     } | 
 |   } else { | 
 |     memmove(reinterpret_cast<uint8_t*>(clone_addr + kHeaderSizeInBytes), | 
 |             reinterpret_cast<uint8_t*>(orig_addr + kHeaderSizeInBytes), | 
 |             size - kHeaderSizeInBytes); | 
 |   } | 
 |  | 
 |   if (IsTypedDataClassId(raw_clone->GetClassId())) { | 
 |     auto raw_typed_data = TypedData::RawCast(raw_clone); | 
 |     raw_typed_data.untag()->RecomputeDataField(); | 
 |   } | 
 |  | 
 |   // Add clone to store buffer, if needed. | 
 |   if (!raw_clone->IsOldObject()) { | 
 |     // No need to remember an object in new space. | 
 |     return raw_clone; | 
 |   } | 
 |   WriteBarrierUpdateVisitor visitor(Thread::Current(), raw_clone); | 
 |   raw_clone->untag()->VisitPointers(&visitor); | 
 |   return raw_clone; | 
 | } | 
 |  | 
 | bool Class::HasCompressedPointers() const { | 
 |   const intptr_t cid = id(); | 
 |   switch (cid) { | 
 |     case kByteBufferCid: | 
 |       return ByteBuffer::ContainsCompressedPointers(); | 
 | #define HANDLE_CASE(clazz)                                                     \ | 
 |   case k##clazz##Cid:                                                          \ | 
 |     return dart::clazz::ContainsCompressedPointers(); | 
 |       CLASS_LIST(HANDLE_CASE) | 
 | #undef HANDLE_CASE | 
 | #define HANDLE_CASE(clazz)                                                     \ | 
 |   case kTypedData##clazz##Cid:                                                 \ | 
 |     return dart::TypedData::ContainsCompressedPointers();                      \ | 
 |   case kTypedData##clazz##ViewCid:                                             \ | 
 |   case kUnmodifiableTypedData##clazz##ViewCid:                                 \ | 
 |     return dart::TypedDataView::ContainsCompressedPointers();                  \ | 
 |   case kExternalTypedData##clazz##Cid:                                         \ | 
 |     return dart::ExternalTypedData::ContainsCompressedPointers(); | 
 |       CLASS_LIST_TYPED_DATA(HANDLE_CASE) | 
 | #undef HANDLE_CASE | 
 |     default: | 
 |       if (cid >= kNumPredefinedCids) { | 
 |         return dart::Instance::ContainsCompressedPointers(); | 
 |       } | 
 |   } | 
 |   FATAL("Unsupported class for compressed pointers translation: %s (id=%" Pd | 
 |         ", kNumPredefinedCids=%" Pd ")\n", | 
 |         ToCString(), cid, kNumPredefinedCids); | 
 |   return false; | 
 | } | 
 |  | 
 | StringPtr Class::Name() const { | 
 |   return untag()->name(); | 
 | } | 
 |  | 
 | StringPtr Class::ScrubbedName() const { | 
 |   return Symbols::New(Thread::Current(), ScrubbedNameCString()); | 
 | } | 
 |  | 
 | const char* Class::ScrubbedNameCString() const { | 
 |   return String::ScrubName(String::Handle(Name())); | 
 | } | 
 |  | 
 | StringPtr Class::UserVisibleName() const { | 
 | #if !defined(PRODUCT) | 
 |   ASSERT(untag()->user_name() != String::null()); | 
 |   return untag()->user_name(); | 
 | #endif  // !defined(PRODUCT) | 
 |   // No caching in PRODUCT, regenerate. | 
 |   return Symbols::New(Thread::Current(), GenerateUserVisibleName()); | 
 | } | 
 |  | 
 | const char* Class::UserVisibleNameCString() const { | 
 | #if !defined(PRODUCT) | 
 |   ASSERT(untag()->user_name() != String::null()); | 
 |   return String::Handle(untag()->user_name()).ToCString(); | 
 | #endif                               // !defined(PRODUCT) | 
 |   return GenerateUserVisibleName();  // No caching in PRODUCT, regenerate. | 
 | } | 
 |  | 
 | const char* Class::NameCString(NameVisibility name_visibility) const { | 
 |   switch (name_visibility) { | 
 |     case Object::kInternalName: | 
 |       return String::Handle(Name()).ToCString(); | 
 |     case Object::kScrubbedName: | 
 |       return ScrubbedNameCString(); | 
 |     case Object::kUserVisibleName: | 
 |       return UserVisibleNameCString(); | 
 |     default: | 
 |       UNREACHABLE(); | 
 |       return nullptr; | 
 |   } | 
 | } | 
 |  | 
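// Returns the mixin class for a transformed mixin application (the type
// class of the last entry in its interfaces list), or the class itself
// otherwise.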
 | ClassPtr Class::Mixin() const { | 
 |   if (is_transformed_mixin_application()) { | 
 |     const Array& interfaces = Array::Handle(this->interfaces()); | 
 |     const Type& mixin_type = | 
 |         Type::Handle(Type::RawCast(interfaces.At(interfaces.Length() - 1))); | 
 |     return mixin_type.type_class(); | 
 |   } | 
 |   return ptr(); | 
 | } | 
 |  | 
 | bool Class::IsInFullSnapshot() const { | 
 |   NoSafepointScope no_safepoint; | 
 |   return UntaggedLibrary::InFullSnapshotBit::decode( | 
 |       untag()->library()->untag()->flags_); | 
 | } | 
 |  | 
 | TypePtr Class::RareType() const { | 
 |   if (!IsGeneric()) { | 
 |     return DeclarationType(); | 
 |   } | 
 |   ASSERT(is_declaration_loaded()); | 
 |   Thread* const thread = Thread::Current(); | 
 |   Zone* const zone = thread->zone(); | 
 |   const auto& inst_to_bounds = | 
 |       TypeArguments::Handle(zone, DefaultTypeArguments(zone)); | 
 |   ASSERT(inst_to_bounds.ptr() != Object::empty_type_arguments().ptr()); | 
 |   auto& type = Type::Handle( | 
 |       zone, Type::New(*this, inst_to_bounds, Nullability::kNonNullable)); | 
 |   type ^= ClassFinalizer::FinalizeType(type); | 
 |   return type.ptr(); | 
 | } | 
 |  | 
 | template <class FakeObject, class TargetFakeObject> | 
 | ClassPtr Class::New(IsolateGroup* isolate_group, bool register_class) { | 
 |   ASSERT(Object::class_class() != Class::null()); | 
 |   const auto& result = Class::Handle(Object::Allocate<Class>(Heap::kOld)); | 
 |   Object::VerifyBuiltinVtable<FakeObject>(FakeObject::kClassId); | 
 |   NOT_IN_PRECOMPILED(result.set_token_pos(TokenPosition::kNoSource)); | 
 |   NOT_IN_PRECOMPILED(result.set_end_token_pos(TokenPosition::kNoSource)); | 
 |   result.set_instance_size(FakeObject::InstanceSize(), | 
 |                            compiler::target::RoundedAllocationSize( | 
 |                                TargetFakeObject::InstanceSize())); | 
 |   result.set_type_arguments_field_offset_in_words(kNoTypeArguments, | 
 |                                                   RTN::Class::kNoTypeArguments); | 
 |   const intptr_t host_next_field_offset = FakeObject::NextFieldOffset(); | 
 |   const intptr_t target_next_field_offset = TargetFakeObject::NextFieldOffset(); | 
 |   result.set_next_field_offset(host_next_field_offset, | 
 |                                target_next_field_offset); | 
 |   COMPILE_ASSERT((FakeObject::kClassId != kInstanceCid)); | 
 |   result.set_id(FakeObject::kClassId); | 
 |   NOT_IN_PRECOMPILED(result.set_implementor_cid(kIllegalCid)); | 
 |   result.set_num_type_arguments_unsafe(0); | 
 |   result.set_num_native_fields(0); | 
 |   result.set_state_bits(0); | 
 |   if (IsInternalOnlyClassId(FakeObject::kClassId) || | 
 |       (FakeObject::kClassId == kTypeArgumentsCid)) { | 
 |     // VM internal classes are done. There is no finalization needed or | 
 |     // possible in this case. | 
 |     result.set_is_declaration_loaded(); | 
 |     result.set_is_type_finalized(); | 
 |     result.set_is_allocate_finalized(); | 
 |   } else if (FakeObject::kClassId != kClosureCid) { | 
 |     // VM backed classes are almost ready: run checks and resolve class | 
 |     // references, but do not recompute size. | 
 |     result.set_is_prefinalized(); | 
 |   } | 
 |   if (FakeObject::kClassId < kNumPredefinedCids && | 
 |       IsDeeplyImmutableCid(FakeObject::kClassId)) { | 
 |     result.set_is_deeply_immutable(true); | 
 |   } | 
 |   NOT_IN_PRECOMPILED(result.set_kernel_offset(0)); | 
 |   result.InitEmptyFields(); | 
 |   if (register_class) { | 
 |     isolate_group->class_table()->Register(result); | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | static void ReportTooManyTypeArguments(const Class& cls) { | 
 |   Report::MessageF(Report::kError, Script::Handle(cls.script()), | 
 |                    cls.token_pos(), Report::AtLocation, | 
 |                    "too many type parameters declared in class '%s' or in its " | 
 |                    "super classes", | 
 |                    String::Handle(cls.Name()).ToCString()); | 
 |   UNREACHABLE(); | 
 | } | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | void Class::set_num_type_arguments(intptr_t value) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   if (!Utils::IsInt(16, value)) { | 
 |     ReportTooManyTypeArguments(*this); | 
 |   } | 
  // We allow concurrent calculation of the number of type arguments. If two
  // threads perform this operation, it doesn't matter which one wins.
 |   DEBUG_ONLY(intptr_t old_value = num_type_arguments()); | 
 |   DEBUG_ASSERT(old_value == kUnknownNumTypeArguments || old_value == value); | 
 |   StoreNonPointer<int16_t, int16_t, std::memory_order_relaxed>( | 
 |       &untag()->num_type_arguments_, value); | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | void Class::set_num_type_arguments_unsafe(intptr_t value) const { | 
 |   StoreNonPointer(&untag()->num_type_arguments_, value); | 
 | } | 
 |  | 
 | void Class::set_has_pragma(bool value) const { | 
 |   set_state_bits(HasPragmaBit::update(value, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_isolate_unsendable(bool value) const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(IsIsolateUnsendableBit::update(value, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_isolate_unsendable_due_to_pragma(bool value) const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits( | 
 |       IsIsolateUnsendableDueToPragmaBit::update(value, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_deeply_immutable(bool value) const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(IsDeeplyImmutableBit::update(value, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_future_subtype(bool value) const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(IsFutureSubtypeBit::update(value, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_can_be_future(bool value) const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(CanBeFutureBit::update(value, state_bits())); | 
 | } | 
 |  | 
// Initialize the Array-typed fields of the class with the empty array
// (constants is initialized with the null array).
 | void Class::InitEmptyFields() const { | 
 |   if (Object::empty_array().ptr() == Array::null()) { | 
 |     // The empty array has not been initialized yet. | 
 |     return; | 
 |   } | 
 |   untag()->set_interfaces(Object::empty_array().ptr()); | 
 |   untag()->set_constants(Object::null_array().ptr()); | 
 |   set_functions(Object::empty_array()); | 
 |   set_fields(Object::empty_array()); | 
 |   set_invocation_dispatcher_cache(Object::empty_array()); | 
 | } | 
 |  | 
 | ArrayPtr Class::OffsetToFieldMap( | 
 |     ClassTable* class_table /* = nullptr */) const { | 
 |   ASSERT(is_finalized()); | 
 |   if (untag()->offset_in_words_to_field<std::memory_order_acquire>() == | 
 |       Array::null()) { | 
 |     // Even if multiple threads are calling this concurrently, all of them would | 
 |     // compute the same array, so we intentionally don't acquire any locks here. | 
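    // The array maps each compressed-word offset within an instance to the
    // instance Field stored at that offset; slots not holding a field stay
    // null.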
 |     const intptr_t length = untag()->host_instance_size_in_words_; | 
 |     const Array& array = Array::Handle(Array::New(length, Heap::kOld)); | 
 |     Class& cls = Class::Handle(this->ptr()); | 
 |     Array& fields = Array::Handle(); | 
 |     Field& f = Field::Handle(); | 
 |     while (!cls.IsNull()) { | 
 |       fields = cls.fields(); | 
 |       for (intptr_t i = 0; i < fields.Length(); ++i) { | 
 |         f ^= fields.At(i); | 
 |         if (f.is_instance()) { | 
 |           array.SetAt(f.HostOffset() >> kCompressedWordSizeLog2, f); | 
 |         } | 
 |       } | 
 |       cls = cls.SuperClass(class_table); | 
 |     } | 
 |     untag()->set_offset_in_words_to_field<std::memory_order_release>( | 
 |         array.ptr()); | 
 |   } | 
 |   return untag()->offset_in_words_to_field<std::memory_order_acquire>(); | 
 | } | 
 |  | 
 | bool Class::HasInstanceFields() const { | 
 |   const Array& field_array = Array::Handle(fields()); | 
 |   Field& field = Field::Handle(); | 
 |   for (intptr_t i = 0; i < field_array.Length(); ++i) { | 
 |     field ^= field_array.At(i); | 
 |     if (!field.is_static()) { | 
 |       return true; | 
 |     } | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
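// Lookup key that matches a Function by name: compares by pointer if the
// name is a canonical Symbol, and by content otherwise.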
 | class FunctionName { | 
 |  public: | 
 |   FunctionName(const String& name, String* tmp_string) | 
 |       : name_(name), tmp_string_(tmp_string) {} | 
 |   bool Matches(const Function& function) const { | 
 |     if (name_.IsSymbol()) { | 
 |       return name_.ptr() == function.name(); | 
 |     } else { | 
 |       *tmp_string_ = function.name(); | 
 |       return name_.Equals(*tmp_string_); | 
 |     } | 
 |   } | 
 |   intptr_t Hash() const { return name_.Hash(); } | 
 |  | 
 |  private: | 
 |   const String& name_; | 
 |   String* tmp_string_; | 
 | }; | 
 |  | 
 | // Traits for looking up Functions by name. | 
 | class ClassFunctionsTraits { | 
 |  public: | 
 |   static const char* Name() { return "ClassFunctionsTraits"; } | 
 |   static bool ReportStats() { return false; } | 
 |  | 
 |   // Called when growing the table. | 
 |   static bool IsMatch(const Object& a, const Object& b) { | 
 |     ASSERT(a.IsFunction() && b.IsFunction()); | 
 |     // Function objects are always canonical. | 
 |     return a.ptr() == b.ptr(); | 
 |   } | 
 |   static bool IsMatch(const FunctionName& name, const Object& obj) { | 
 |     return name.Matches(Function::Cast(obj)); | 
 |   } | 
 |   static uword Hash(const Object& key) { | 
 |     return String::HashRawSymbol(Function::Cast(key).name()); | 
 |   } | 
 |   static uword Hash(const FunctionName& name) { return name.Hash(); } | 
 | }; | 
 | typedef UnorderedHashSet<ClassFunctionsTraits> ClassFunctionsSet; | 
 |  | 
 | void Class::SetFunctions(const Array& value) const { | 
 |   ASSERT(!value.IsNull()); | 
 |   const intptr_t len = value.Length(); | 
 | #if defined(DEBUG) | 
 |   Thread* thread = Thread::Current(); | 
 |   ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); | 
 |   if (is_finalized()) { | 
 |     Function& function = Function::Handle(); | 
 |     FunctionType& signature = FunctionType::Handle(); | 
 |     for (intptr_t i = 0; i < len; ++i) { | 
 |       function ^= value.At(i); | 
 |       signature = function.signature(); | 
 |       ASSERT(signature.IsFinalized()); | 
 |     } | 
 |   } | 
 | #endif | 
 |   set_functions(value); | 
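  // For classes with many functions, additionally build a hash set keyed by
  // function name so that name lookups avoid a linear scan.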
 |   if (len >= kFunctionLookupHashThreshold) { | 
 |     ClassFunctionsSet set(HashTables::New<ClassFunctionsSet>(len, Heap::kOld)); | 
 |     Function& func = Function::Handle(); | 
 |     for (intptr_t i = 0; i < len; ++i) { | 
 |       func ^= value.At(i); | 
 |       // Verify that all the functions in the array have this class as owner. | 
 |       ASSERT(func.Owner() == ptr()); | 
 |       set.Insert(func); | 
 |     } | 
 |     untag()->set_functions_hash_table(set.Release().ptr()); | 
 |   } else { | 
 |     untag()->set_functions_hash_table(Array::null()); | 
 |   } | 
 | } | 
 |  | 
 | void Class::AddFunction(const Function& function) const { | 
 | #if defined(DEBUG) | 
 |   Thread* thread = Thread::Current(); | 
 |   ASSERT(thread->IsDartMutatorThread()); | 
 |   ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); | 
 |   ASSERT(!is_finalized() || | 
 |          FunctionType::Handle(function.signature()).IsFinalized()); | 
 | #endif | 
 |   const Array& arr = Array::Handle(functions()); | 
 |   const Array& new_array = | 
 |       Array::Handle(Array::Grow(arr, arr.Length() + 1, Heap::kOld)); | 
 |   new_array.SetAt(arr.Length(), function); | 
 |   set_functions(new_array); | 
 |   // Add to hash table, if any. | 
 |   const intptr_t new_len = new_array.Length(); | 
 |   if (new_len == kFunctionLookupHashThreshold) { | 
 |     // Transition to using hash table. | 
 |     SetFunctions(new_array); | 
 |   } else if (new_len > kFunctionLookupHashThreshold) { | 
 |     ClassFunctionsSet set(untag()->functions_hash_table()); | 
 |     set.Insert(function); | 
 |     untag()->set_functions_hash_table(set.Release().ptr()); | 
 |   } | 
 | } | 
 |  | 
 | intptr_t Class::FindFunctionIndex(const Function& needle) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   if (EnsureIsFinalized(thread) != Error::null()) { | 
 |     return -1; | 
 |   } | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_FUNCTION_HANDLESCOPE(thread); | 
 |   Array& funcs = thread->ArrayHandle(); | 
 |   Function& function = thread->FunctionHandle(); | 
 |   funcs = current_functions(); | 
 |   ASSERT(!funcs.IsNull()); | 
 |   const intptr_t len = funcs.Length(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     function ^= funcs.At(i); | 
 |     if (needle.ptr() == function.ptr()) { | 
 |       return i; | 
 |     } | 
 |   } | 
 |   // No function found. | 
 |   return -1; | 
 | } | 
 |  | 
 | FunctionPtr Class::FunctionFromIndex(intptr_t idx) const { | 
 |   const Array& funcs = Array::Handle(current_functions()); | 
 |   if ((idx < 0) || (idx >= funcs.Length())) { | 
 |     return Function::null(); | 
 |   } | 
 |   Function& func = Function::Handle(); | 
 |   func ^= funcs.At(idx); | 
 |   ASSERT(!func.IsNull()); | 
 |   return func.ptr(); | 
 | } | 
 |  | 
 | FunctionPtr Class::ImplicitClosureFunctionFromIndex(intptr_t idx) const { | 
 |   Function& func = Function::Handle(FunctionFromIndex(idx)); | 
 |   if (func.IsNull() || !func.HasImplicitClosureFunction()) { | 
 |     return Function::null(); | 
 |   } | 
 |   func = func.ImplicitClosureFunction(); | 
 |   ASSERT(!func.IsNull()); | 
 |   return func.ptr(); | 
 | } | 
 |  | 
 | intptr_t Class::FindImplicitClosureFunctionIndex(const Function& needle) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   if (EnsureIsFinalized(thread) != Error::null()) { | 
 |     return -1; | 
 |   } | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_FUNCTION_HANDLESCOPE(thread); | 
 |   Array& funcs = thread->ArrayHandle(); | 
 |   Function& function = thread->FunctionHandle(); | 
 |   funcs = current_functions(); | 
 |   ASSERT(!funcs.IsNull()); | 
 |   Function& implicit_closure = Function::Handle(thread->zone()); | 
 |   const intptr_t len = funcs.Length(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     function ^= funcs.At(i); | 
 |     implicit_closure = function.implicit_closure_function(); | 
 |     if (implicit_closure.IsNull()) { | 
 |       // Skip non-implicit closure functions. | 
 |       continue; | 
 |     } | 
 |     if (needle.ptr() == implicit_closure.ptr()) { | 
 |       return i; | 
 |     } | 
 |   } | 
 |   // No function found. | 
 |   return -1; | 
 | } | 
 |  | 
 | intptr_t Class::FindInvocationDispatcherFunctionIndex( | 
 |     const Function& needle) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   if (EnsureIsFinalized(thread) != Error::null()) { | 
 |     return -1; | 
 |   } | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_OBJECT_HANDLESCOPE(thread); | 
 |   Array& funcs = thread->ArrayHandle(); | 
 |   Object& object = thread->ObjectHandle(); | 
 |   funcs = invocation_dispatcher_cache(); | 
 |   ASSERT(!funcs.IsNull()); | 
 |   const intptr_t len = funcs.Length(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     object = funcs.At(i); | 
 |     // The invocation_dispatcher_cache is a table with some entries that | 
 |     // are functions. | 
 |     if (object.IsFunction()) { | 
 |       if (Function::Cast(object).ptr() == needle.ptr()) { | 
 |         return i; | 
 |       } | 
 |     } | 
 |   } | 
 |   // No function found. | 
 |   return -1; | 
 | } | 
 |  | 
 | FunctionPtr Class::InvocationDispatcherFunctionFromIndex(intptr_t idx) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_OBJECT_HANDLESCOPE(thread); | 
 |   Array& dispatcher_cache = thread->ArrayHandle(); | 
 |   Object& object = thread->ObjectHandle(); | 
 |   dispatcher_cache = invocation_dispatcher_cache(); | 
 |   object = dispatcher_cache.At(idx); | 
 |   if (!object.IsFunction()) { | 
 |     return Function::null(); | 
 |   } | 
 |   return Function::Cast(object).ptr(); | 
 | } | 
 |  | 
 | void Class::set_state_bits(intptr_t bits) const { | 
 |   StoreNonPointer<uint32_t, uint32_t, std::memory_order_release>( | 
 |       &untag()->state_bits_, static_cast<uint32_t>(bits)); | 
 | } | 
 |  | 
 | void Class::set_library(const Library& value) const { | 
 |   untag()->set_library(value.ptr()); | 
 | } | 
 |  | 
 | void Class::set_type_parameters(const TypeParameters& value) const { | 
 |   ASSERT((num_type_arguments() == kUnknownNumTypeArguments) || | 
 |          is_prefinalized()); | 
 |   untag()->set_type_parameters(value.ptr()); | 
 | } | 
 |  | 
 | void Class::set_functions(const Array& value) const { | 
 |   // Ensure all writes to the [Function]s are visible by the time the array | 
 |   // is visible. | 
 |   untag()->set_functions<std::memory_order_release>(value.ptr()); | 
 | } | 
 |  | 
 | void Class::set_fields(const Array& value) const { | 
 |   // Ensure all writes to the [Field]s are visible by the time the array | 
 |   // is visible. | 
 |   untag()->set_fields<std::memory_order_release>(value.ptr()); | 
 | } | 
 |  | 
 | void Class::set_invocation_dispatcher_cache(const Array& cache) const { | 
 |   // Ensure all writes to the cache are visible by the time the array | 
 |   // is visible. | 
 |   untag()->set_invocation_dispatcher_cache<std::memory_order_release>( | 
 |       cache.ptr()); | 
 | } | 
 |  | 
 | void Class::set_declaration_instance_type_arguments( | 
 |     const TypeArguments& value) const { | 
 |   ASSERT(value.IsNull() || (value.IsCanonical() && value.IsOld())); | 
 |   ASSERT((declaration_instance_type_arguments() == TypeArguments::null()) || | 
 |          (declaration_instance_type_arguments() == value.ptr())); | 
 |   untag()->set_declaration_instance_type_arguments<std::memory_order_release>( | 
 |       value.ptr()); | 
 | } | 
 |  | 
 | TypeArgumentsPtr Class::GetDeclarationInstanceTypeArguments() const { | 
 |   const intptr_t num_type_arguments = NumTypeArguments(); | 
 |   if (num_type_arguments == 0) { | 
 |     return TypeArguments::null(); | 
 |   } | 
 |   if (declaration_instance_type_arguments() != TypeArguments::null()) { | 
 |     return declaration_instance_type_arguments(); | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |   if (declaration_instance_type_arguments() != TypeArguments::null()) { | 
 |     return declaration_instance_type_arguments(); | 
 |   } | 
 |   Zone* zone = thread->zone(); | 
 |   auto& args = TypeArguments::Handle(zone); | 
 |   auto& type = AbstractType::Handle(zone); | 
 |   const intptr_t num_type_parameters = NumTypeParameters(thread); | 
 |   if (num_type_arguments == num_type_parameters) { | 
 |     type = DeclarationType(); | 
 |     args = Type::Cast(type).arguments(); | 
 |   } else { | 
 |     type = super_type(); | 
 |     const auto& super_args = TypeArguments::Handle( | 
 |         zone, Type::Cast(type).GetInstanceTypeArguments(thread)); | 
 |     if ((num_type_parameters == 0) || | 
 |         (!super_args.IsNull() && (super_args.Length() == num_type_arguments))) { | 
 |       args = super_args.ptr(); | 
 |     } else { | 
 |       args = TypeArguments::New(num_type_arguments); | 
 |       const intptr_t offset = num_type_arguments - num_type_parameters; | 
 |       for (intptr_t i = 0; i < offset; ++i) { | 
 |         type = super_args.TypeAtNullSafe(i); | 
 |         args.SetTypeAt(i, type); | 
 |       } | 
 |       type = DeclarationType(); | 
 |       const auto& decl_args = | 
 |           TypeArguments::Handle(zone, Type::Cast(type).arguments()); | 
 |       for (intptr_t i = 0; i < num_type_parameters; ++i) { | 
 |         type = decl_args.TypeAt(i); | 
 |         args.SetTypeAt(offset + i, type); | 
 |       } | 
 |     } | 
 |   } | 
 |   args = args.Canonicalize(thread); | 
 |   set_declaration_instance_type_arguments(args); | 
 |   return args.ptr(); | 
 | } | 
 |  | 
 | TypeArgumentsPtr Class::GetInstanceTypeArguments( | 
 |     Thread* thread, | 
 |     const TypeArguments& type_arguments, | 
 |     bool canonicalize) const { | 
 |   const intptr_t num_type_arguments = NumTypeArguments(); | 
 |   if (num_type_arguments == 0) { | 
 |     return TypeArguments::null(); | 
 |   } | 
 |   Zone* zone = thread->zone(); | 
 |   auto& args = TypeArguments::Handle(zone); | 
 |   const intptr_t num_type_parameters = NumTypeParameters(thread); | 
 |   ASSERT(type_arguments.IsNull() || | 
 |          type_arguments.Length() == num_type_parameters); | 
 |   if (num_type_arguments == num_type_parameters) { | 
 |     args = type_arguments.ptr(); | 
 |   } else { | 
 |     args = GetDeclarationInstanceTypeArguments(); | 
 |     if (num_type_parameters == 0) { | 
 |       return args.ptr(); | 
 |     } | 
 |     args = args.InstantiateFrom( | 
 |         TypeArguments::Handle( | 
 |             zone, type_arguments.ToInstantiatorTypeArguments(thread, *this)), | 
 |         Object::null_type_arguments(), kAllFree, Heap::kOld); | 
 |   } | 
 |   if (canonicalize) { | 
 |     args = args.Canonicalize(thread); | 
 |   } | 
 |   return args.ptr(); | 
 | } | 
 |  | 
 | intptr_t Class::NumTypeParameters(Thread* thread) const { | 
 |   if (!is_declaration_loaded()) { | 
 |     ASSERT(is_prefinalized()); | 
 |     const intptr_t cid = id(); | 
 |     if ((cid == kArrayCid) || (cid == kImmutableArrayCid) || | 
 |         (cid == kGrowableObjectArrayCid)) { | 
 |       return 1;  // List's type parameter may not have been parsed yet. | 
 |     } | 
 |     return 0; | 
 |   } | 
 |   if (type_parameters() == TypeParameters::null()) { | 
 |     return 0; | 
 |   } | 
 |   REUSABLE_TYPE_PARAMETERS_HANDLESCOPE(thread); | 
 |   TypeParameters& type_params = thread->TypeParametersHandle(); | 
 |   type_params = type_parameters(); | 
 |   return type_params.Length(); | 
 | } | 
 |  | 
 | intptr_t Class::ComputeNumTypeArguments() const { | 
 |   ASSERT(is_declaration_loaded()); | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   const intptr_t num_type_params = NumTypeParameters(); | 
 |  | 
 |   if ((super_type() == AbstractType::null()) || | 
 |       (super_type() == isolate_group->object_store()->object_type())) { | 
 |     return num_type_params; | 
 |   } | 
 |  | 
 |   const auto& sup_type = Type::Handle(zone, super_type()); | 
 |   const auto& sup_class = Class::Handle(zone, sup_type.type_class()); | 
 |   const intptr_t sup_class_num_type_args = sup_class.NumTypeArguments(); | 
 |   if (num_type_params == 0) { | 
 |     return sup_class_num_type_args; | 
 |   } | 
 |  | 
 |   const auto& sup_type_args = TypeArguments::Handle(zone, sup_type.arguments()); | 
 |   if (sup_type_args.IsNull()) { | 
    // The super type is raw or the super class is non-generic.
 |     // In either case, overlapping is not possible. | 
 |     return sup_class_num_type_args + num_type_params; | 
 |   } | 
 |  | 
 |   const intptr_t sup_type_args_length = sup_type_args.Length(); | 
 |   // Determine the maximum overlap of a prefix of the vector consisting of the | 
 |   // type parameters of this class with a suffix of the vector consisting of the | 
 |   // type arguments of the super type of this class. | 
 |   // The number of own type arguments of this class is the number of its type | 
 |   // parameters minus the number of type arguments in the overlap. | 
 |   // Attempt to overlap the whole vector of type parameters; reduce the size | 
 |   // of the vector (keeping the first type parameter) until it fits or until | 
 |   // its size is zero. | 
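  // For example, in `class C<T> extends S<int, T>`, the type parameter
  // vector [T] of C overlaps the suffix of the super type argument vector
  // [int, T], so C introduces no own type arguments and an instance of C
  // can share one type argument vector with its super type.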
 |   auto& sup_type_arg = AbstractType::Handle(zone); | 
 |   for (intptr_t num_overlapping_type_args = | 
 |            (num_type_params < sup_type_args_length) ? num_type_params | 
 |                                                     : sup_type_args_length; | 
 |        num_overlapping_type_args > 0; num_overlapping_type_args--) { | 
 |     intptr_t i = 0; | 
 |     for (; i < num_overlapping_type_args; i++) { | 
 |       sup_type_arg = sup_type_args.TypeAt(sup_type_args_length - | 
 |                                           num_overlapping_type_args + i); | 
 |       ASSERT(!sup_type_arg.IsNull()); | 
 |       if (!sup_type_arg.IsTypeParameter()) break; | 
 |       // The only type parameters appearing in the type arguments of the super | 
 |       // type are those declared by this class. Their finalized indices depend | 
 |       // on the number of type arguments being computed here. Therefore, they | 
 |       // cannot possibly be finalized yet. | 
 |       ASSERT(!TypeParameter::Cast(sup_type_arg).IsFinalized()); | 
 |       if (TypeParameter::Cast(sup_type_arg).index() != i || | 
 |           TypeParameter::Cast(sup_type_arg).IsNullable()) { | 
 |         break; | 
 |       } | 
 |     } | 
 |     if (i == num_overlapping_type_args) { | 
 |       // Overlap found. | 
 |       return sup_class_num_type_args + num_type_params - | 
 |              num_overlapping_type_args; | 
 |     } | 
 |   } | 
 |   // No overlap found. | 
 |   return sup_class_num_type_args + num_type_params; | 
 | } | 
 |  | 
 | intptr_t Class::NumTypeArguments() const { | 
 |   // Return cached value if already calculated. | 
 |   intptr_t num_type_args = num_type_arguments(); | 
 |   if (num_type_args != kUnknownNumTypeArguments) { | 
 |     return num_type_args; | 
 |   } | 
 |  | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 |   return 0; | 
 | #else | 
 |   num_type_args = ComputeNumTypeArguments(); | 
 |   ASSERT(num_type_args != kUnknownNumTypeArguments); | 
 |   set_num_type_arguments(num_type_args); | 
 |   return num_type_args; | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | TypeArgumentsPtr Class::DefaultTypeArguments(Zone* zone) const { | 
 |   if (type_parameters() == TypeParameters::null()) { | 
 |     return Object::empty_type_arguments().ptr(); | 
 |   } | 
 |   return TypeParameters::Handle(zone, type_parameters()).defaults(); | 
 | } | 
 |  | 
 | ClassPtr Class::SuperClass(ClassTable* class_table /* = nullptr */) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   if (class_table == nullptr) { | 
 |     class_table = thread->isolate_group()->class_table(); | 
 |   } | 
 |  | 
 |   if (super_type() == AbstractType::null()) { | 
 |     if (id() == kTypeArgumentsCid) { | 
 |       // Pretend TypeArguments objects are Dart instances. | 
 |       return class_table->At(kInstanceCid); | 
 |     } | 
 |     return Class::null(); | 
 |   } | 
 |   const AbstractType& sup_type = AbstractType::Handle(zone, super_type()); | 
 |   const intptr_t type_class_id = sup_type.type_class_id(); | 
 |   return class_table->At(type_class_id); | 
 | } | 
 |  | 
 | void Class::set_super_type(const Type& value) const { | 
 |   ASSERT(value.IsNull() || !value.IsDynamicType()); | 
 |   untag()->set_super_type(value.ptr()); | 
 | } | 
 |  | 
 | TypeParameterPtr Class::TypeParameterAt(intptr_t index, | 
 |                                         Nullability nullability) const { | 
 |   ASSERT(index >= 0 && index < NumTypeParameters()); | 
 |   TypeParameter& type_param = | 
 |       TypeParameter::Handle(TypeParameter::New(*this, 0, index, nullability)); | 
 |   // Finalize type parameter only if its declaring class is | 
 |   // finalized and available in the current class table. | 
 |   if (is_type_finalized() && (type_param.parameterized_class() == ptr())) { | 
 |     type_param ^= ClassFinalizer::FinalizeType(type_param); | 
 |   } | 
 |   return type_param.ptr(); | 
 | } | 
 |  | 
 | intptr_t Class::UnboxedFieldSizeInBytesByCid(intptr_t cid) { | 
 |   switch (cid) { | 
 |     case kDoubleCid: | 
 |       return sizeof(UntaggedDouble::value_); | 
 |     case kFloat32x4Cid: | 
 |       return sizeof(UntaggedFloat32x4::value_); | 
 |     case kFloat64x2Cid: | 
 |       return sizeof(UntaggedFloat64x2::value_); | 
 |     default: | 
 |       return sizeof(UntaggedMint::value_); | 
 |   } | 
 | } | 
 |  | 
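// Computes the offsets of all instance fields of this class, both for the
// host and for the compilation target, as well as the instance size and the
// bitmap marking which words of an instance hold unboxed (non-pointer) data.
// For example, an unboxed double field occupies
// sizeof(UntaggedDouble::value_) bytes, and each word it covers is marked in
// the returned bitmap so that it is not treated as a pointer (e.g. by the
// GC).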
 | UnboxedFieldBitmap Class::CalculateFieldOffsets() const { | 
 |   Array& flds = Array::Handle(fields()); | 
 |   const Class& super = Class::Handle(SuperClass()); | 
 |   intptr_t host_offset = 0; | 
 |   UnboxedFieldBitmap host_bitmap{}; | 
  // Target offsets might differ if the host and target word sizes differ.
 |   intptr_t target_offset = 0; | 
 |   intptr_t host_type_args_field_offset = kNoTypeArguments; | 
 |   intptr_t target_type_args_field_offset = RTN::Class::kNoTypeArguments; | 
 |   if (super.IsNull()) { | 
 |     host_offset = Instance::NextFieldOffset(); | 
 |     target_offset = RTN::Instance::NextFieldOffset(); | 
 |     ASSERT(host_offset > 0); | 
 |     ASSERT(target_offset > 0); | 
 |   } else { | 
 |     ASSERT(super.is_finalized() || super.is_prefinalized()); | 
 |     host_type_args_field_offset = super.host_type_arguments_field_offset(); | 
 |     target_type_args_field_offset = super.target_type_arguments_field_offset(); | 
 |     host_offset = super.host_next_field_offset(); | 
 |     ASSERT(host_offset > 0); | 
 |     target_offset = super.target_next_field_offset(); | 
 |     ASSERT(target_offset > 0); | 
    // We should never call CalculateFieldOffsets for native wrapper
    // classes; assert this.
 |     ASSERT(num_native_fields() == 0); | 
 |     const intptr_t num_native_fields = super.num_native_fields(); | 
 |     set_num_native_fields(num_native_fields); | 
 |     if (num_native_fields > 0 || is_isolate_unsendable_due_to_pragma()) { | 
 |       set_is_isolate_unsendable(true); | 
 |     } | 
 |  | 
 |     host_bitmap = IsolateGroup::Current()->class_table()->GetUnboxedFieldsMapAt( | 
 |         super.id()); | 
 |   } | 
  // If the super class is parameterized, use the same type_arguments field;
  // otherwise, if this class is the first in the super chain to be
  // parameterized, introduce a new type_arguments field.
 |   if (host_type_args_field_offset == kNoTypeArguments) { | 
 |     ASSERT(target_type_args_field_offset == RTN::Class::kNoTypeArguments); | 
 |     if (IsGeneric()) { | 
 |       // The instance needs a type_arguments field. | 
 |       host_type_args_field_offset = host_offset; | 
 |       target_type_args_field_offset = target_offset; | 
 |       host_offset += kCompressedWordSize; | 
 |       target_offset += compiler::target::kCompressedWordSize; | 
 |     } | 
 |   } else { | 
 |     ASSERT(target_type_args_field_offset != RTN::Class::kNoTypeArguments); | 
 |   } | 
 |  | 
 |   set_type_arguments_field_offset(host_type_args_field_offset, | 
 |                                   target_type_args_field_offset); | 
 |   ASSERT(host_offset > 0); | 
 |   ASSERT(target_offset > 0); | 
 |   Field& field = Field::Handle(); | 
 |   const intptr_t len = flds.Length(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     field ^= flds.At(i); | 
 |     // Offset is computed only for instance fields. | 
 |     if (!field.is_static()) { | 
 |       ASSERT(field.HostOffset() == 0); | 
 |       ASSERT(field.TargetOffset() == 0); | 
 |       field.SetOffset(host_offset, target_offset); | 
 |  | 
 |       if (field.is_unboxed()) { | 
 |         const intptr_t field_size = | 
 |             UnboxedFieldSizeInBytesByCid(field.guarded_cid()); | 
 |  | 
 |         const intptr_t host_num_words = field_size / kCompressedWordSize; | 
 |         const intptr_t host_next_offset = host_offset + field_size; | 
 |         const intptr_t host_next_position = | 
 |             host_next_offset / kCompressedWordSize; | 
 |  | 
 |         const intptr_t target_next_offset = target_offset + field_size; | 
 |         const intptr_t target_next_position = | 
 |             target_next_offset / compiler::target::kCompressedWordSize; | 
 |  | 
        // The bitmap has a fixed length. Check whether the position after
        // this field still fits within that length; if it does not, the
        // field must be boxed instead.
 |         if (host_next_position <= UnboxedFieldBitmap::Length() && | 
 |             target_next_position <= UnboxedFieldBitmap::Length()) { | 
 |           for (intptr_t j = 0; j < host_num_words; j++) { | 
            // Set the respective bit in the bitmap, indicating that this
            // word holds unboxed data rather than a pointer.
 |             host_bitmap.Set(host_offset / kCompressedWordSize); | 
 |             host_offset += kCompressedWordSize; | 
 |           } | 
 |  | 
 |           ASSERT(host_offset == host_next_offset); | 
 |           target_offset = target_next_offset; | 
 |         } else { | 
          // The field does not fit in the bitmap; make it boxed instead.
 |           field.set_is_unboxed(false); | 
 |           host_offset += kCompressedWordSize; | 
 |           target_offset += compiler::target::kCompressedWordSize; | 
 |         } | 
 |       } else { | 
 |         host_offset += kCompressedWordSize; | 
 |         target_offset += compiler::target::kCompressedWordSize; | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
 |   const intptr_t host_instance_size = RoundedAllocationSize(host_offset); | 
 |   const intptr_t target_instance_size = | 
 |       compiler::target::RoundedAllocationSize(target_offset); | 
 |   if (!Utils::IsInt(32, target_instance_size)) { | 
 |     // Many parts of the compiler assume offsets can be represented with | 
 |     // int32_t. | 
 |     FATAL("Too many fields in %s\n", UserVisibleNameCString()); | 
 |   } | 
 |   set_instance_size(host_instance_size, target_instance_size); | 
 |   set_next_field_offset(host_offset, target_offset); | 
 |   return host_bitmap; | 
 | } | 
 |  | 
 | void Class::AddInvocationDispatcher(const String& target_name, | 
 |                                     const Array& args_desc, | 
 |                                     const Function& dispatcher) const { | 
 |   auto thread = Thread::Current(); | 
 |   ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); | 
 |  | 
 |   ASSERT(target_name.ptr() == dispatcher.name()); | 
 |  | 
 |   DispatcherSet dispatchers(invocation_dispatcher_cache() == | 
 |                                     Array::empty_array().ptr() | 
 |                                 ? HashTables::New<DispatcherSet>(4, Heap::kOld) | 
 |                                 : invocation_dispatcher_cache()); | 
 |   dispatchers.Insert(dispatcher); | 
 |   set_invocation_dispatcher_cache(dispatchers.Release()); | 
 | } | 
 |  | 
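// Looks up (and, if requested, creates) the invocation dispatcher for the
// given target name, arguments descriptor and kind. Uses double-checked
// locking: a lock-free lookup first, then a re-check and insertion under the
// program write lock.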
 | FunctionPtr Class::GetInvocationDispatcher(const String& target_name, | 
 |                                            const Array& args_desc, | 
 |                                            UntaggedFunction::Kind kind, | 
 |                                            bool create_if_absent) const { | 
 |   ASSERT(kind == UntaggedFunction::kNoSuchMethodDispatcher || | 
 |          kind == UntaggedFunction::kInvokeFieldDispatcher || | 
 |          kind == UntaggedFunction::kDynamicInvocationForwarder); | 
 |   auto thread = Thread::Current(); | 
 |   auto Z = thread->zone(); | 
 |   auto& function = Function::Handle(Z); | 
 |  | 
 |   // First we'll try to find it without using locks. | 
 |   DispatcherKey key(target_name, args_desc, kind); | 
 |   if (invocation_dispatcher_cache() != Array::empty_array().ptr()) { | 
 |     DispatcherSet dispatchers(Z, invocation_dispatcher_cache()); | 
 |     function ^= dispatchers.GetOrNull(key); | 
 |     dispatchers.Release(); | 
 |   } | 
 |   if (!function.IsNull() || !create_if_absent) { | 
 |     return function.ptr(); | 
 |   } | 
 |  | 
 |   // If we failed to find it and possibly need to create it, use a write lock. | 
 |   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |  | 
 |   // Try to find it again & return if it was added in the meantime. | 
 |   if (invocation_dispatcher_cache() != Array::empty_array().ptr()) { | 
 |     DispatcherSet dispatchers(Z, invocation_dispatcher_cache()); | 
 |     function ^= dispatchers.GetOrNull(key); | 
 |     dispatchers.Release(); | 
 |   } | 
 |   if (!function.IsNull()) return function.ptr(); | 
 |  | 
 |   // Otherwise create it & add it. | 
 |   function = CreateInvocationDispatcher(target_name, args_desc, kind); | 
 |   AddInvocationDispatcher(target_name, args_desc, function); | 
 |   return function.ptr(); | 
 | } | 
 |  | 
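// Creates a dispatcher function whose signature mirrors the given arguments
// descriptor. For example (illustrative): for a dynamic call site
// obj.foo<int>(a, b: 42) with no matching member, the created noSuchMethod
// dispatcher is generic with one type parameter and has the parameters
// (this, :p1, {b}), all typed dynamic.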
 | FunctionPtr Class::CreateInvocationDispatcher( | 
 |     const String& target_name, | 
 |     const Array& args_desc, | 
 |     UntaggedFunction::Kind kind) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   FunctionType& signature = FunctionType::Handle(zone, FunctionType::New()); | 
 |   Function& invocation = Function::Handle( | 
 |       zone, Function::New( | 
 |                 signature, | 
 |                 String::Handle(zone, Symbols::New(thread, target_name)), kind, | 
 |                 false,  // Not static. | 
 |                 false,  // Not const. | 
 |                 false,  // Not abstract. | 
 |                 false,  // Not external. | 
 |                 false,  // Not native. | 
 |                 *this, TokenPosition::kMinSource)); | 
 |   ArgumentsDescriptor desc(args_desc); | 
 |   const intptr_t type_args_len = desc.TypeArgsLen(); | 
 |   if (type_args_len > 0) { | 
    // Make the dispatcher function generic, since type arguments are passed.
 |     const auto& type_parameters = | 
 |         TypeParameters::Handle(zone, TypeParameters::New(type_args_len)); | 
 |     // Allow any type, as any type checking is compiled into the dispatcher. | 
 |     auto& bound = Type::Handle( | 
 |         zone, IsolateGroup::Current()->object_store()->nullable_object_type()); | 
 |     for (intptr_t i = 0; i < type_args_len; i++) { | 
 |       // The name of the type parameter does not matter, as a type error using | 
 |       // it should never be thrown. | 
 |       type_parameters.SetNameAt(i, Symbols::OptimizedOut()); | 
 |       type_parameters.SetBoundAt(i, bound); | 
 |       // Type arguments will always be provided, so the default is not used. | 
 |       type_parameters.SetDefaultAt(i, Object::dynamic_type()); | 
 |     } | 
 |     signature.SetTypeParameters(type_parameters); | 
 |   } | 
 |  | 
 |   signature.set_num_fixed_parameters(desc.PositionalCount()); | 
 |   signature.SetNumOptionalParameters(desc.NamedCount(), | 
 |                                      false);  // Not positional. | 
 |   signature.set_parameter_types( | 
 |       Array::Handle(zone, Array::New(desc.Count(), Heap::kOld))); | 
 |   invocation.CreateNameArray(); | 
 |   signature.CreateNameArrayIncludingFlags(); | 
 |   // Receiver. | 
 |   signature.SetParameterTypeAt(0, Object::dynamic_type()); | 
 |   invocation.SetParameterNameAt(0, Symbols::This()); | 
 |   // Remaining positional parameters. | 
 |   for (intptr_t i = 1; i < desc.PositionalCount(); i++) { | 
 |     signature.SetParameterTypeAt(i, Object::dynamic_type()); | 
 |     char name[64]; | 
 |     Utils::SNPrint(name, 64, ":p%" Pd, i); | 
 |     invocation.SetParameterNameAt( | 
 |         i, String::Handle(zone, Symbols::New(thread, name))); | 
 |   } | 
 |  | 
 |   // Named parameters. | 
 |   for (intptr_t i = 0; i < desc.NamedCount(); i++) { | 
 |     const intptr_t param_index = desc.PositionAt(i); | 
 |     const auto& param_name = String::Handle(zone, desc.NameAt(i)); | 
 |     signature.SetParameterTypeAt(param_index, Object::dynamic_type()); | 
 |     signature.SetParameterNameAt(param_index, param_name); | 
 |   } | 
 |   signature.FinalizeNameArray(); | 
 |   signature.set_result_type(Object::dynamic_type()); | 
 |   invocation.set_is_debuggable(false); | 
 |   invocation.set_is_visible(false); | 
 |   invocation.set_is_reflectable(false); | 
 |   invocation.set_saved_args_desc(args_desc); | 
 |  | 
 |   signature ^= ClassFinalizer::FinalizeType(signature); | 
 |   invocation.SetSignature(signature); | 
 |  | 
 |   return invocation.ptr(); | 
 | } | 
 |  | 
// Method extractors are used to create implicit closures from methods.
// When an expression obj.M is evaluated for the first time, and the receiver
// obj does not have a getter called M but has a method called M, then an
// extractor is created and injected as a getter (under the name get:M) into
// the class owning method M.
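// For example (illustrative Dart code): given
//
//   class C {
//     int M() => 0;
//   }
//
// evaluating c.M for an instance c of C creates an extractor get:M on C
// which returns an implicit closure of M bound to c.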
 | FunctionPtr Function::CreateMethodExtractor(const String& getter_name) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   ASSERT(Field::IsGetterName(getter_name)); | 
 |   const Function& closure_function = | 
 |       Function::Handle(zone, ImplicitClosureFunction()); | 
 |  | 
 |   const Class& owner = Class::Handle(zone, closure_function.Owner()); | 
 |   FunctionType& signature = FunctionType::Handle(zone, FunctionType::New()); | 
 |   const Function& extractor = Function::Handle( | 
 |       zone, | 
 |       Function::New(signature, | 
 |                     String::Handle(zone, Symbols::New(thread, getter_name)), | 
 |                     UntaggedFunction::kMethodExtractor, | 
 |                     false,  // Not static. | 
 |                     false,  // Not const. | 
 |                     is_abstract(), | 
 |                     false,  // Not external. | 
 |                     false,  // Not native. | 
 |                     owner, TokenPosition::kMethodExtractor)); | 
 |  | 
 |   // Initialize signature: receiver is a single fixed parameter. | 
 |   const intptr_t kNumParameters = 1; | 
 |   signature.set_num_fixed_parameters(kNumParameters); | 
 |   signature.SetNumOptionalParameters(0, false); | 
 |   signature.set_parameter_types(Object::synthetic_getter_parameter_types()); | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   extractor.set_positional_parameter_names( | 
 |       Object::synthetic_getter_parameter_names()); | 
 | #endif | 
 |   signature.set_result_type(Object::dynamic_type()); | 
 |  | 
 |   extractor.InheritKernelOffsetFrom(*this); | 
 |  | 
 |   extractor.set_extracted_method_closure(closure_function); | 
 |   extractor.set_is_debuggable(false); | 
 |   extractor.set_is_visible(false); | 
 |  | 
 |   signature ^= ClassFinalizer::FinalizeType(signature); | 
 |   extractor.SetSignature(signature); | 
 |  | 
 |   owner.AddFunction(extractor); | 
 |  | 
 |   return extractor.ptr(); | 
 | } | 
 |  | 
 | FunctionPtr Function::GetMethodExtractor(const String& getter_name) const { | 
 |   ASSERT(Field::IsGetterName(getter_name)); | 
 |   const Function& closure_function = | 
 |       Function::Handle(ImplicitClosureFunction()); | 
 |   const Class& owner = Class::Handle(closure_function.Owner()); | 
 |   Thread* thread = Thread::Current(); | 
 |   if (owner.EnsureIsFinalized(thread) != Error::null()) { | 
 |     return Function::null(); | 
 |   } | 
 |   IsolateGroup* group = thread->isolate_group(); | 
 |   Function& result = Function::Handle( | 
 |       Resolver::ResolveDynamicFunction(thread->zone(), owner, getter_name)); | 
 |   if (result.IsNull()) { | 
 |     SafepointWriteRwLocker ml(thread, group->program_lock()); | 
 |     result = owner.LookupDynamicFunctionUnsafe(getter_name); | 
 |     if (result.IsNull()) { | 
 |       result = CreateMethodExtractor(getter_name); | 
 |     } | 
 |   } | 
 |   ASSERT(result.kind() == UntaggedFunction::kMethodExtractor); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | // Record field getters are used to access fields of arbitrary | 
 | // record instances dynamically. | 
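// For example (illustrative Dart code): a dynamic access r.foo, where r is
// some record with a named field foo, is served by an injected get:foo
// function on the record's class.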
 | FunctionPtr Class::CreateRecordFieldGetter(const String& getter_name) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   ASSERT(IsRecordClass()); | 
 |   ASSERT(Field::IsGetterName(getter_name)); | 
 |   FunctionType& signature = FunctionType::Handle(zone, FunctionType::New()); | 
 |   const Function& getter = Function::Handle( | 
 |       zone, | 
 |       Function::New(signature, | 
 |                     String::Handle(zone, Symbols::New(thread, getter_name)), | 
 |                     UntaggedFunction::kRecordFieldGetter, | 
 |                     false,  // Not static. | 
 |                     false,  // Not const. | 
 |                     false,  // Not abstract. | 
 |                     false,  // Not external. | 
 |                     false,  // Not native. | 
 |                     *this, TokenPosition::kMinSource)); | 
 |  | 
 |   // Initialize signature: receiver is a single fixed parameter. | 
 |   const intptr_t kNumParameters = 1; | 
 |   signature.set_num_fixed_parameters(kNumParameters); | 
 |   signature.SetNumOptionalParameters(0, false); | 
 |   signature.set_parameter_types(Object::synthetic_getter_parameter_types()); | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   getter.set_positional_parameter_names( | 
 |       Object::synthetic_getter_parameter_names()); | 
 | #endif | 
 |   signature.set_result_type(Object::dynamic_type()); | 
 |  | 
 |   getter.set_is_debuggable(false); | 
 |   getter.set_is_visible(false); | 
 |  | 
 |   signature ^= ClassFinalizer::FinalizeType(signature); | 
 |   getter.SetSignature(signature); | 
 |  | 
 |   AddFunction(getter); | 
 |  | 
 |   return getter.ptr(); | 
 | } | 
 |  | 
 | FunctionPtr Class::GetRecordFieldGetter(const String& getter_name) const { | 
 |   ASSERT(IsRecordClass()); | 
 |   ASSERT(Field::IsGetterName(getter_name)); | 
 |   Thread* thread = Thread::Current(); | 
 |   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |   Function& result = Function::Handle(thread->zone(), | 
 |                                       LookupDynamicFunctionUnsafe(getter_name)); | 
 |   if (result.IsNull()) { | 
 |     result = CreateRecordFieldGetter(getter_name); | 
 |   } | 
 |   ASSERT(result.kind() == UntaggedFunction::kRecordFieldGetter); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
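// Searches the evaluated metadata for @pragma annotations with the given
// name. For example (illustrative Dart code):
//
//   @pragma('vm:entry-point', 'get')
//
// matches pragma_name 'vm:entry-point' and yields 'get' as its options
// value. If [multiple] is true, the options values of all matching pragmas
// are collected into a growable array returned via [options].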
 | bool FindPragmaInMetadata(Thread* T, | 
 |                           const Object& metadata_obj, | 
 |                           const String& pragma_name, | 
 |                           bool multiple, | 
 |                           Object* options) { | 
 |   auto IG = T->isolate_group(); | 
 |   auto Z = T->zone(); | 
 |  | 
 |   // If there is a compile-time error while evaluating the metadata, we will | 
 |   // simply claim there was no @pragma annotation. | 
 |   if (metadata_obj.IsNull() || metadata_obj.IsLanguageError()) { | 
 |     return false; | 
 |   } | 
 |   ASSERT(metadata_obj.IsArray()); | 
 |  | 
 |   auto& metadata = Array::Cast(metadata_obj); | 
 |   auto& pragma_class = Class::Handle(Z, IG->object_store()->pragma_class()); | 
 |   if (pragma_class.IsNull()) { | 
 |     // Precompiler may drop pragma class. | 
 |     return false; | 
 |   } | 
 |   auto& pragma_name_field = | 
 |       Field::Handle(Z, pragma_class.LookupField(Symbols::name())); | 
 |   auto& pragma_options_field = | 
 |       Field::Handle(Z, pragma_class.LookupField(Symbols::options())); | 
 |  | 
 |   auto& pragma = Object::Handle(Z); | 
 |   bool found = false; | 
 |   auto& options_value = Object::Handle(Z); | 
 |   auto& results = GrowableObjectArray::Handle(Z); | 
 |   if (multiple) { | 
 |     ASSERT(options != nullptr); | 
 |     results ^= GrowableObjectArray::New(1); | 
 |   } | 
 |   for (intptr_t i = 0; i < metadata.Length(); ++i) { | 
 |     pragma = metadata.At(i); | 
 |     if (pragma.clazz() != pragma_class.ptr() || | 
 |         Instance::Cast(pragma).GetField(pragma_name_field) != | 
 |             pragma_name.ptr()) { | 
 |       continue; | 
 |     } | 
 |     options_value = Instance::Cast(pragma).GetField(pragma_options_field); | 
 |     found = true; | 
 |     if (multiple) { | 
 |       results.Add(options_value); | 
 |       continue; | 
 |     } | 
 |     if (options != nullptr) { | 
 |       *options = options_value.ptr(); | 
 |     } | 
 |     return true; | 
 |   } | 
 |  | 
 |   if (found && options != nullptr) { | 
 |     *options = results.ptr(); | 
 |   } | 
  return found;
 | } | 
 |  | 
 | bool Library::FindPragma(Thread* T, | 
 |                          bool only_core, | 
 |                          const Object& obj, | 
 |                          const String& pragma_name, | 
 |                          bool multiple, | 
 |                          Object* options) { | 
 |   auto Z = T->zone(); | 
 |   auto& lib = Library::Handle(Z); | 
 |  | 
 |   if (obj.IsLibrary()) { | 
 |     lib = Library::Cast(obj).ptr(); | 
 |   } else if (obj.IsClass()) { | 
 |     auto& klass = Class::Cast(obj); | 
 |     if (!klass.has_pragma()) return false; | 
 |     lib = klass.library(); | 
 |   } else if (obj.IsFunction()) { | 
 |     auto& function = Function::Cast(obj); | 
 |     if (!function.has_pragma()) return false; | 
 |     lib = Class::Handle(Z, function.Owner()).library(); | 
 |   } else if (obj.IsField()) { | 
 |     auto& field = Field::Cast(obj); | 
 |     if (!field.has_pragma()) return false; | 
 |     lib = Class::Handle(Z, field.Owner()).library(); | 
 |   } else { | 
 |     UNREACHABLE(); | 
 |   } | 
 |  | 
 |   if (only_core && !lib.IsAnyCoreLibrary()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   Object& metadata_obj = Object::Handle(Z, lib.GetMetadata(obj)); | 
 |   if (metadata_obj.IsUnwindError()) { | 
 |     Report::LongJump(UnwindError::Cast(metadata_obj)); | 
 |   } | 
 |  | 
 |   return FindPragmaInMetadata(T, metadata_obj, pragma_name, multiple, options); | 
 | } | 
 |  | 
 | bool Function::IsDynamicInvocationForwarderName(const String& name) { | 
 |   return IsDynamicInvocationForwarderName(name.ptr()); | 
 | } | 
 |  | 
 | bool Function::IsDynamicInvocationForwarderName(StringPtr name) { | 
 |   return String::StartsWith(name, Symbols::DynamicPrefix().ptr()); | 
 | } | 
 |  | 
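// For example, the mangled name "dyn:add" demangles to "add".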
 | StringPtr Function::DemangleDynamicInvocationForwarderName(const String& name) { | 
 |   const intptr_t kDynamicPrefixLength = 4;  // "dyn:" | 
 |   ASSERT(Symbols::DynamicPrefix().Length() == kDynamicPrefixLength); | 
 |   return Symbols::New(Thread::Current(), name, kDynamicPrefixLength, | 
 |                       name.Length() - kDynamicPrefixLength); | 
 | } | 
 |  | 
 | StringPtr Function::CreateDynamicInvocationForwarderName(const String& name) { | 
 |   return Symbols::FromConcat(Thread::Current(), Symbols::DynamicPrefix(), name); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
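// A dynamic invocation forwarder (mangled as "dyn:name") performs the
// argument checks that statically typed call sites are allowed to omit and
// then forwards to the original target; whether a given target needs such a
// forwarder is decided by kernel::NeedsDynamicInvocationForwarder (see
// GetDynamicInvocationForwarder below).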
 | FunctionPtr Function::CreateDynamicInvocationForwarder( | 
 |     const String& mangled_name) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |  | 
 |   Function& forwarder = Function::Handle(zone); | 
 |   forwarder ^= Object::Clone(*this, Heap::kOld); | 
 |  | 
 |   forwarder.reset_unboxed_parameters_and_return(); | 
 |  | 
 |   forwarder.set_name(mangled_name); | 
 |   forwarder.set_is_native(false); | 
 |   // TODO(dartbug.com/37737): Currently, we intentionally keep the recognized | 
 |   // kind when creating the dynamic invocation forwarder. | 
 |   forwarder.set_kind(UntaggedFunction::kDynamicInvocationForwarder); | 
 |   forwarder.set_modifier(UntaggedFunction::kNoModifier); | 
 |   forwarder.set_is_debuggable(false); | 
 |  | 
  // TODO(vegorov): For error reporting reasons it is better to make this
  // function visible and instead use a TailCall to invoke the target.
  // Our TailCall instruction is not ready for such usage though: it blocks
  // inlining and can take only Code objects, not Functions.
 |   forwarder.set_is_visible(false); | 
 |  | 
 |   forwarder.ClearICDataArray(); | 
 |   forwarder.ClearCode(); | 
 |   forwarder.set_usage_counter(0); | 
 |   forwarder.set_deoptimization_counter(0); | 
 |   forwarder.set_optimized_instruction_count(0); | 
 |   forwarder.set_inlining_depth(0); | 
 |   forwarder.set_optimized_call_site_count(0); | 
 |  | 
 |   forwarder.InheritKernelOffsetFrom(*this); | 
 |   forwarder.SetForwardingTarget(*this); | 
 |  | 
 |   return forwarder.ptr(); | 
 | } | 
 |  | 
 | FunctionPtr Function::GetDynamicInvocationForwarder( | 
 |     const String& mangled_name) const { | 
 |   ASSERT(IsDynamicInvocationForwarderName(mangled_name)); | 
 |   auto thread = Thread::Current(); | 
 |   auto zone = thread->zone(); | 
 |   const Class& owner = Class::Handle(zone, Owner()); | 
 |   Function& result = Function::Handle(zone); | 
 |  | 
 |   // First we'll try to find it without using locks. | 
 |   result = owner.GetInvocationDispatcher( | 
 |       mangled_name, Array::null_array(), | 
 |       UntaggedFunction::kDynamicInvocationForwarder, | 
 |       /*create_if_absent=*/false); | 
 |   if (!result.IsNull()) return result.ptr(); | 
 |  | 
 |   const bool needs_dyn_forwarder = | 
 |       kernel::NeedsDynamicInvocationForwarder(*this); | 
 |   if (!needs_dyn_forwarder) { | 
 |     return ptr(); | 
 |   } | 
 |  | 
 |   // If we failed to find it and possibly need to create it, use a write lock. | 
 |   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |  | 
  // Try to find it again & return if it was added in the meantime.
 |   result = owner.GetInvocationDispatcher( | 
 |       mangled_name, Array::null_array(), | 
 |       UntaggedFunction::kDynamicInvocationForwarder, | 
 |       /*create_if_absent=*/false); | 
 |   if (!result.IsNull()) return result.ptr(); | 
 |  | 
 |   // Otherwise create it & add it. | 
 |   result = CreateDynamicInvocationForwarder(mangled_name); | 
 |   owner.AddInvocationDispatcher(mangled_name, Array::null_array(), result); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
#endif  // !defined(DART_PRECOMPILED_RUNTIME)
 |  | 
 | bool AbstractType::InstantiateAndTestSubtype( | 
 |     AbstractType* subtype, | 
 |     AbstractType* supertype, | 
 |     const TypeArguments& instantiator_type_args, | 
 |     const TypeArguments& function_type_args) { | 
 |   if (!subtype->IsInstantiated()) { | 
 |     *subtype = subtype->InstantiateFrom( | 
 |         instantiator_type_args, function_type_args, kAllFree, Heap::kOld); | 
 |   } | 
 |   if (!supertype->IsInstantiated()) { | 
 |     *supertype = supertype->InstantiateFrom( | 
 |         instantiator_type_args, function_type_args, kAllFree, Heap::kOld); | 
 |   } | 
 |   return subtype->IsSubtypeOf(*supertype, Heap::kOld); | 
 | } | 
 |  | 
 | ArrayPtr Class::invocation_dispatcher_cache() const { | 
 |   return untag()->invocation_dispatcher_cache<std::memory_order_acquire>(); | 
 | } | 
 |  | 
 | void Class::Finalize() const { | 
 |   auto thread = Thread::Current(); | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   ASSERT(!thread->isolate_group()->all_classes_finalized()); | 
 |   ASSERT(!is_finalized()); | 
  // Prefinalized classes have a VM internal representation and no Dart
  // fields. Their instance size is precomputed and field offsets are known.
 |   if (!is_prefinalized()) { | 
 |     // Compute offsets of instance fields, instance size and bitmap for unboxed | 
 |     // fields. | 
 |     const auto host_bitmap = CalculateFieldOffsets(); | 
 |     if (ptr() == isolate_group->class_table()->At(id())) { | 
 |       if (!ClassTable::IsTopLevelCid(id())) { | 
        // Unless the class is top-level (top-level classes are never
        // instantiated), set the new size in the class table.
 |         isolate_group->class_table()->UpdateClassSize(id(), ptr()); | 
 |         isolate_group->class_table()->SetUnboxedFieldsMapAt(id(), host_bitmap); | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
 | #if defined(DEBUG) | 
 |   if (is_const()) { | 
 |     // Double-check that all fields are final (CFE should guarantee that if it | 
 |     // marks the class as having a constant constructor). | 
 |     auto Z = thread->zone(); | 
 |     const auto& super_class = Class::Handle(Z, SuperClass()); | 
 |     ASSERT(super_class.IsNull() || super_class.is_const()); | 
 |     const auto& fields = Array::Handle(Z, this->fields()); | 
 |     auto& field = Field::Handle(Z); | 
 |     for (intptr_t i = 0; i < fields.Length(); ++i) { | 
 |       field ^= fields.At(i); | 
 |       ASSERT(field.is_static() || field.is_final()); | 
 |     } | 
 |   } | 
 | #endif | 
 |  | 
 |   set_is_finalized(); | 
 | } | 
 |  | 
 | #if defined(DEBUG) | 
 | static bool IsMutatorOrAtDeoptSafepoint() { | 
 |   Thread* thread = Thread::Current(); | 
 |   return thread->IsDartMutatorThread() || thread->OwnsDeoptSafepoint(); | 
 | } | 
 | #endif | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
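// Tracks code objects compiled under CHA (Class Hierarchy Analysis)
// assumptions about this class, so that they can be deoptimized or switched
// to unoptimized code when those assumptions are invalidated, e.g. by
// loading a new subclass.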
 | class CHACodeArray : public WeakCodeReferences { | 
 |  public: | 
 |   explicit CHACodeArray(const Class& cls) | 
 |       : WeakCodeReferences(WeakArray::Handle(cls.dependent_code())), | 
 |         cls_(cls) {} | 
 |  | 
 |   virtual void UpdateArrayTo(const WeakArray& value) { | 
 |     // TODO(fschneider): Fails for classes in the VM isolate. | 
 |     cls_.set_dependent_code(value); | 
 |   } | 
 |  | 
 |   virtual void ReportDeoptimization(const Code& code) { | 
 |     if (FLAG_trace_deoptimization || FLAG_trace_deoptimization_verbose) { | 
 |       Function& function = Function::Handle(code.function()); | 
 |       THR_Print("Deoptimizing %s because CHA optimized (%s).\n", | 
 |                 function.ToFullyQualifiedCString(), cls_.ToCString()); | 
 |     } | 
 |   } | 
 |  | 
 |   virtual void ReportSwitchingCode(const Code& code) { | 
 |     if (FLAG_trace_deoptimization || FLAG_trace_deoptimization_verbose) { | 
 |       Function& function = Function::Handle(code.function()); | 
 |       THR_Print( | 
 |           "Switching %s to unoptimized code because CHA invalid" | 
 |           " (%s)\n", | 
 |           function.ToFullyQualifiedCString(), cls_.ToCString()); | 
 |     } | 
 |   } | 
 |  | 
 |  private: | 
 |   const Class& cls_; | 
 |   DISALLOW_COPY_AND_ASSIGN(CHACodeArray); | 
 | }; | 
 |  | 
 | void Class::RegisterCHACode(const Code& code) { | 
 |   if (FLAG_trace_cha) { | 
 |     THR_Print("RegisterCHACode '%s' depends on class '%s'\n", | 
 |               Function::Handle(code.function()).ToQualifiedCString(), | 
 |               ToCString()); | 
 |   } | 
 |   DEBUG_ASSERT(IsMutatorOrAtDeoptSafepoint()); | 
 |   ASSERT(code.is_optimized()); | 
 |   CHACodeArray a(*this); | 
 |   a.Register(code); | 
 | } | 
 |  | 
 | void Class::DisableCHAOptimizedCode(const Class& subclass) { | 
 |   DEBUG_ASSERT( | 
 |       IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   CHACodeArray a(*this); | 
 |   if (FLAG_trace_deoptimization && a.HasCodes()) { | 
 |     if (subclass.IsNull()) { | 
 |       THR_Print("Deopt for CHA (all)\n"); | 
 |     } else { | 
 |       THR_Print("Deopt for CHA (new subclass %s)\n", subclass.ToCString()); | 
 |     } | 
 |   } | 
 |   a.DisableCode(/*are_mutators_stopped=*/false); | 
 | } | 
 |  | 
 | void Class::DisableAllCHAOptimizedCode() { | 
 |   DisableCHAOptimizedCode(Class::Handle()); | 
 | } | 
 |  | 
 | WeakArrayPtr Class::dependent_code() const { | 
 |   DEBUG_ASSERT( | 
 |       IsolateGroup::Current()->program_lock()->IsCurrentThreadReader()); | 
 |   return untag()->dependent_code(); | 
 | } | 
 |  | 
 | void Class::set_dependent_code(const WeakArray& array) const { | 
 |   DEBUG_ASSERT( | 
 |       IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   untag()->set_dependent_code(array.ptr()); | 
 | } | 
 |  | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | bool Class::TraceAllocation(IsolateGroup* isolate_group) const { | 
 | #ifndef PRODUCT | 
 |   auto class_table = isolate_group->class_table(); | 
 |   return class_table->ShouldTraceAllocationFor(id()); | 
 | #else | 
 |   return false; | 
 | #endif | 
 | } | 
 |  | 
 | void Class::SetTraceAllocation(bool trace_allocation) const { | 
 | #ifndef PRODUCT | 
 |   auto isolate_group = IsolateGroup::Current(); | 
 |   const bool changed = trace_allocation != this->TraceAllocation(isolate_group); | 
 |   if (changed) { | 
 |     auto class_table = isolate_group->class_table(); | 
 |     class_table->SetTraceAllocationFor(id(), trace_allocation); | 
 | #ifdef TARGET_ARCH_IA32 | 
 |     DisableAllocationStub(); | 
 | #endif | 
 |   } | 
 | #else | 
 |   UNREACHABLE(); | 
 | #endif | 
 | } | 
 |  | 
// Conventions:
// * For throwing a NoSuchMethodError in a library or top-level class (i.e.,
//   level is kTopLevel), if a method was found but was incompatible, we pass
//   the signature of the found method as a string, otherwise the null
//   instance.
// * Otherwise, for throwing a NoSuchMethodError in a class klass, we use its
//   runtime type as the receiver, i.e., klass.RareType().
 | static ObjectPtr ThrowNoSuchMethod(const Instance& receiver, | 
 |                                    const String& function_name, | 
 |                                    const Array& arguments, | 
 |                                    const Array& argument_names, | 
 |                                    const InvocationMirror::Level level, | 
 |                                    const InvocationMirror::Kind kind) { | 
 |   const Smi& invocation_type = | 
 |       Smi::Handle(Smi::New(InvocationMirror::EncodeType(level, kind))); | 
 |  | 
 |   ASSERT(!receiver.IsNull() || level == InvocationMirror::Level::kTopLevel); | 
 |   ASSERT(level != InvocationMirror::Level::kTopLevel || receiver.IsString()); | 
 |   const Array& args = Array::Handle(Array::New(7)); | 
 |   args.SetAt(0, receiver); | 
 |   args.SetAt(1, function_name); | 
 |   args.SetAt(2, invocation_type); | 
 |   args.SetAt(3, Object::smi_zero());  // Type arguments length. | 
 |   args.SetAt(4, Object::null_type_arguments()); | 
 |   args.SetAt(5, arguments); | 
 |   args.SetAt(6, argument_names); | 
 |  | 
 |   const Library& libcore = Library::Handle(Library::CoreLibrary()); | 
 |   const Class& cls = | 
 |       Class::Handle(libcore.LookupClass(Symbols::NoSuchMethodError())); | 
 |   ASSERT(!cls.IsNull()); | 
 |   const auto& error = cls.EnsureIsFinalized(Thread::Current()); | 
 |   ASSERT(error == Error::null()); | 
 |   const Function& throwNew = | 
 |       Function::Handle(cls.LookupFunctionAllowPrivate(Symbols::ThrowNew())); | 
 |   return DartEntry::InvokeFunction(throwNew, args); | 
 | } | 
 |  | 
 | static ObjectPtr ThrowTypeError(const TokenPosition token_pos, | 
 |                                 const Instance& src_value, | 
 |                                 const AbstractType& dst_type, | 
 |                                 const String& dst_name) { | 
 |   const Array& args = Array::Handle(Array::New(4)); | 
 |   const Smi& pos = Smi::Handle(Smi::New(token_pos.Serialize())); | 
 |   args.SetAt(0, pos); | 
 |   args.SetAt(1, src_value); | 
 |   args.SetAt(2, dst_type); | 
 |   args.SetAt(3, dst_name); | 
 |  | 
 |   const Library& libcore = Library::Handle(Library::CoreLibrary()); | 
 |   const Class& cls = | 
 |       Class::Handle(libcore.LookupClassAllowPrivate(Symbols::TypeError())); | 
 |   const auto& error = cls.EnsureIsFinalized(Thread::Current()); | 
 |   ASSERT(error == Error::null()); | 
 |   const Function& throwNew = | 
 |       Function::Handle(cls.LookupFunctionAllowPrivate(Symbols::ThrowNew())); | 
 |   return DartEntry::InvokeFunction(throwNew, args); | 
 | } | 
 |  | 
 | ObjectPtr Class::InvokeGetter(const String& getter_name, | 
 |                               bool throw_nsm_if_absent, | 
 |                               bool respect_reflectable, | 
 |                               bool check_is_entrypoint) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |  | 
 |   CHECK_ERROR(EnsureIsFinalized(thread)); | 
 |  | 
 |   // Note static fields do not have implicit getters. | 
 |   const Field& field = Field::Handle(zone, LookupStaticField(getter_name)); | 
 |  | 
 |   if (!field.IsNull() && check_is_entrypoint) { | 
 |     CHECK_ERROR(field.VerifyEntryPoint(EntryPointPragma::kGetterOnly)); | 
 |   } | 
 |  | 
 |   if (field.IsNull() || field.IsUninitialized()) { | 
 |     const String& internal_getter_name = | 
 |         String::Handle(zone, Field::GetterName(getter_name)); | 
 |     Function& getter = | 
 |         Function::Handle(zone, LookupStaticFunction(internal_getter_name)); | 
 |  | 
 |     if (field.IsNull() && !getter.IsNull() && check_is_entrypoint) { | 
 |       CHECK_ERROR(getter.VerifyCallEntryPoint()); | 
 |     } | 
 |  | 
 |     if (getter.IsNull() || (respect_reflectable && !getter.is_reflectable())) { | 
 |       if (getter.IsNull()) { | 
 |         getter = LookupStaticFunction(getter_name); | 
 |         if (!getter.IsNull()) { | 
 |           if (check_is_entrypoint) { | 
 |             CHECK_ERROR(getter.VerifyClosurizedEntryPoint()); | 
 |           } | 
 |           if (getter.SafeToClosurize()) { | 
 |             // Looking for a getter but found a regular method: closurize it. | 
 |             const Function& closure_function = | 
 |                 Function::Handle(zone, getter.ImplicitClosureFunction()); | 
 |             return closure_function.ImplicitStaticClosure(); | 
 |           } | 
 |         } | 
 |       } | 
 |       if (throw_nsm_if_absent) { | 
 |         return ThrowNoSuchMethod( | 
 |             AbstractType::Handle(zone, RareType()), getter_name, | 
 |             Object::null_array(), Object::null_array(), | 
 |             InvocationMirror::kStatic, InvocationMirror::kGetter); | 
 |       } | 
      // Fall-through case: indicate that we didn't find any function or
      // field by returning a special sentinel instance. This is different
      // from a field being null. Callers make sure that this sentinel does
      // not leak into Dartland.
 |       return Object::sentinel().ptr(); | 
 |     } | 
 |  | 
 |     // Invoke the getter and return the result. | 
 |     return DartEntry::InvokeFunction(getter, Object::empty_array()); | 
 |   } | 
 |  | 
 |   return field.StaticValue(); | 
 | } | 
 |  | 
 | ObjectPtr Class::InvokeSetter(const String& setter_name, | 
 |                               const Instance& value, | 
 |                               bool respect_reflectable, | 
 |                               bool check_is_entrypoint) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |  | 
 |   CHECK_ERROR(EnsureIsFinalized(thread)); | 
 |  | 
 |   // Check for real fields and user-defined setters. | 
 |   const Field& field = Field::Handle(zone, LookupStaticField(setter_name)); | 
 |   const String& internal_setter_name = | 
 |       String::Handle(zone, Field::SetterName(setter_name)); | 
 |  | 
 |   if (!field.IsNull() && check_is_entrypoint) { | 
 |     CHECK_ERROR(field.VerifyEntryPoint(EntryPointPragma::kSetterOnly)); | 
 |   } | 
 |  | 
 |   AbstractType& parameter_type = AbstractType::Handle(zone); | 
 |   if (field.IsNull()) { | 
 |     const Function& setter = | 
 |         Function::Handle(zone, LookupStaticFunction(internal_setter_name)); | 
 |     if (!setter.IsNull() && check_is_entrypoint) { | 
 |       CHECK_ERROR(setter.VerifyCallEntryPoint()); | 
 |     } | 
 |     const int kNumArgs = 1; | 
 |     const Array& args = Array::Handle(zone, Array::New(kNumArgs)); | 
 |     args.SetAt(0, value); | 
 |     if (setter.IsNull() || (respect_reflectable && !setter.is_reflectable())) { | 
 |       return ThrowNoSuchMethod(AbstractType::Handle(zone, RareType()), | 
 |                                internal_setter_name, args, Object::null_array(), | 
 |                                InvocationMirror::kStatic, | 
 |                                InvocationMirror::kSetter); | 
 |     } | 
 |     parameter_type = setter.ParameterTypeAt(0); | 
 |     if (!value.RuntimeTypeIsSubtypeOf(parameter_type, | 
 |                                       Object::null_type_arguments(), | 
 |                                       Object::null_type_arguments())) { | 
 |       const String& argument_name = | 
 |           String::Handle(zone, setter.ParameterNameAt(0)); | 
 |       return ThrowTypeError(setter.token_pos(), value, parameter_type, | 
 |                             argument_name); | 
 |     } | 
 |     // Invoke the setter and return the result. | 
 |     return DartEntry::InvokeFunction(setter, args); | 
 |   } | 
 |  | 
 |   if (field.is_final() || (respect_reflectable && !field.is_reflectable())) { | 
 |     const int kNumArgs = 1; | 
 |     const Array& args = Array::Handle(zone, Array::New(kNumArgs)); | 
 |     args.SetAt(0, value); | 
 |     return ThrowNoSuchMethod(AbstractType::Handle(zone, RareType()), | 
 |                              internal_setter_name, args, Object::null_array(), | 
 |                              InvocationMirror::kStatic, | 
 |                              InvocationMirror::kSetter); | 
 |   } | 
 |  | 
 |   parameter_type = field.type(); | 
 |   if (!value.RuntimeTypeIsSubtypeOf(parameter_type, | 
 |                                     Object::null_type_arguments(), | 
 |                                     Object::null_type_arguments())) { | 
 |     const String& argument_name = String::Handle(zone, field.name()); | 
 |     return ThrowTypeError(field.token_pos(), value, parameter_type, | 
 |                           argument_name); | 
 |   } | 
 |   field.SetStaticValue(value); | 
 |   return value.ptr(); | 
 | } | 
 |  | 
// Creates a new array of boxed arguments suitable for invoking the callable
// from the original boxed arguments for a static call, with [receiver]
// stored in the new receiver slot.
 | // | 
 | // Assumes [arg_names] are consistent with [static_args_descriptor]. | 
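// For example (illustrative): static args [TAV, a0, a1], where the leading
// type argument vector is present because type arguments were passed, become
// the callable args [TAV, receiver, a0, a1].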
 | static ArrayPtr CreateCallableArgumentsFromStatic( | 
 |     Zone* zone, | 
 |     const Instance& receiver, | 
 |     const Array& static_args, | 
 |     const Array& arg_names, | 
 |     const ArgumentsDescriptor& static_args_descriptor) { | 
 |   const intptr_t num_static_type_args = static_args_descriptor.TypeArgsLen(); | 
 |   const intptr_t num_static_args = static_args_descriptor.Count(); | 
  // Double-check that the static args descriptor expects boxed arguments
  // and is consistent with the static arguments.
 |   ASSERT_EQUAL(static_args_descriptor.Size(), num_static_args); | 
 |   ASSERT_EQUAL(static_args.Length(), | 
 |                num_static_args + (num_static_type_args > 0 ? 1 : 0)); | 
 |   // Add an additional slot to store the callable as the receiver. | 
 |   const auto& callable_args = | 
 |       Array::Handle(zone, Array::New(static_args.Length() + 1)); | 
 |   const intptr_t first_arg_index = static_args_descriptor.FirstArgIndex(); | 
 |   auto& temp = Object::Handle(zone); | 
 |   // Copy the static args into the corresponding slots of the callable args. | 
 |   if (num_static_type_args > 0) { | 
 |     temp = static_args.At(0); | 
 |     callable_args.SetAt(0, temp); | 
 |   } | 
 |   for (intptr_t i = first_arg_index; i < static_args.Length(); i++) { | 
 |     temp = static_args.At(i); | 
 |     callable_args.SetAt(i + 1, temp); | 
 |   } | 
 |   // Set the receiver slot in the callable args. | 
 |   callable_args.SetAt(first_arg_index, receiver); | 
 |   return callable_args.ptr(); | 
 | } | 
 |  | 
 | ObjectPtr Class::Invoke(const String& function_name, | 
 |                         const Array& args, | 
 |                         const Array& arg_names, | 
 |                         bool respect_reflectable, | 
 |                         bool check_is_entrypoint) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   CHECK_ERROR(EnsureIsFinalized(thread)); | 
 |  | 
 |   // We don't pass any explicit type arguments, which will be understood as | 
 |   // using dynamic for any function type arguments by lower layers. | 
 |   const int kTypeArgsLen = 0; | 
 |   const Array& args_descriptor_array = Array::Handle( | 
 |       zone, ArgumentsDescriptor::NewBoxed(kTypeArgsLen, args.Length(), | 
 |                                           arg_names, Heap::kNew)); | 
 |   ArgumentsDescriptor args_descriptor(args_descriptor_array); | 
 |  | 
 |   Function& function = | 
 |       Function::Handle(zone, LookupStaticFunction(function_name)); | 
 |  | 
 |   if (!function.IsNull() && check_is_entrypoint) { | 
 |     CHECK_ERROR(function.VerifyCallEntryPoint()); | 
 |   } | 
 |  | 
 |   if (function.IsNull()) { | 
 |     // Didn't find a method: try to find a getter and invoke call on its result. | 
 |     const Object& getter_result = Object::Handle( | 
 |         zone, InvokeGetter(function_name, false, respect_reflectable, | 
 |                            check_is_entrypoint)); | 
 |     if (getter_result.ptr() != Object::sentinel().ptr()) { | 
 |       if (check_is_entrypoint) { | 
 |         CHECK_ERROR(EntryPointFieldInvocationError(function_name)); | 
 |       } | 
 |       const auto& call_args_descriptor_array = Array::Handle( | 
 |           zone, ArgumentsDescriptor::NewBoxed(args_descriptor.TypeArgsLen(), | 
 |                                               args_descriptor.Count() + 1, | 
 |                                               arg_names, Heap::kNew)); | 
 |       const auto& call_args = Array::Handle( | 
 |           zone, | 
 |           CreateCallableArgumentsFromStatic(zone, Instance::Cast(getter_result), | 
 |                                             args, arg_names, args_descriptor)); | 
 |       return DartEntry::InvokeClosure(thread, call_args, | 
 |                                       call_args_descriptor_array); | 
 |     } | 
 |   } | 
 |  | 
 |   if (function.IsNull() || | 
 |       !function.AreValidArguments(args_descriptor, nullptr) || | 
 |       (respect_reflectable && !function.is_reflectable())) { | 
 |     return ThrowNoSuchMethod( | 
 |         AbstractType::Handle(zone, RareType()), function_name, args, arg_names, | 
 |         InvocationMirror::kStatic, InvocationMirror::kMethod); | 
 |   } | 
 |   // This is a static function, so we pass an empty instantiator tav. | 
 |   ASSERT(function.is_static()); | 
 |   ObjectPtr type_error = function.DoArgumentTypesMatch( | 
 |       args, args_descriptor, Object::empty_type_arguments()); | 
 |   if (type_error != Error::null()) { | 
 |     return type_error; | 
 |   } | 
 |   return DartEntry::InvokeFunction(function, args, args_descriptor_array); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | static ObjectPtr LoadExpressionEvaluationFunction( | 
 |     Zone* zone, | 
 |     const ExternalTypedData& kernel_buffer, | 
 |     const String& library_url, | 
 |     const String& klass) { | 
 |   std::unique_ptr<kernel::Program> kernel_pgm = | 
 |       kernel::Program::ReadFromTypedData(kernel_buffer); | 
 |  | 
 |   if (kernel_pgm == nullptr) { | 
 |     return ApiError::New(String::Handle( | 
 |         zone, String::New("Kernel isolate returned ill-formed kernel."))); | 
 |   } | 
 |  | 
 |   auto& result = Object::Handle(zone); | 
 |   { | 
 |     kernel::KernelLoader loader(kernel_pgm.get(), | 
 |                                 /*uri_to_source_table=*/nullptr); | 
 |     result = loader.LoadExpressionEvaluationFunction(library_url, klass); | 
 |     kernel_pgm.reset(); | 
 |   } | 
 |   if (result.IsError()) return result.ptr(); | 
 |   return Function::Cast(result).ptr(); | 
 | } | 
 |  | 
 | static bool EvaluationFunctionNeedsReceiver(Thread* thread, | 
 |                                             Zone* zone, | 
 |                                             const Function& eval_function) { | 
 |   auto parsed_function = new ParsedFunction( | 
 |       thread, Function::ZoneHandle(zone, eval_function.ptr())); | 
 |   parsed_function->EnsureKernelScopes(); | 
 |   return parsed_function->is_receiver_used(); | 
 | } | 
 |  | 
 | static ObjectPtr EvaluateCompiledExpressionHelper( | 
 |     Zone* zone, | 
 |     const Function& eval_function, | 
 |     const Array& type_definitions, | 
 |     const Array& arguments, | 
 |     const TypeArguments& type_arguments) { | 
 |   // type_arguments is null if all type arguments are dynamic. | 
 |   if (type_definitions.Length() == 0 || type_arguments.IsNull()) { | 
 |     return DartEntry::InvokeFunction(eval_function, arguments); | 
 |   } | 
 |  | 
 |   intptr_t num_type_args = type_arguments.Length(); | 
 |   const auto& real_arguments = | 
 |       Array::Handle(zone, Array::New(arguments.Length() + 1)); | 
 |   real_arguments.SetAt(0, type_arguments); | 
 |   Object& arg = Object::Handle(zone); | 
 |   for (intptr_t i = 0; i < arguments.Length(); ++i) { | 
 |     arg = arguments.At(i); | 
 |     real_arguments.SetAt(i + 1, arg); | 
 |   } | 
 |  | 
 |   const Array& args_desc = | 
 |       Array::Handle(zone, ArgumentsDescriptor::NewBoxed( | 
 |                               num_type_args, arguments.Length(), Heap::kNew)); | 
 |   return DartEntry::InvokeFunction(eval_function, real_arguments, args_desc); | 
 | } | 
 |  | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | ObjectPtr Library::EvaluateCompiledExpression( | 
 |     const ExternalTypedData& kernel_buffer, | 
 |     const Array& type_definitions, | 
 |     const Array& arguments, | 
 |     const TypeArguments& type_arguments) const { | 
 |   const auto& klass = Class::Handle(toplevel_class()); | 
 |   return klass.EvaluateCompiledExpression(kernel_buffer, type_definitions, | 
 |                                           arguments, type_arguments); | 
 | } | 
 |  | 
 | ObjectPtr Class::EvaluateCompiledExpression( | 
 |     const ExternalTypedData& kernel_buffer, | 
 |     const Array& type_definitions, | 
 |     const Array& arguments, | 
 |     const TypeArguments& type_arguments) const { | 
 |   auto thread = Thread::Current(); | 
 |   const auto& library = Library::Handle(thread->zone(), this->library()); | 
 |   return Instance::EvaluateCompiledExpression( | 
 |       thread, Instance::null_object(), library, *this, kernel_buffer, | 
 |       type_definitions, arguments, type_arguments); | 
 | } | 
 |  | 
 | ObjectPtr Instance::EvaluateCompiledExpression( | 
 |     const Class& klass, | 
 |     const ExternalTypedData& kernel_buffer, | 
 |     const Array& type_definitions, | 
 |     const Array& arguments, | 
 |     const TypeArguments& type_arguments) const { | 
 |   auto thread = Thread::Current(); | 
 |   auto zone = thread->zone(); | 
 |   const auto& library = Library::Handle(zone, klass.library()); | 
 |   return Instance::EvaluateCompiledExpression(thread, *this, library, klass, | 
 |                                               kernel_buffer, type_definitions, | 
 |                                               arguments, type_arguments); | 
 | } | 
 |  | 
 | ObjectPtr Instance::EvaluateCompiledExpression( | 
 |     Thread* thread, | 
 |     const Object& receiver, | 
 |     const Library& library, | 
 |     const Class& klass, | 
 |     const ExternalTypedData& kernel_buffer, | 
 |     const Array& type_definitions, | 
 |     const Array& arguments, | 
 |     const TypeArguments& type_arguments) { | 
 |   auto zone = Thread::Current()->zone(); | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   const auto& error_str = String::Handle( | 
 |       zone, | 
 |       String::New("Expression evaluation not available in precompiled mode.")); | 
 |   return ApiError::New(error_str); | 
 | #else | 
 |   if (IsInternalOnlyClassId(klass.id()) || (klass.id() == kTypeArgumentsCid)) { | 
 |     const auto& exception = Instance::Handle( | 
 |         zone, String::New("Expressions can be evaluated only with regular Dart " | 
 |                           "instances/classes.")); | 
 |     return UnhandledException::New(exception, StackTrace::null_instance()); | 
 |   } | 
 |  | 
 |   const auto& url = String::Handle(zone, library.url()); | 
 |   const auto& klass_name = klass.IsTopLevel() | 
 |                                ? String::null_string() | 
 |                                : String::Handle(zone, klass.UserVisibleName()); | 
 |  | 
 |   const auto& result = Object::Handle( | 
 |       zone, | 
 |       LoadExpressionEvaluationFunction(zone, kernel_buffer, url, klass_name)); | 
 |   if (result.IsError()) return result.ptr(); | 
 |  | 
 |   const auto& eval_function = Function::Cast(result); | 
 |  | 
 | #if defined(DEBUG) | 
 |   for (intptr_t i = 0; i < arguments.Length(); ++i) { | 
 |     ASSERT(arguments.At(i) != Object::optimized_out().ptr()); | 
 |   } | 
 | #endif  // defined(DEBUG) | 
 |  | 
 |   auto& all_arguments = Array::Handle(zone, arguments.ptr()); | 
 |   if (!eval_function.is_static()) { | 
    // `this` may be optimized out (e.g. not accessible from a breakpoint
    // because it was not captured by a closure). We allow this as long as
    // the evaluation function doesn't actually need `this`.
 |     if (receiver.IsNull() || receiver.ptr() == Object::optimized_out().ptr()) { | 
 |       if (EvaluationFunctionNeedsReceiver(thread, zone, eval_function)) { | 
 |         return Object::optimized_out().ptr(); | 
 |       } | 
 |     } | 
 |  | 
 |     all_arguments = Array::New(1 + arguments.Length()); | 
 |     auto& param = PassiveObject::Handle(); | 
 |     all_arguments.SetAt(0, receiver); | 
 |     for (intptr_t i = 0; i < arguments.Length(); i++) { | 
 |       param = arguments.At(i); | 
 |       all_arguments.SetAt(i + 1, param); | 
 |     } | 
 |   } | 
 |  | 
 |   return EvaluateCompiledExpressionHelper(zone, eval_function, type_definitions, | 
 |                                           all_arguments, type_arguments); | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | void Class::EnsureDeclarationLoaded() const { | 
 |   if (!is_declaration_loaded()) { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |     UNREACHABLE(); | 
 | #else | 
 |     FATAL("Unable to use class %s which is not loaded yet.", ToCString()); | 
 | #endif | 
 |   } | 
 | } | 
 |  | 
 | // Ensure that top level parsing of the class has been done. | 
 | ErrorPtr Class::EnsureIsFinalized(Thread* thread) const { | 
 |   ASSERT(!IsNull()); | 
 |   if (is_finalized()) { | 
 |     return Error::null(); | 
 |   } | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 |   return Error::null(); | 
 | #else | 
 |   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |   if (is_finalized()) { | 
 |     return Error::null(); | 
 |   } | 
 |   LeaveCompilerScope ncs(thread); | 
 |   ASSERT(thread != nullptr); | 
 |   const Error& error = | 
 |       Error::Handle(thread->zone(), ClassFinalizer::LoadClassMembers(*this)); | 
 |   if (!error.IsNull()) { | 
 |     ASSERT(thread == Thread::Current()); | 
 |     if (thread->long_jump_base() != nullptr) { | 
 |       Report::LongJump(error); | 
 |       UNREACHABLE(); | 
 |     } | 
 |   } | 
 |   return error.ptr(); | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
// Ensure that code outdated by this class's finalization is cleaned up and
// that new instances of this class are ready to be allocated.
 | ErrorPtr Class::EnsureIsAllocateFinalized(Thread* thread) const { | 
 |   ASSERT(!IsNull()); | 
 |   if (is_allocate_finalized()) { | 
 |     return Error::null(); | 
 |   } | 
 |   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |   if (is_allocate_finalized()) { | 
 |     return Error::null(); | 
 |   } | 
 |   ASSERT(thread != nullptr); | 
 |   Error& error = Error::Handle(thread->zone(), EnsureIsFinalized(thread)); | 
 |   if (!error.IsNull()) { | 
 |     ASSERT(thread == Thread::Current()); | 
 |     if (thread->long_jump_base() != nullptr) { | 
 |       Report::LongJump(error); | 
 |       UNREACHABLE(); | 
 |     } | 
 |   } | 
 |   // May be allocate-finalized recursively during EnsureIsFinalized. | 
 |   if (is_allocate_finalized()) { | 
 |     return Error::null(); | 
 |   } | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   error ^= ClassFinalizer::AllocateFinalizeClass(*this); | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME) | 
 |   return error.ptr(); | 
 | } | 
 |  | 
 | void Class::SetFields(const Array& value) const { | 
 |   ASSERT(!value.IsNull()); | 
 | #if defined(DEBUG) | 
 |   Thread* thread = Thread::Current(); | 
 |   ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); | 
 |   // Verify that all the fields in the array have this class as owner. | 
 |   Field& field = Field::Handle(); | 
 |   intptr_t len = value.Length(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     field ^= value.At(i); | 
 |     ASSERT(field.IsOriginal()); | 
 |     ASSERT(field.Owner() == ptr()); | 
 |   } | 
 | #endif | 
  // The values of the static fields are already initialized to null.
 |   set_fields(value); | 
 | } | 
 |  | 
 | void Class::AddField(const Field& field) const { | 
 | #if defined(DEBUG) | 
 |   Thread* thread = Thread::Current(); | 
 |   ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); | 
 | #endif | 
 |   const Array& arr = Array::Handle(fields()); | 
 |   const Array& new_arr = Array::Handle(Array::Grow(arr, arr.Length() + 1)); | 
 |   new_arr.SetAt(arr.Length(), field); | 
 |   SetFields(new_arr); | 
 | } | 
 |  | 
 | void Class::AddFields(const GrowableArray<const Field*>& new_fields) const { | 
 | #if defined(DEBUG) | 
 |   Thread* thread = Thread::Current(); | 
 |   ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); | 
 | #endif | 
 |   const intptr_t num_new_fields = new_fields.length(); | 
 |   if (num_new_fields == 0) return; | 
 |   const Array& arr = Array::Handle(fields()); | 
 |   const intptr_t num_old_fields = arr.Length(); | 
 |   const Array& new_arr = Array::Handle( | 
 |       Array::Grow(arr, num_old_fields + num_new_fields, Heap::kOld)); | 
 |   for (intptr_t i = 0; i < num_new_fields; i++) { | 
 |     new_arr.SetAt(i + num_old_fields, *new_fields.At(i)); | 
 |   } | 
 |   SetFields(new_arr); | 
 | } | 
 |  | 
 | intptr_t Class::FindFieldIndex(const Field& needle) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   if (EnsureIsFinalized(thread) != Error::null()) { | 
 |     return -1; | 
 |   } | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_FIELD_HANDLESCOPE(thread); | 
 |   Array& fields = thread->ArrayHandle(); | 
 |   Field& field = thread->FieldHandle(); | 
 |   fields = this->fields(); | 
 |   ASSERT(!fields.IsNull()); | 
 |   for (intptr_t i = 0, n = fields.Length(); i < n; ++i) { | 
 |     field ^= fields.At(i); | 
 |     if (needle.ptr() == field.ptr()) { | 
 |       return i; | 
 |     } | 
 |   } | 
 |   // Not found. | 
 |   return -1; | 
 | } | 
 |  | 
 | FieldPtr Class::FieldFromIndex(intptr_t idx) const { | 
 |   Array& fields = Array::Handle(this->fields()); | 
 |   if ((idx < 0) || (idx >= fields.Length())) { | 
 |     return Field::null(); | 
 |   } | 
 |   return Field::RawCast(fields.At(idx)); | 
 | } | 
 |  | 
 | bool Class::InjectCIDFields() const { | 
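  // Only the ClassID class in dart:_internal receives the injected
  // "cid<Class>" constant fields; for any other class this is a no-op.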
 |   if (library() != Library::InternalLibrary() || | 
 |       Name() != Symbols::ClassID().ptr()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   auto thread = Thread::Current(); | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   auto zone = thread->zone(); | 
 |   Field& field = Field::Handle(zone); | 
 |   Smi& value = Smi::Handle(zone); | 
 |   String& field_name = String::Handle(zone); | 
 |  | 
 |   // clang-format off | 
 |   static const struct { | 
 |     const char* const field_name; | 
 |     const intptr_t cid; | 
 |   } cid_fields[] = { | 
 | #define CLASS_LIST_WITH_NULL(V)                                                \ | 
 |     V(Null)                                                                    \ | 
 |     CLASS_LIST_NO_OBJECT(V) | 
 | #define ADD_SET_FIELD(clazz)                                                   \ | 
 |     {"cid" #clazz, k##clazz##Cid}, | 
 |     CLASS_LIST_WITH_NULL(ADD_SET_FIELD) | 
 | #undef ADD_SET_FIELD | 
 | #undef CLASS_LIST_WITH_NULL | 
 | #define ADD_SET_FIELD(clazz)                                                   \ | 
 |     {"cid" #clazz, kTypedData##clazz##Cid},                                    \ | 
 |     {"cid" #clazz "View", kTypedData##clazz##ViewCid},                         \ | 
 |     {"cidExternal" #clazz, kExternalTypedData##clazz##Cid},                    \ | 
 |     {"cidUnmodifiable" #clazz "View", kUnmodifiableTypedData##clazz##ViewCid}, \ | 
 |     CLASS_LIST_TYPED_DATA(ADD_SET_FIELD) | 
 | #undef ADD_SET_FIELD | 
 |     // Used in const hashing to determine whether we're dealing with a | 
 |     // user-defined const. See lib/_internal/vm/lib/compact_hash.dart. | 
 |     {"numPredefinedCids", kNumPredefinedCids}, | 
 |   }; | 
 |   // clang-format on | 
 |  | 
 |   const AbstractType& field_type = Type::Handle(zone, Type::IntType()); | 
 |   for (size_t i = 0; i < ARRAY_SIZE(cid_fields); i++) { | 
 |     field_name = Symbols::New(thread, cid_fields[i].field_name); | 
 |     field = Field::New(field_name, /* is_static = */ true, | 
 |                        /* is_final = */ false, | 
 |                        /* is_const = */ true, | 
 |                        /* is_reflectable = */ false, | 
 |                        /* is_late = */ false, *this, field_type, | 
 |                        TokenPosition::kMinSource, TokenPosition::kMinSource); | 
 |     value = Smi::New(cid_fields[i].cid); | 
 |     isolate_group->RegisterStaticField(field, value); | 
 |     AddField(field); | 
 |   } | 
 |  | 
 |   return true; | 
 | } | 
 |  | 
 | template <class FakeInstance, class TargetFakeInstance> | 
 | ClassPtr Class::NewCommon(intptr_t index) { | 
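  // FakeInstance describes the host-side instance layout, while
  // TargetFakeInstance describes the layout on the compilation target;
  // the two differ when cross-compiling (e.g. 64-bit host, 32-bit target).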
 |   ASSERT(Object::class_class() != Class::null()); | 
 |   const auto& result = Class::Handle(Object::Allocate<Class>(Heap::kOld)); | 
 |   // Here kIllegalCid means not-yet-assigned. | 
 |   Object::VerifyBuiltinVtable<FakeInstance>(index == kIllegalCid ? kInstanceCid | 
 |                                                                  : index); | 
 |   NOT_IN_PRECOMPILED(result.set_token_pos(TokenPosition::kNoSource)); | 
 |   NOT_IN_PRECOMPILED(result.set_end_token_pos(TokenPosition::kNoSource)); | 
 |   const intptr_t host_instance_size = FakeInstance::InstanceSize(); | 
 |   const intptr_t target_instance_size = compiler::target::RoundedAllocationSize( | 
 |       TargetFakeInstance::InstanceSize()); | 
 |   result.set_instance_size(host_instance_size, target_instance_size); | 
 |   result.set_type_arguments_field_offset_in_words(kNoTypeArguments, | 
 |                                                   RTN::Class::kNoTypeArguments); | 
 |   const intptr_t host_next_field_offset = FakeInstance::NextFieldOffset(); | 
 |   const intptr_t target_next_field_offset = | 
 |       TargetFakeInstance::NextFieldOffset(); | 
 |   result.set_next_field_offset(host_next_field_offset, | 
 |                                target_next_field_offset); | 
 |   result.set_id(index); | 
 |   NOT_IN_PRECOMPILED(result.set_implementor_cid(kIllegalCid)); | 
 |   result.set_num_type_arguments_unsafe(kUnknownNumTypeArguments); | 
 |   result.set_num_native_fields(0); | 
 |   result.set_state_bits(0); | 
 |   NOT_IN_PRECOMPILED(result.set_kernel_offset(0)); | 
 |   result.InitEmptyFields(); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | template <class FakeInstance, class TargetFakeInstance> | 
 | ClassPtr Class::New(intptr_t index, | 
 |                     IsolateGroup* isolate_group, | 
 |                     bool register_class, | 
 |                     bool is_abstract) { | 
 |   Class& result = | 
 |       Class::Handle(NewCommon<FakeInstance, TargetFakeInstance>(index)); | 
 |   if (is_abstract) { | 
 |     result.set_is_abstract(); | 
 |   } | 
 |   if (register_class) { | 
 |     isolate_group->class_table()->Register(result); | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ClassPtr Class::New(const Library& lib, | 
 |                     const String& name, | 
 |                     const Script& script, | 
 |                     TokenPosition token_pos, | 
 |                     bool register_class) { | 
 |   Class& result = | 
 |       Class::Handle(NewCommon<Instance, RTN::Instance>(kIllegalCid)); | 
 |   result.set_library(lib); | 
 |   result.set_name(name); | 
 |   result.set_script(script); | 
 |   NOT_IN_PRECOMPILED(result.set_token_pos(token_pos)); | 
 |  | 
  // The size is initialized to 0. Once the class is finalized, the class
  // finalizer will set the correct size.
 |   ASSERT(!result.is_finalized() && !result.is_prefinalized()); | 
 |   result.set_instance_size_in_words(0, 0); | 
 |  | 
 |   if (register_class) { | 
 |     IsolateGroup::Current()->RegisterClass(result); | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ClassPtr Class::NewInstanceClass() { | 
 |   return Class::New<Instance, RTN::Instance>(kIllegalCid, | 
 |                                              IsolateGroup::Current()); | 
 | } | 
 |  | 
 | ClassPtr Class::NewNativeWrapper(const Library& library, | 
 |                                  const String& name, | 
 |                                  int field_count) { | 
 |   Class& cls = Class::Handle(library.LookupClass(name)); | 
 |   if (cls.IsNull()) { | 
 |     cls = New(library, name, Script::Handle(), TokenPosition::kNoSource); | 
 |     cls.SetFields(Object::empty_array()); | 
 |     cls.SetFunctions(Object::empty_array()); | 
 |     // Set super class to Object. | 
 |     cls.set_super_type(Type::Handle(Type::ObjectType())); | 
    // Compute the instance size. The first word will contain a pointer to a
    // properly sized typed array once the first native field has been set.
 |     const intptr_t host_instance_size = | 
 |         sizeof(UntaggedInstance) + kCompressedWordSize; | 
 | #if defined(DART_PRECOMPILER) | 
 |     const intptr_t target_instance_size = | 
 |         compiler::target::Instance::InstanceSize() + | 
 |         compiler::target::kCompressedWordSize; | 
 | #else | 
 |     const intptr_t target_instance_size = | 
 |         sizeof(UntaggedInstance) + compiler::target::kCompressedWordSize; | 
 | #endif | 
 |     cls.set_instance_size( | 
 |         RoundedAllocationSize(host_instance_size), | 
 |         compiler::target::RoundedAllocationSize(target_instance_size)); | 
 |     cls.set_next_field_offset(host_instance_size, target_instance_size); | 
 |     cls.set_num_native_fields(field_count); | 
 |     cls.set_is_allocate_finalized(); | 
 |     // The signature of the constructor yet to be added to this class will have | 
 |     // to be finalized explicitly, since the class is prematurely marked as | 
 |     // 'is_allocate_finalized' and finalization of member types will not occur. | 
 |     cls.set_is_declaration_loaded(); | 
 |     cls.set_is_type_finalized(); | 
 |     cls.set_is_synthesized_class(); | 
 |     cls.set_is_isolate_unsendable(true); | 
 |     NOT_IN_PRECOMPILED(cls.set_implementor_cid(kDynamicCid)); | 
 |     library.AddClass(cls); | 
 |     return cls.ptr(); | 
 |   } else { | 
 |     return Class::null(); | 
 |   } | 
 | } | 
 |  | 
 | ClassPtr Class::NewStringClass(intptr_t class_id, IsolateGroup* isolate_group) { | 
 |   intptr_t host_instance_size, target_instance_size; | 
 |   if (class_id == kOneByteStringCid) { | 
 |     host_instance_size = OneByteString::InstanceSize(); | 
 |     target_instance_size = compiler::target::RoundedAllocationSize( | 
 |         RTN::OneByteString::InstanceSize()); | 
 |   } else { | 
 |     ASSERT(class_id == kTwoByteStringCid); | 
 |     host_instance_size = TwoByteString::InstanceSize(); | 
 |     target_instance_size = compiler::target::RoundedAllocationSize( | 
 |         RTN::TwoByteString::InstanceSize()); | 
 |   } | 
 |   Class& result = Class::Handle(New<String, RTN::String>( | 
 |       class_id, isolate_group, /*register_class=*/false)); | 
 |   result.set_instance_size(host_instance_size, target_instance_size); | 
 |  | 
 |   const intptr_t host_next_field_offset = String::NextFieldOffset(); | 
 |   const intptr_t target_next_field_offset = RTN::String::NextFieldOffset(); | 
 |   result.set_next_field_offset(host_next_field_offset, | 
 |                                target_next_field_offset); | 
 |   result.set_is_prefinalized(); | 
 |   ASSERT(IsDeeplyImmutableCid(class_id)); | 
 |   result.set_is_deeply_immutable(true); | 
 |   isolate_group->class_table()->Register(result); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ClassPtr Class::NewTypedDataClass(intptr_t class_id, | 
 |                                   IsolateGroup* isolate_group) { | 
 |   ASSERT(IsTypedDataClassId(class_id)); | 
 |   const intptr_t host_instance_size = TypedData::InstanceSize(); | 
 |   const intptr_t target_instance_size = | 
 |       compiler::target::RoundedAllocationSize(RTN::TypedData::InstanceSize()); | 
 |   Class& result = Class::Handle(New<TypedData, RTN::TypedData>( | 
 |       class_id, isolate_group, /*register_class=*/false)); | 
 |   result.set_instance_size(host_instance_size, target_instance_size); | 
 |  | 
 |   const intptr_t host_next_field_offset = TypedData::NextFieldOffset(); | 
 |   const intptr_t target_next_field_offset = RTN::TypedData::NextFieldOffset(); | 
 |   result.set_next_field_offset(host_next_field_offset, | 
 |                                target_next_field_offset); | 
 |   result.set_is_prefinalized(); | 
 |   isolate_group->class_table()->Register(result); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ClassPtr Class::NewTypedDataViewClass(intptr_t class_id, | 
 |                                       IsolateGroup* isolate_group) { | 
 |   ASSERT(IsTypedDataViewClassId(class_id)); | 
 |   const intptr_t host_instance_size = TypedDataView::InstanceSize(); | 
 |   const intptr_t target_instance_size = compiler::target::RoundedAllocationSize( | 
 |       RTN::TypedDataView::InstanceSize()); | 
 |   Class& result = Class::Handle(New<TypedDataView, RTN::TypedDataView>( | 
 |       class_id, isolate_group, /*register_class=*/false)); | 
 |   result.set_instance_size(host_instance_size, target_instance_size); | 
 |  | 
 |   const intptr_t host_next_field_offset = TypedDataView::NextFieldOffset(); | 
 |   const intptr_t target_next_field_offset = | 
 |       RTN::TypedDataView::NextFieldOffset(); | 
 |   result.set_next_field_offset(host_next_field_offset, | 
 |                                target_next_field_offset); | 
 |   result.set_is_prefinalized(); | 
 |   isolate_group->class_table()->Register(result); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ClassPtr Class::NewUnmodifiableTypedDataViewClass(intptr_t class_id, | 
 |                                                   IsolateGroup* isolate_group) { | 
 |   ASSERT(IsUnmodifiableTypedDataViewClassId(class_id)); | 
 |   const intptr_t host_instance_size = TypedDataView::InstanceSize(); | 
 |   const intptr_t target_instance_size = compiler::target::RoundedAllocationSize( | 
 |       RTN::TypedDataView::InstanceSize()); | 
 |   Class& result = Class::Handle(New<TypedDataView, RTN::TypedDataView>( | 
 |       class_id, isolate_group, /*register_class=*/false)); | 
 |   result.set_instance_size(host_instance_size, target_instance_size); | 
 |  | 
 |   const intptr_t host_next_field_offset = TypedDataView::NextFieldOffset(); | 
 |   const intptr_t target_next_field_offset = | 
 |       RTN::TypedDataView::NextFieldOffset(); | 
 |   result.set_next_field_offset(host_next_field_offset, | 
 |                                target_next_field_offset); | 
 |   result.set_is_prefinalized(); | 
 |   isolate_group->class_table()->Register(result); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ClassPtr Class::NewExternalTypedDataClass(intptr_t class_id, | 
 |                                           IsolateGroup* isolate_group) { | 
 |   ASSERT(IsExternalTypedDataClassId(class_id)); | 
 |   const intptr_t host_instance_size = ExternalTypedData::InstanceSize(); | 
 |   const intptr_t target_instance_size = compiler::target::RoundedAllocationSize( | 
 |       RTN::ExternalTypedData::InstanceSize()); | 
 |   Class& result = Class::Handle(New<ExternalTypedData, RTN::ExternalTypedData>( | 
 |       class_id, isolate_group, /*register_class=*/false)); | 
 |  | 
 |   const intptr_t host_next_field_offset = ExternalTypedData::NextFieldOffset(); | 
 |   const intptr_t target_next_field_offset = | 
 |       RTN::ExternalTypedData::NextFieldOffset(); | 
 |   result.set_instance_size(host_instance_size, target_instance_size); | 
 |   result.set_next_field_offset(host_next_field_offset, | 
 |                                target_next_field_offset); | 
 |   result.set_is_prefinalized(); | 
 |   isolate_group->class_table()->Register(result); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ClassPtr Class::NewPointerClass(intptr_t class_id, | 
 |                                 IsolateGroup* isolate_group) { | 
 |   ASSERT(IsFfiPointerClassId(class_id)); | 
 |   intptr_t host_instance_size = Pointer::InstanceSize(); | 
 |   intptr_t target_instance_size = | 
 |       compiler::target::RoundedAllocationSize(RTN::Pointer::InstanceSize()); | 
 |   Class& result = Class::Handle(New<Pointer, RTN::Pointer>( | 
 |       class_id, isolate_group, /*register_class=*/false)); | 
 |   result.set_instance_size(host_instance_size, target_instance_size); | 
 |   result.set_type_arguments_field_offset(Pointer::type_arguments_offset(), | 
 |                                          RTN::Pointer::type_arguments_offset()); | 
 |  | 
 |   const intptr_t host_next_field_offset = Pointer::NextFieldOffset(); | 
 |   const intptr_t target_next_field_offset = RTN::Pointer::NextFieldOffset(); | 
 |  | 
 |   result.set_next_field_offset(host_next_field_offset, | 
 |                                target_next_field_offset); | 
 |   result.set_is_prefinalized(); | 
 |   isolate_group->class_table()->Register(result); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void Class::set_name(const String& value) const { | 
 |   ASSERT(untag()->name() == String::null()); | 
 |   ASSERT(value.IsSymbol()); | 
 |   untag()->set_name(value.ptr()); | 
 | #if !defined(PRODUCT) | 
 |   if (untag()->user_name() == String::null()) { | 
 |     // TODO(johnmccutchan): Eagerly set user name for VM isolate classes, | 
 |     // lazily set user name for the other classes. | 
 |     // Generate and set user_name. | 
 |     const String& user_name = String::Handle( | 
 |         Symbols::New(Thread::Current(), GenerateUserVisibleName())); | 
 |     set_user_name(user_name); | 
 |   } | 
 | #endif  // !defined(PRODUCT) | 
 | } | 
 |  | 
 | #if !defined(PRODUCT) | 
 | void Class::set_user_name(const String& value) const { | 
 |   untag()->set_user_name(value.ptr()); | 
 | } | 
 | #endif  // !defined(PRODUCT) | 
 |  | 
 | #if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER) | 
 | void Class::SetUserVisibleNameInClassTable() { | 
 |   IsolateGroup* isolate_group = IsolateGroup::Current(); | 
 |   auto class_table = isolate_group->class_table(); | 
 |   if (class_table->UserVisibleNameFor(id()) == nullptr) { | 
 |     String& name = String::Handle(UserVisibleName()); | 
 |     class_table->SetUserVisibleNameFor(id(), name.ToMallocCString()); | 
 |   } | 
 | } | 
 | #endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER) | 
 |  | 
 | const char* Class::GenerateUserVisibleName() const { | 
 |   if (FLAG_show_internal_names) { | 
 |     return String::Handle(Name()).ToCString(); | 
 |   } | 
 |   switch (id()) { | 
 |     case kFloat32x4Cid: | 
 |       return Symbols::Float32x4().ToCString(); | 
 |     case kFloat64x2Cid: | 
 |       return Symbols::Float64x2().ToCString(); | 
 |     case kInt32x4Cid: | 
 |       return Symbols::Int32x4().ToCString(); | 
 |     case kTypedDataInt8ArrayCid: | 
 |     case kExternalTypedDataInt8ArrayCid: | 
 |       return Symbols::Int8List().ToCString(); | 
 |     case kTypedDataUint8ArrayCid: | 
 |     case kExternalTypedDataUint8ArrayCid: | 
 |       return Symbols::Uint8List().ToCString(); | 
 |     case kTypedDataUint8ClampedArrayCid: | 
 |     case kExternalTypedDataUint8ClampedArrayCid: | 
 |       return Symbols::Uint8ClampedList().ToCString(); | 
 |     case kTypedDataInt16ArrayCid: | 
 |     case kExternalTypedDataInt16ArrayCid: | 
 |       return Symbols::Int16List().ToCString(); | 
 |     case kTypedDataUint16ArrayCid: | 
 |     case kExternalTypedDataUint16ArrayCid: | 
 |       return Symbols::Uint16List().ToCString(); | 
 |     case kTypedDataInt32ArrayCid: | 
 |     case kExternalTypedDataInt32ArrayCid: | 
 |       return Symbols::Int32List().ToCString(); | 
 |     case kTypedDataUint32ArrayCid: | 
 |     case kExternalTypedDataUint32ArrayCid: | 
 |       return Symbols::Uint32List().ToCString(); | 
 |     case kTypedDataInt64ArrayCid: | 
 |     case kExternalTypedDataInt64ArrayCid: | 
 |       return Symbols::Int64List().ToCString(); | 
 |     case kTypedDataUint64ArrayCid: | 
 |     case kExternalTypedDataUint64ArrayCid: | 
 |       return Symbols::Uint64List().ToCString(); | 
 |     case kTypedDataInt32x4ArrayCid: | 
 |     case kExternalTypedDataInt32x4ArrayCid: | 
 |       return Symbols::Int32x4List().ToCString(); | 
 |     case kTypedDataFloat32x4ArrayCid: | 
 |     case kExternalTypedDataFloat32x4ArrayCid: | 
 |       return Symbols::Float32x4List().ToCString(); | 
 |     case kTypedDataFloat64x2ArrayCid: | 
 |     case kExternalTypedDataFloat64x2ArrayCid: | 
 |       return Symbols::Float64x2List().ToCString(); | 
 |     case kTypedDataFloat32ArrayCid: | 
 |     case kExternalTypedDataFloat32ArrayCid: | 
 |       return Symbols::Float32List().ToCString(); | 
 |     case kTypedDataFloat64ArrayCid: | 
 |     case kExternalTypedDataFloat64ArrayCid: | 
 |       return Symbols::Float64List().ToCString(); | 
 |     case kPointerCid: | 
 |       return Symbols::FfiPointer().ToCString(); | 
 |     case kDynamicLibraryCid: | 
 |       return Symbols::FfiDynamicLibrary().ToCString(); | 
 |     case kNullCid: | 
 |       return Symbols::Null().ToCString(); | 
 |     case kDynamicCid: | 
 |       return Symbols::Dynamic().ToCString(); | 
 |     case kVoidCid: | 
 |       return Symbols::Void().ToCString(); | 
 |     case kNeverCid: | 
 |       return Symbols::Never().ToCString(); | 
 |     case kClassCid: | 
 |       return Symbols::Class().ToCString(); | 
 |     case kTypeParametersCid: | 
 |       return Symbols::TypeParameters().ToCString(); | 
 |     case kTypeArgumentsCid: | 
 |       return Symbols::TypeArguments().ToCString(); | 
 |     case kPatchClassCid: | 
 |       return Symbols::PatchClass().ToCString(); | 
 |     case kFunctionCid: | 
 |       return Symbols::Function().ToCString(); | 
 |     case kClosureDataCid: | 
 |       return Symbols::ClosureData().ToCString(); | 
 |     case kFfiTrampolineDataCid: | 
 |       return Symbols::FfiTrampolineData().ToCString(); | 
 |     case kFieldCid: | 
 |       return Symbols::Field().ToCString(); | 
 |     case kScriptCid: | 
 |       return Symbols::Script().ToCString(); | 
 |     case kLibraryCid: | 
 |       return Symbols::Library().ToCString(); | 
 |     case kLibraryPrefixCid: | 
 |       return Symbols::LibraryPrefix().ToCString(); | 
 |     case kNamespaceCid: | 
 |       return Symbols::Namespace().ToCString(); | 
 |     case kKernelProgramInfoCid: | 
 |       return Symbols::KernelProgramInfo().ToCString(); | 
 |     case kWeakSerializationReferenceCid: | 
 |       return Symbols::WeakSerializationReference().ToCString(); | 
 |     case kWeakArrayCid: | 
 |       return Symbols::WeakArray().ToCString(); | 
 |     case kCodeCid: | 
 |       return Symbols::Code().ToCString(); | 
 |     case kInstructionsCid: | 
 |       return Symbols::Instructions().ToCString(); | 
 |     case kInstructionsSectionCid: | 
 |       return Symbols::InstructionsSection().ToCString(); | 
 |     case kInstructionsTableCid: | 
 |       return Symbols::InstructionsTable().ToCString(); | 
 |     case kObjectPoolCid: | 
 |       return Symbols::ObjectPool().ToCString(); | 
 |     case kCodeSourceMapCid: | 
 |       return Symbols::CodeSourceMap().ToCString(); | 
 |     case kPcDescriptorsCid: | 
 |       return Symbols::PcDescriptors().ToCString(); | 
 |     case kCompressedStackMapsCid: | 
 |       return Symbols::CompressedStackMaps().ToCString(); | 
 |     case kLocalVarDescriptorsCid: | 
 |       return Symbols::LocalVarDescriptors().ToCString(); | 
 |     case kExceptionHandlersCid: | 
 |       return Symbols::ExceptionHandlers().ToCString(); | 
 |     case kContextCid: | 
 |       return Symbols::Context().ToCString(); | 
 |     case kContextScopeCid: | 
 |       return Symbols::ContextScope().ToCString(); | 
 |     case kSentinelCid: | 
 |       return Symbols::Sentinel().ToCString(); | 
 |     case kSingleTargetCacheCid: | 
 |       return Symbols::SingleTargetCache().ToCString(); | 
 |     case kICDataCid: | 
 |       return Symbols::ICData().ToCString(); | 
 |     case kMegamorphicCacheCid: | 
 |       return Symbols::MegamorphicCache().ToCString(); | 
 |     case kSubtypeTestCacheCid: | 
 |       return Symbols::SubtypeTestCache().ToCString(); | 
 |     case kLoadingUnitCid: | 
 |       return Symbols::LoadingUnit().ToCString(); | 
 |     case kApiErrorCid: | 
 |       return Symbols::ApiError().ToCString(); | 
 |     case kLanguageErrorCid: | 
 |       return Symbols::LanguageError().ToCString(); | 
 |     case kUnhandledExceptionCid: | 
 |       return Symbols::UnhandledException().ToCString(); | 
 |     case kUnwindErrorCid: | 
 |       return Symbols::UnwindError().ToCString(); | 
 |     case kIntegerCid: | 
 |     case kSmiCid: | 
 |     case kMintCid: | 
 |       return Symbols::Int().ToCString(); | 
 |     case kDoubleCid: | 
 |       return Symbols::Double().ToCString(); | 
 |     case kOneByteStringCid: | 
 |     case kTwoByteStringCid: | 
 |       return Symbols::_String().ToCString(); | 
 |     case kArrayCid: | 
 |     case kImmutableArrayCid: | 
 |     case kGrowableObjectArrayCid: | 
 |       return Symbols::List().ToCString(); | 
 |   } | 
 |   String& name = String::Handle(Name()); | 
 |   name = Symbols::New(Thread::Current(), String::ScrubName(name)); | 
 |   if (name.ptr() == Symbols::_Future().ptr() && | 
 |       library() == Library::AsyncLibrary()) { | 
 |     return Symbols::Future().ToCString(); | 
 |   } | 
 |   return name.ToCString(); | 
 | } | 
 |  | 
 | void Class::set_script(const Script& value) const { | 
 |   untag()->set_script(value.ptr()); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | KernelProgramInfoPtr Class::KernelProgramInfo() const { | 
 |   const auto& lib = Library::Handle(library()); | 
 |   return lib.kernel_program_info(); | 
 | } | 
 |  | 
 | void Class::set_token_pos(TokenPosition token_pos) const { | 
 |   ASSERT(!token_pos.IsClassifying()); | 
 |   StoreNonPointer(&untag()->token_pos_, token_pos); | 
 | } | 
 |  | 
 | void Class::set_end_token_pos(TokenPosition token_pos) const { | 
 |   ASSERT(!token_pos.IsClassifying()); | 
 |   StoreNonPointer(&untag()->end_token_pos_, token_pos); | 
 | } | 
 |  | 
 | void Class::set_implementor_cid(intptr_t value) const { | 
 |   ASSERT(value >= 0 && value < std::numeric_limits<classid_t>::max()); | 
 |   StoreNonPointer(&untag()->implementor_cid_, value); | 
 | } | 
 |  | 
 | bool Class::NoteImplementor(const Class& implementor) const { | 
 |   ASSERT(!implementor.is_abstract()); | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
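  // implementor_cid_ is a three-state summary: kIllegalCid means no
  // implementor has been seen, a specific class id means exactly one,
  // and kDynamicCid means more than one. Returns whether the state changed.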
 |   if (implementor_cid() == kDynamicCid) { | 
 |     return false; | 
 |   } else if (implementor_cid() == implementor.id()) { | 
 |     return false; | 
 |   } else if (implementor_cid() == kIllegalCid) { | 
 |     set_implementor_cid(implementor.id()); | 
 |     return true;  // None -> One | 
 |   } else { | 
 |     set_implementor_cid(kDynamicCid); | 
 |     return true;  // One -> Many | 
 |   } | 
 | } | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | uint32_t Class::Hash() const { | 
 |   return Class::Hash(ptr()); | 
 | } | 
 | uint32_t Class::Hash(ClassPtr obj) { | 
 |   return String::HashRawSymbol(obj.untag()->name()); | 
 | } | 
 |  | 
 | int32_t Class::SourceFingerprint() const { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   return kernel::KernelSourceFingerprintHelper::CalculateClassFingerprint( | 
 |       *this); | 
 | #else | 
 |   return 0; | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | void Class::set_is_implemented() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_is_implemented_unsafe(); | 
 | } | 
 |  | 
 | void Class::set_is_implemented_unsafe() const { | 
 |   set_state_bits(ImplementedBit::update(true, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_abstract() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(AbstractBit::update(true, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_declaration_loaded() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_is_declaration_loaded_unsafe(); | 
 | } | 
 |  | 
 | void Class::set_is_declaration_loaded_unsafe() const { | 
 |   ASSERT(!is_declaration_loaded()); | 
 |   set_state_bits(ClassLoadingBits::update(UntaggedClass::kDeclarationLoaded, | 
 |                                           state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_type_finalized() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   ASSERT(is_declaration_loaded()); | 
 |   ASSERT(!is_type_finalized()); | 
 |   set_state_bits( | 
 |       ClassLoadingBits::update(UntaggedClass::kTypeFinalized, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_synthesized_class() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_is_synthesized_class_unsafe(); | 
 | } | 
 |  | 
 | void Class::set_is_synthesized_class_unsafe() const { | 
 |   set_state_bits(SynthesizedClassBit::update(true, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_enum_class() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(EnumBit::update(true, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_const() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(ConstBit::update(true, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_transformed_mixin_application() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(TransformedMixinApplicationBit::update(true, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_sealed() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(SealedBit::update(true, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_mixin_class() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(MixinClassBit::update(true, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_base_class() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(BaseClassBit::update(true, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_interface_class() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(InterfaceClassBit::update(true, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_final() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(FinalBit::update(true, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_fields_marked_nullable() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(FieldsMarkedNullableBit::update(true, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_allocated(bool value) const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_is_allocated_unsafe(value); | 
 | } | 
 |  | 
 | void Class::set_is_allocated_unsafe(bool value) const { | 
 |   set_state_bits(IsAllocatedBit::update(value, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_loaded(bool value) const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   set_state_bits(IsLoadedBit::update(value, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_finalized() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   ASSERT(!is_finalized()); | 
 |   set_is_finalized_unsafe(); | 
 | } | 
 |  | 
 | void Class::set_is_finalized_unsafe() const { | 
 |   set_state_bits( | 
 |       ClassFinalizedBits::update(UntaggedClass::kFinalized, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_allocate_finalized() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   ASSERT(!is_allocate_finalized()); | 
 |   set_state_bits(ClassFinalizedBits::update(UntaggedClass::kAllocateFinalized, | 
 |                                             state_bits())); | 
 | } | 
 |  | 
 | void Class::set_is_prefinalized() const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   ASSERT(!is_finalized()); | 
 |   set_state_bits( | 
 |       ClassFinalizedBits::update(UntaggedClass::kPreFinalized, state_bits())); | 
 | } | 
 |  | 
 | void Class::set_interfaces(const Array& value) const { | 
 |   ASSERT(!value.IsNull()); | 
 |   untag()->set_interfaces(value.ptr()); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | void Class::AddDirectImplementor(const Class& implementor, | 
 |                                  bool is_mixin) const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   ASSERT(is_implemented()); | 
 |   ASSERT(!implementor.IsNull()); | 
 |   GrowableObjectArray& direct_implementors = | 
 |       GrowableObjectArray::Handle(untag()->direct_implementors()); | 
 |   if (direct_implementors.IsNull()) { | 
 |     direct_implementors = GrowableObjectArray::New(4, Heap::kOld); | 
 |     untag()->set_direct_implementors(direct_implementors.ptr()); | 
 |   } | 
 | #if defined(DEBUG) | 
 |   // Verify that the same class is not added twice. | 
  // The only exception is mixins: when a mixin application is transformed,
  // the mixin is added to the end of the interfaces list and may be
  // duplicated:
 |   //   class X = A with B implements B; | 
 |   // This is rare and harmless. | 
 |   if (!is_mixin) { | 
 |     for (intptr_t i = 0; i < direct_implementors.Length(); i++) { | 
 |       ASSERT(direct_implementors.At(i) != implementor.ptr()); | 
 |     } | 
 |   } | 
 | #endif | 
 |   direct_implementors.Add(implementor, Heap::kOld); | 
 | } | 
 |  | 
 | void Class::set_direct_implementors( | 
 |     const GrowableObjectArray& implementors) const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   untag()->set_direct_implementors(implementors.ptr()); | 
 | } | 
 |  | 
 | void Class::AddDirectSubclass(const Class& subclass) const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   ASSERT(!subclass.IsNull()); | 
 |   ASSERT(subclass.SuperClass() == ptr()); | 
 |   // Do not keep track of the direct subclasses of class Object. | 
 |   ASSERT(!IsObjectClass()); | 
 |   GrowableObjectArray& direct_subclasses = | 
 |       GrowableObjectArray::Handle(untag()->direct_subclasses()); | 
 |   if (direct_subclasses.IsNull()) { | 
 |     direct_subclasses = GrowableObjectArray::New(4, Heap::kOld); | 
 |     untag()->set_direct_subclasses(direct_subclasses.ptr()); | 
 |   } | 
 | #if defined(DEBUG) | 
 |   // Verify that the same class is not added twice. | 
 |   for (intptr_t i = 0; i < direct_subclasses.Length(); i++) { | 
 |     ASSERT(direct_subclasses.At(i) != subclass.ptr()); | 
 |   } | 
 | #endif | 
 |   direct_subclasses.Add(subclass, Heap::kOld); | 
 | } | 
 |  | 
 | void Class::set_direct_subclasses(const GrowableObjectArray& subclasses) const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   untag()->set_direct_subclasses(subclasses.ptr()); | 
 | } | 
 |  | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | ArrayPtr Class::constants() const { | 
 |   return untag()->constants(); | 
 | } | 
 |  | 
 | void Class::set_constants(const Array& value) const { | 
 |   untag()->set_constants(value.ptr()); | 
 | } | 
 |  | 
 | void Class::set_declaration_type(const Type& value) const { | 
 |   ASSERT(id() != kDynamicCid && id() != kVoidCid); | 
 |   ASSERT(!value.IsNull() && value.IsCanonical() && value.IsOld()); | 
 |   ASSERT((declaration_type() == Object::null()) || | 
 |          (declaration_type() == value.ptr()));  // Set during own finalization. | 
 |   // Since DeclarationType is used as the runtime type of instances of a | 
 |   // non-generic class, its nullability must be kNonNullable. | 
 |   // The exception is DeclarationType of Null which is kNullable. | 
 |   ASSERT(value.type_class_id() != kNullCid || value.IsNullable()); | 
 |   ASSERT(value.type_class_id() == kNullCid || value.IsNonNullable()); | 
 |   untag()->set_declaration_type<std::memory_order_release>(value.ptr()); | 
 | } | 
 |  | 
 | TypePtr Class::DeclarationType() const { | 
 |   ASSERT(is_declaration_loaded()); | 
 |   if (IsNullClass()) { | 
 |     return Type::NullType(); | 
 |   } | 
 |   if (IsDynamicClass()) { | 
 |     return Type::DynamicType(); | 
 |   } | 
 |   if (IsVoidClass()) { | 
 |     return Type::VoidType(); | 
 |   } | 
 |   if (declaration_type() != Type::null()) { | 
 |     return declaration_type(); | 
 |   } | 
 |   { | 
 |     auto thread = Thread::Current(); | 
 |     SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
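    // Re-check under the program lock: another thread may have cached the
    // declaration type while we were waiting.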
 |     if (declaration_type() != Type::null()) { | 
 |       return declaration_type(); | 
 |     } | 
 |     // For efficiency, the runtimeType intrinsic returns the type cached by | 
 |     // DeclarationType without checking its nullability. Therefore, we | 
 |     // consistently cache the kNonNullable version of the type. | 
 |     // The exception is type Null which is stored as kNullable. | 
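    // For a generic class, the declaration type is parameterized with the
    // class's own type parameters, e.g. the declaration type of List is
    // List<E>.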
 |     TypeArguments& type_args = TypeArguments::Handle(); | 
 |     const intptr_t num_type_params = NumTypeParameters(); | 
 |     if (num_type_params > 0) { | 
 |       type_args = TypeArguments::New(num_type_params); | 
 |       TypeParameter& type_param = TypeParameter::Handle(); | 
 |       for (intptr_t i = 0; i < num_type_params; i++) { | 
 |         type_param = TypeParameterAt(i); | 
 |         type_args.SetTypeAt(i, type_param); | 
 |       } | 
 |     } | 
 |     Type& type = | 
 |         Type::Handle(Type::New(*this, type_args, Nullability::kNonNullable)); | 
 |     type ^= ClassFinalizer::FinalizeType(type); | 
 |     set_declaration_type(type); | 
 |     return type.ptr(); | 
 |   } | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | void Class::set_allocation_stub(const Code& value) const { | 
  // Never clear the stub: it may still be a call target, and it will be
  // GC'd once it is no longer referenced.
 |   ASSERT(!value.IsNull()); | 
 |   ASSERT(untag()->allocation_stub() == Code::null()); | 
 |   untag()->set_allocation_stub(value.ptr()); | 
 | } | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | void Class::DisableAllocationStub() const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   { | 
 |     const Code& existing_stub = Code::Handle(allocation_stub()); | 
 |     if (existing_stub.IsNull()) { | 
 |       return; | 
 |     } | 
 |   } | 
 |   auto thread = Thread::Current(); | 
 |   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |   const Code& existing_stub = Code::Handle(allocation_stub()); | 
 |   if (existing_stub.IsNull()) { | 
 |     return; | 
 |   } | 
 |   ASSERT(!existing_stub.IsDisabled()); | 
  // Disable the stub so that the next caller will regenerate it.
 |   existing_stub.DisableStubCode(NumTypeParameters() > 0); | 
  // Disassociate the existing stub from the class.
 |   untag()->set_allocation_stub(Code::null()); | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | bool Class::IsDartFunctionClass() const { | 
 |   return ptr() == Type::Handle(Type::DartFunctionType()).type_class(); | 
 | } | 
 |  | 
 | bool Class::IsFutureClass() const { | 
  // Looking up future_class in the object store would not work, because
  // this function is called during class finalization, before the object
  // store field has been initialized by InitKnownObjects().
 |   return (Name() == Symbols::Future().ptr()) && | 
 |          (library() == Library::AsyncLibrary()); | 
 | } | 
 |  | 
 | // Checks if type T0 is a subtype of type T1. | 
 | // Type T0 is specified by class 'cls' parameterized with 'type_arguments' and | 
 | // by 'nullability', and type T1 is specified by 'other' and must have a type | 
 | // class. | 
 | // [type_arguments] should be a flattened instance type arguments vector. | 
 | bool Class::IsSubtypeOf(const Class& cls, | 
 |                         const TypeArguments& type_arguments, | 
 |                         Nullability nullability, | 
 |                         const AbstractType& other, | 
 |                         Heap::Space space, | 
 |                         FunctionTypeMapping* function_type_equivalence) { | 
 |   TRACE_TYPE_CHECKS_VERBOSE("  Class::IsSubtypeOf(%s %s, %s)\n", | 
 |                             cls.ToCString(), type_arguments.ToCString(), | 
 |                             other.ToCString()); | 
 |   // This function does not support Null, Never, dynamic, or void as type T0. | 
 |   classid_t this_cid = cls.id(); | 
 |   ASSERT(this_cid != kNullCid && this_cid != kNeverCid && | 
 |          this_cid != kDynamicCid && this_cid != kVoidCid); | 
 |   ASSERT(type_arguments.IsNull() || | 
 |          (type_arguments.Length() >= cls.NumTypeArguments())); | 
 |   // Type T1 must have a type class (e.g. not a type param or a function type). | 
 |   ASSERT(other.HasTypeClass()); | 
 |   const classid_t other_cid = other.type_class_id(); | 
 |   if (other_cid == kDynamicCid || other_cid == kVoidCid) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: true (right is top)\n"); | 
 |     return true; | 
 |   } | 
 |   // Left nullable: | 
 |   //   if T0 is S0? then: | 
 |   //     T0 <: T1 iff S0 <: T1 and Null <: T1 | 
 |   if ((nullability == Nullability::kNullable) && | 
 |       !Instance::NullIsAssignableTo(other)) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: false (nullability)\n"); | 
 |     return false; | 
 |   } | 
 |  | 
 |   // Right Object. | 
 |   if (other_cid == kObjectCid) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: true (right is Object)\n"); | 
 |     return true; | 
 |   } | 
 |  | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const Class& other_class = Class::Handle(zone, other.type_class()); | 
 |   const TypeArguments& other_type_arguments = | 
 |       TypeArguments::Handle(zone, other.arguments()); | 
  // Use the 'this_class' object as if it were the receiver of this method,
  // but instead of recursing, reset it to the superclass and loop.
 |   Class& this_class = Class::Handle(zone, cls.ptr()); | 
 |   while (true) { | 
 |     // Apply additional subtyping rules if T0 or T1 are 'FutureOr'. | 
 |  | 
 |     // Left FutureOr: | 
 |     //   if T0 is FutureOr<S0> then: | 
 |     //     T0 <: T1 iff Future<S0> <: T1 and S0 <: T1 | 
 |     if (this_cid == kFutureOrCid) { | 
 |       // Check Future<S0> <: T1. | 
 |       ObjectStore* object_store = IsolateGroup::Current()->object_store(); | 
 |       const Class& future_class = | 
 |           Class::Handle(zone, object_store->future_class()); | 
 |       ASSERT(!future_class.IsNull() && future_class.NumTypeParameters() == 1 && | 
 |              this_class.NumTypeParameters() == 1); | 
 |       ASSERT(type_arguments.IsNull() || type_arguments.Length() >= 1); | 
 |       if (Class::IsSubtypeOf(future_class, type_arguments, | 
 |                              Nullability::kNonNullable, other, space, | 
 |                              function_type_equivalence)) { | 
 |         // Check S0 <: T1. | 
 |         const AbstractType& type_arg = | 
 |             AbstractType::Handle(zone, type_arguments.TypeAtNullSafe(0)); | 
 |         if (type_arg.IsSubtypeOf(other, space, function_type_equivalence)) { | 
 |           TRACE_TYPE_CHECKS_VERBOSE("   - result: true (left is FutureOr)\n"); | 
 |           return true; | 
 |         } | 
 |       } | 
 |     } | 
 |  | 
 |     // Right FutureOr: | 
 |     //   if T1 is FutureOr<S1> then: | 
 |     //     T0 <: T1 iff any of the following hold: | 
 |     //     either T0 <: Future<S1> | 
 |     //     or T0 <: S1 | 
 |     //     or T0 is X0 and X0 has bound S0 and S0 <: T1  (checked elsewhere) | 
 |     if (other_cid == kFutureOrCid) { | 
 |       const AbstractType& other_type_arg = | 
 |           AbstractType::Handle(zone, other_type_arguments.TypeAtNullSafe(0)); | 
 |       // Check if S1 is a top type. | 
 |       if (other_type_arg.IsTopTypeForSubtyping()) { | 
 |         TRACE_TYPE_CHECKS_VERBOSE( | 
 |             "   - result: true (right is FutureOr top)\n"); | 
 |         return true; | 
 |       } | 
 |       // Check T0 <: Future<S1> when T0 is Future<S0>. | 
 |       if (this_class.IsFutureClass()) { | 
 |         const AbstractType& type_arg = | 
 |             AbstractType::Handle(zone, type_arguments.TypeAtNullSafe(0)); | 
 |         // If T0 is Future<S0>, then T0 <: Future<S1>, iff S0 <: S1. | 
 |         if (type_arg.IsSubtypeOf(other_type_arg, space, | 
 |                                  function_type_equivalence)) { | 
 |           TRACE_TYPE_CHECKS_VERBOSE( | 
 |               "   - result: true (left is Future, right is FutureOr)\n"); | 
 |           return true; | 
 |         } | 
 |       } | 
      // The check T0 <: Future<S1> when T0 is FutureOr<S0> was already done
      // above.
 |       // Check T0 <: S1. | 
 |       if (other_type_arg.HasTypeClass() && | 
 |           Class::IsSubtypeOf(this_class, type_arguments, nullability, | 
 |                              other_type_arg, space, | 
 |                              function_type_equivalence)) { | 
 |         TRACE_TYPE_CHECKS_VERBOSE( | 
 |             "   - result: true (right is FutureOr, subtype of arg)\n"); | 
 |         return true; | 
 |       } | 
 |     } | 
 |  | 
 |     // Check for reflexivity. | 
 |     if (this_class.ptr() == other_class.ptr()) { | 
 |       const intptr_t num_type_params = this_class.NumTypeParameters(); | 
 |       if (num_type_params == 0) { | 
 |         TRACE_TYPE_CHECKS_VERBOSE( | 
 |             "   - result: true (same non-generic class)\n"); | 
 |         return true; | 
 |       } | 
 |       // Check for covariance. | 
 |       if (other_type_arguments.IsNull()) { | 
 |         TRACE_TYPE_CHECKS_VERBOSE( | 
 |             "   - result: true (same class, dynamic type args)\n"); | 
 |         return true; | 
 |       } | 
 |       const intptr_t num_type_args = this_class.NumTypeArguments(); | 
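      // The type arguments vector is flattened: the superclass prefix comes
      // first, so this class's own type arguments start at from_index.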
 |       const intptr_t from_index = num_type_args - num_type_params; | 
 |       ASSERT(other_type_arguments.Length() == num_type_params); | 
 |       AbstractType& type = AbstractType::Handle(zone); | 
 |       AbstractType& other_type = AbstractType::Handle(zone); | 
 |       for (intptr_t i = 0; i < num_type_params; ++i) { | 
 |         type = type_arguments.TypeAtNullSafe(from_index + i); | 
 |         other_type = other_type_arguments.TypeAt(i); | 
 |         ASSERT(!type.IsNull() && !other_type.IsNull()); | 
 |         if (!type.IsSubtypeOf(other_type, space, function_type_equivalence)) { | 
 |           TRACE_TYPE_CHECKS_VERBOSE( | 
 |               "   - result: false (same class, type args mismatch)\n"); | 
 |           return false; | 
 |         } | 
 |       } | 
 |       TRACE_TYPE_CHECKS_VERBOSE( | 
 |           "   - result: true (same class, matching type args)\n"); | 
 |       return true; | 
 |     } | 
 |  | 
 |     // _Closure <: Function | 
 |     if (this_class.IsClosureClass() && other_class.IsDartFunctionClass()) { | 
 |       TRACE_TYPE_CHECKS_VERBOSE( | 
 |           "   - result: true (left is closure, right is Function)\n"); | 
 |       return true; | 
 |     } | 
 |  | 
 |     // Check for 'direct super type' specified in the implements clause | 
 |     // and check for transitivity at the same time. | 
 |     Array& interfaces = Array::Handle(zone, this_class.interfaces()); | 
 |     Type& interface = Type::Handle(zone); | 
 |     Class& interface_class = Class::Handle(zone); | 
 |     TypeArguments& interface_args = TypeArguments::Handle(zone); | 
 |     for (intptr_t i = 0; i < interfaces.Length(); i++) { | 
 |       interface ^= interfaces.At(i); | 
 |       ASSERT(interface.IsFinalized()); | 
 |       interface_class = interface.type_class(); | 
 |       interface_args = interface.arguments(); | 
 |       if (!interface_args.IsNull() && !interface_args.IsInstantiated()) { | 
 |         // This type class implements an interface that is parameterized with | 
 |         // generic type(s), e.g. it implements List<T>. | 
 |         // The uninstantiated type T must be instantiated using the type | 
 |         // parameters of this type before performing the type test. | 
 |         // The type arguments of this type that are referred to by the type | 
 |         // parameters of the interface are at the end of the type vector, | 
 |         // after the type arguments of the super type of this type. | 
 |         // The index of the type parameters is adjusted upon finalization. | 
 |         interface_args = interface_args.InstantiateFrom( | 
 |             type_arguments, Object::null_type_arguments(), kNoneFree, space); | 
 |       } | 
 |       interface_args = interface_class.GetInstanceTypeArguments( | 
 |           thread, interface_args, /*canonicalize=*/false); | 
 |       // In Dart 2, implementing Function has no meaning. | 
 |       // TODO(regis): Can we encounter and skip Object as well? | 
 |       if (interface_class.IsDartFunctionClass()) { | 
 |         continue; | 
 |       } | 
 |       if (Class::IsSubtypeOf(interface_class, interface_args, | 
 |                              Nullability::kNonNullable, other, space, | 
 |                              function_type_equivalence)) { | 
 |         TRACE_TYPE_CHECKS_VERBOSE("   - result: true (interface found)\n"); | 
 |         return true; | 
 |       } | 
 |     } | 
 |     // "Recurse" up the class hierarchy until we have reached the top. | 
 |     this_class = this_class.SuperClass(); | 
 |     if (this_class.IsNull()) { | 
 |       TRACE_TYPE_CHECKS_VERBOSE("   - result: false (supertype not found)\n"); | 
 |       return false; | 
 |     } | 
 |     this_cid = this_class.id(); | 
 |   } | 
 |   UNREACHABLE(); | 
 |   return false; | 
 | } | 
 |  | 
 | bool Class::IsTopLevel() const { | 
 |   return Name() == Symbols::TopLevel().ptr(); | 
 | } | 
 |  | 
 | bool Class::IsPrivate() const { | 
 |   return Library::IsPrivate(String::Handle(Name())); | 
 | } | 
 |  | 
 | FunctionPtr Class::LookupDynamicFunctionUnsafe(const String& name) const { | 
 |   return LookupFunctionReadLocked(name, kInstance); | 
 | } | 
 |  | 
 | FunctionPtr Class::LookupDynamicFunctionAllowPrivate(const String& name) const { | 
 |   return LookupFunctionAllowPrivate(name, kInstance); | 
 | } | 
 |  | 
 | FunctionPtr Class::LookupStaticFunction(const String& name) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   SafepointReadRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |   return LookupFunctionReadLocked(name, kStatic); | 
 | } | 
 |  | 
 | FunctionPtr Class::LookupStaticFunctionAllowPrivate(const String& name) const { | 
 |   return LookupFunctionAllowPrivate(name, kStatic); | 
 | } | 
 |  | 
 | FunctionPtr Class::LookupConstructor(const String& name) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   SafepointReadRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |   return LookupFunctionReadLocked(name, kConstructor); | 
 | } | 
 |  | 
 | FunctionPtr Class::LookupConstructorAllowPrivate(const String& name) const { | 
 |   return LookupFunctionAllowPrivate(name, kConstructor); | 
 | } | 
 |  | 
 | FunctionPtr Class::LookupFactory(const String& name) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   SafepointReadRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |   return LookupFunctionReadLocked(name, kFactory); | 
 | } | 
 |  | 
 | FunctionPtr Class::LookupFactoryAllowPrivate(const String& name) const { | 
 |   return LookupFunctionAllowPrivate(name, kFactory); | 
 | } | 
 |  | 
 | FunctionPtr Class::LookupFunctionAllowPrivate(const String& name) const { | 
 |   return LookupFunctionAllowPrivate(name, kAny); | 
 | } | 
 |  | 
 | FunctionPtr Class::LookupFunctionReadLocked(const String& name) const { | 
 |   return LookupFunctionReadLocked(name, kAny); | 
 | } | 
 |  | 
 | // Returns true if 'prefix' and 'accessor_name' match 'name'. | 
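// For example, with prefix "get:" and accessor_name "foo", the name
// "get:foo" matches.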
 | static bool MatchesAccessorName(const String& name, | 
 |                                 const char* prefix, | 
 |                                 intptr_t prefix_length, | 
 |                                 const String& accessor_name) { | 
 |   intptr_t name_len = name.Length(); | 
 |   intptr_t accessor_name_len = accessor_name.Length(); | 
 |  | 
 |   if (name_len != (accessor_name_len + prefix_length)) { | 
 |     return false; | 
 |   } | 
 |   for (intptr_t i = 0; i < prefix_length; i++) { | 
 |     if (name.CharAt(i) != prefix[i]) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   for (intptr_t i = 0, j = prefix_length; i < accessor_name_len; i++, j++) { | 
 |     if (name.CharAt(j) != accessor_name.CharAt(i)) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | FunctionPtr Class::CheckFunctionType(const Function& func, MemberKind kind) { | 
 |   if ((kind == kInstance) || (kind == kInstanceAllowAbstract)) { | 
 |     if (func.IsDynamicFunction(kind == kInstanceAllowAbstract)) { | 
 |       return func.ptr(); | 
 |     } | 
 |   } else if (kind == kStatic) { | 
 |     if (func.IsStaticFunction()) { | 
 |       return func.ptr(); | 
 |     } | 
 |   } else if (kind == kConstructor) { | 
 |     if (func.IsGenerativeConstructor()) { | 
 |       ASSERT(!func.is_static()); | 
 |       return func.ptr(); | 
 |     } | 
 |   } else if (kind == kFactory) { | 
 |     if (func.IsFactory()) { | 
 |       ASSERT(func.is_static()); | 
 |       return func.ptr(); | 
 |     } | 
 |   } else if (kind == kAny) { | 
 |     return func.ptr(); | 
 |   } | 
 |   return Function::null(); | 
 | } | 
 |  | 
 | FunctionPtr Class::LookupFunctionReadLocked(const String& name, | 
 |                                             MemberKind kind) const { | 
 |   ASSERT(!IsNull()); | 
 |   Thread* thread = Thread::Current(); | 
 |   RELEASE_ASSERT(is_finalized()); | 
  // The caller must already hold the program_lock, since this method can be
  // invoked while holding either the read or the write lock.
 | #if defined(DEBUG) | 
 |   ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadReader()); | 
 | #endif | 
 |   ASSERT(functions() != Array::null()); | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_FUNCTION_HANDLESCOPE(thread); | 
 |   Array& funcs = thread->ArrayHandle(); | 
 |   funcs = functions(); | 
 |   const intptr_t len = funcs.Length(); | 
 |   Function& function = thread->FunctionHandle(); | 
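  // Classes with many functions maintain a name -> function hash table so
  // that lookups can avoid the linear scans below.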
 |   if (len >= kFunctionLookupHashThreshold) { | 
    // TODO(dartbug.com/36097): We currently require a read lock in the
    // resolver to avoid read-write races on this hash table.
    // If we want to increase resolver speed by avoiding the need for a read
    // lock, we could make this hash table lock-free for readers.
 |     const Array& hash_table = | 
 |         Array::Handle(thread->zone(), untag()->functions_hash_table()); | 
 |     if (!hash_table.IsNull()) { | 
 |       ClassFunctionsSet set(hash_table.ptr()); | 
 |       REUSABLE_STRING_HANDLESCOPE(thread); | 
 |       function ^= set.GetOrNull(FunctionName(name, &(thread->StringHandle()))); | 
 |       // No mutations. | 
 |       ASSERT(set.Release().ptr() == hash_table.ptr()); | 
 |       return function.IsNull() ? Function::null() | 
 |                                : CheckFunctionType(function, kind); | 
 |     } | 
 |   } | 
 |   if (name.IsSymbol()) { | 
    // Quick symbol compare: symbols are canonicalized, so pointer equality
    // suffices.
 |     NoSafepointScope no_safepoint; | 
 |     for (intptr_t i = 0; i < len; i++) { | 
 |       function ^= funcs.At(i); | 
 |       if (function.name() == name.ptr()) { | 
 |         return CheckFunctionType(function, kind); | 
 |       } | 
 |     } | 
 |   } else { | 
 |     REUSABLE_STRING_HANDLESCOPE(thread); | 
 |     String& function_name = thread->StringHandle(); | 
 |     for (intptr_t i = 0; i < len; i++) { | 
 |       function ^= funcs.At(i); | 
 |       function_name = function.name(); | 
 |       if (function_name.Equals(name)) { | 
 |         return CheckFunctionType(function, kind); | 
 |       } | 
 |     } | 
 |   } | 
 |   // No function found. | 
 |   return Function::null(); | 
 | } | 
 |  | 
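// Unlike LookupFunctionReadLocked, this lookup acquires the program read
// lock itself and compares names with String::EqualsIgnoringPrivateKey, so a
// library-private name mangled with a private key suffix (illustratively, a
// name of the form "_foo@<key>") matches a lookup for the bare "_foo".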
 | FunctionPtr Class::LookupFunctionAllowPrivate(const String& name, | 
 |                                               MemberKind kind) const { | 
 |   ASSERT(!IsNull()); | 
 |   Thread* thread = Thread::Current(); | 
 |   RELEASE_ASSERT(is_finalized()); | 
 |   SafepointReadRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_FUNCTION_HANDLESCOPE(thread); | 
 |   REUSABLE_STRING_HANDLESCOPE(thread); | 
 |   Array& funcs = thread->ArrayHandle(); | 
 |   funcs = current_functions(); | 
 |   ASSERT(!funcs.IsNull()); | 
 |   const intptr_t len = funcs.Length(); | 
 |   Function& function = thread->FunctionHandle(); | 
 |   String& function_name = thread->StringHandle(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     function ^= funcs.At(i); | 
 |     function_name = function.name(); | 
 |     if (String::EqualsIgnoringPrivateKey(function_name, name)) { | 
 |       return CheckFunctionType(function, kind); | 
 |     } | 
 |   } | 
 |   // No function found. | 
 |   return Function::null(); | 
 | } | 
 |  | 
 | FunctionPtr Class::LookupGetterFunction(const String& name) const { | 
 |   return LookupAccessorFunction(kGetterPrefix, kGetterPrefixLength, name); | 
 | } | 
 |  | 
 | FunctionPtr Class::LookupSetterFunction(const String& name) const { | 
 |   return LookupAccessorFunction(kSetterPrefix, kSetterPrefixLength, name); | 
 | } | 
 |  | 
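// Searches current_functions() for a function whose name is exactly
// `prefix` followed by `name`, as tested by MatchesAccessorName above; the
// getter/setter prefix constants themselves are declared elsewhere.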
 | FunctionPtr Class::LookupAccessorFunction(const char* prefix, | 
 |                                           intptr_t prefix_length, | 
 |                                           const String& name) const { | 
 |   ASSERT(!IsNull()); | 
 |   Thread* thread = Thread::Current(); | 
 |   if (EnsureIsFinalized(thread) != Error::null()) { | 
 |     return Function::null(); | 
 |   } | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_FUNCTION_HANDLESCOPE(thread); | 
 |   REUSABLE_STRING_HANDLESCOPE(thread); | 
 |   Array& funcs = thread->ArrayHandle(); | 
 |   funcs = current_functions(); | 
 |   intptr_t len = funcs.Length(); | 
 |   Function& function = thread->FunctionHandle(); | 
 |   String& function_name = thread->StringHandle(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     function ^= funcs.At(i); | 
 |     function_name = function.name(); | 
 |     if (MatchesAccessorName(function_name, prefix, prefix_length, name)) { | 
 |       return function.ptr(); | 
 |     } | 
 |   } | 
 |  | 
 |   // No function found. | 
 |   return Function::null(); | 
 | } | 
 |  | 
 | FieldPtr Class::LookupInstanceField(const String& name) const { | 
 |   return LookupField(name, kInstance); | 
 | } | 
 |  | 
 | FieldPtr Class::LookupStaticField(const String& name) const { | 
 |   return LookupField(name, kStatic); | 
 | } | 
 |  | 
 | FieldPtr Class::LookupField(const String& name) const { | 
 |   return LookupField(name, kAny); | 
 | } | 
 |  | 
 | FieldPtr Class::LookupField(const String& name, MemberKind kind) const { | 
 |   ASSERT(!IsNull()); | 
 |   Thread* thread = Thread::Current(); | 
 |   if (EnsureIsFinalized(thread) != Error::null()) { | 
 |     return Field::null(); | 
 |   } | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_FIELD_HANDLESCOPE(thread); | 
 |   REUSABLE_STRING_HANDLESCOPE(thread); | 
 |   Array& flds = thread->ArrayHandle(); | 
 |   flds = fields(); | 
 |   ASSERT(!flds.IsNull()); | 
 |   intptr_t len = flds.Length(); | 
 |   Field& field = thread->FieldHandle(); | 
 |   if (name.IsSymbol()) { | 
 |     // Use fast raw pointer string compare for symbols. | 
 |     for (intptr_t i = 0; i < len; i++) { | 
 |       field ^= flds.At(i); | 
 |       if (name.ptr() == field.name()) { | 
 |         if (kind == kInstance) { | 
 |           return field.is_static() ? Field::null() : field.ptr(); | 
 |         } else if (kind == kStatic) { | 
 |           return field.is_static() ? field.ptr() : Field::null(); | 
 |         } | 
 |         ASSERT(kind == kAny); | 
 |         return field.ptr(); | 
 |       } | 
 |     } | 
 |   } else { | 
 |     String& field_name = thread->StringHandle(); | 
 |     for (intptr_t i = 0; i < len; i++) { | 
 |       field ^= flds.At(i); | 
 |       field_name = field.name(); | 
 |       if (name.Equals(field_name)) { | 
 |         if (kind == kInstance) { | 
 |           return field.is_static() ? Field::null() : field.ptr(); | 
 |         } else if (kind == kStatic) { | 
 |           return field.is_static() ? field.ptr() : Field::null(); | 
 |         } | 
 |         ASSERT(kind == kAny); | 
 |         return field.ptr(); | 
 |       } | 
 |     } | 
 |   } | 
 |   return Field::null(); | 
 | } | 
 |  | 
 | FieldPtr Class::LookupFieldAllowPrivate(const String& name, | 
 |                                         bool instance_only) const { | 
 |   ASSERT(!IsNull()); | 
 |   // Use slow string compare, ignoring privacy name mangling. | 
 |   Thread* thread = Thread::Current(); | 
 |   if (EnsureIsFinalized(thread) != Error::null()) { | 
 |     return Field::null(); | 
 |   } | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_FIELD_HANDLESCOPE(thread); | 
 |   REUSABLE_STRING_HANDLESCOPE(thread); | 
 |   Array& flds = thread->ArrayHandle(); | 
 |   flds = fields(); | 
 |   ASSERT(!flds.IsNull()); | 
 |   intptr_t len = flds.Length(); | 
 |   Field& field = thread->FieldHandle(); | 
 |   String& field_name = thread->StringHandle(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     field ^= flds.At(i); | 
 |     field_name = field.name(); | 
 |     if (field.is_static() && instance_only) { | 
 |       // If we only care about instance fields, skip statics. | 
 |       continue; | 
 |     } | 
 |     if (String::EqualsIgnoringPrivateKey(field_name, name)) { | 
 |       return field.ptr(); | 
 |     } | 
 |   } | 
 |   return Field::null(); | 
 | } | 
 |  | 
 | FieldPtr Class::LookupInstanceFieldAllowPrivate(const String& name) const { | 
 |   Field& field = Field::Handle(LookupFieldAllowPrivate(name, true)); | 
 |   if (!field.IsNull() && !field.is_static()) { | 
 |     return field.ptr(); | 
 |   } | 
 |   return Field::null(); | 
 | } | 
 |  | 
 | FieldPtr Class::LookupStaticFieldAllowPrivate(const String& name) const { | 
 |   Field& field = Field::Handle(LookupFieldAllowPrivate(name)); | 
 |   if (!field.IsNull() && field.is_static()) { | 
 |     return field.ptr(); | 
 |   } | 
 |   return Field::null(); | 
 | } | 
 |  | 
 | const char* Class::ToCString() const { | 
 |   NoSafepointScope no_safepoint; | 
 |   const Library& lib = Library::Handle(library()); | 
 |   const char* library_name = lib.IsNull() ? "" : lib.ToCString(); | 
 |   const char* class_name = String::Handle(Name()).ToCString(); | 
 |   return OS::SCreate(Thread::Current()->zone(), "%s Class: %s", library_name, | 
 |                      class_name); | 
 | } | 
 |  | 
 | // Thomas Wang, Integer Hash Functions. | 
 | // https://gist.github.com/badboy/6267743 | 
 | // "64 bit to 32 bit Hash Functions" | 
 | static uword Hash64To32(uint64_t v) { | 
 |   v = ~v + (v << 18); | 
 |   v = v ^ (v >> 31); | 
 |   v = v * 21; | 
 |   v = v ^ (v >> 11); | 
 |   v = v + (v << 6); | 
 |   v = v ^ (v >> 22); | 
 |   return static_cast<uint32_t>(v); | 
 | } | 
 |  | 
 | InstancePtr Class::LookupCanonicalInstance(Zone* zone, | 
 |                                            const Instance& value) const { | 
 |   ASSERT(this->ptr() == value.clazz()); | 
 |   ASSERT(is_finalized() || is_prefinalized()); | 
 |   Instance& canonical_value = Instance::Handle(zone); | 
 |   if (this->constants() != Array::null()) { | 
 |     CanonicalInstancesSet constants(zone, this->constants()); | 
 |     canonical_value ^= constants.GetOrNull(CanonicalInstanceKey(value)); | 
 |     this->set_constants(constants.Release()); | 
 |   } | 
 |   return canonical_value.ptr(); | 
 | } | 
 |  | 
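// Returns the canonical instance equal to `constant`, inserting `constant`
// into this class's constants table if no equal instance is present yet.
// The returned instance may therefore be a previously inserted equal
// instance rather than `constant` itself.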
 | InstancePtr Class::InsertCanonicalConstant(Zone* zone, | 
 |                                            const Instance& constant) const { | 
 |   ASSERT(constant.IsCanonical()); | 
 |   ASSERT(this->ptr() == constant.clazz()); | 
 |   Instance& canonical_value = Instance::Handle(zone); | 
 |   if (this->constants() == Array::null()) { | 
 |     CanonicalInstancesSet constants( | 
 |         HashTables::New<CanonicalInstancesSet>(128, Heap::kOld)); | 
 |     canonical_value ^= constants.InsertNewOrGet(CanonicalInstanceKey(constant)); | 
 |     this->set_constants(constants.Release()); | 
 |   } else { | 
 |     CanonicalInstancesSet constants(Thread::Current()->zone(), | 
 |                                     this->constants()); | 
 |     canonical_value ^= constants.InsertNewOrGet(CanonicalInstanceKey(constant)); | 
 |     this->set_constants(constants.Release()); | 
 |   } | 
 |   return canonical_value.ptr(); | 
 | } | 
 |  | 
 | // Scoped mapping FunctionType -> FunctionType. | 
 | // Used for tracking and updating nested generic function types | 
 | // and their type parameters. | 
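// Each instance links itself into a caller-provided chain, so nested
// instantiations form a stack of scopes that Find() walks from the innermost
// mapping outwards. Illustrative sketch (not an actual call site):
//
//   FunctionTypeMapping* scope = nullptr;
//   FunctionTypeMapping outer(zone, &scope, from_outer, to_outer);
//   FunctionTypeMapping inner(zone, &scope, from_inner, to_inner);
//   // scope->Find(query) now consults `inner` first, then `outer`.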
 | class FunctionTypeMapping : public ValueObject { | 
 |  public: | 
 |   FunctionTypeMapping(Zone* zone, | 
 |                       FunctionTypeMapping** mapping, | 
 |                       const FunctionType& from, | 
 |                       const FunctionType& to) | 
 |       : zone_(zone), parent_(*mapping), from_(from), to_(to) { | 
 |     // Add self to the linked list. | 
 |     *mapping = this; | 
 |   } | 
 |  | 
 |   const FunctionType* Find(const Object& from) const { | 
 |     if (!from.IsFunctionType()) { | 
 |       return nullptr; | 
 |     } | 
 |     for (const FunctionTypeMapping* scope = this; scope != nullptr; | 
 |          scope = scope->parent_) { | 
 |       if (scope->from_.ptr() == from.ptr()) { | 
 |         return &(scope->to_); | 
 |       } | 
 |     } | 
 |     return nullptr; | 
 |   } | 
 |  | 
 |   TypeParameterPtr MapTypeParameter(const TypeParameter& type_param) const { | 
 |     ASSERT(type_param.IsFunctionTypeParameter()); | 
 |     const FunctionType* new_owner = Find( | 
 |         FunctionType::Handle(zone_, type_param.parameterized_function_type())); | 
 |     if (new_owner != nullptr) { | 
 |       return new_owner->TypeParameterAt(type_param.index() - type_param.base(), | 
 |                                         type_param.nullability()); | 
 |     } | 
 |     return type_param.ptr(); | 
 |   } | 
 |  | 
 |   bool ContainsOwnersOfTypeParameters(const TypeParameter& p1, | 
 |                                       const TypeParameter& p2) const { | 
 |     auto& from = FunctionType::Handle(zone_, p1.parameterized_function_type()); | 
 |     const FunctionType* to = Find(from); | 
 |     if (to != nullptr) { | 
 |       return to->ptr() == p2.parameterized_function_type(); | 
 |     } | 
 |     from = p2.parameterized_function_type(); | 
 |     to = Find(from); | 
 |     if (to != nullptr) { | 
 |       return to->ptr() == p1.parameterized_function_type(); | 
 |     } | 
 |     return false; | 
 |   } | 
 |  | 
 |  private: | 
 |   Zone* zone_; | 
 |   const FunctionTypeMapping* const parent_; | 
 |   const FunctionType& from_; | 
 |   const FunctionType& to_; | 
 | }; | 
 |  | 
 | intptr_t TypeParameters::Length() const { | 
 |   if (IsNull() || untag()->names() == Array::null()) return 0; | 
 |   return Smi::Value(untag()->names()->untag()->length()); | 
 | } | 
 |  | 
 | void TypeParameters::set_names(const Array& value) const { | 
 |   ASSERT(!value.IsNull()); | 
 |   untag()->set_names(value.ptr()); | 
 | } | 
 |  | 
 | StringPtr TypeParameters::NameAt(intptr_t index) const { | 
 |   const Array& names_array = Array::Handle(names()); | 
 |   return String::RawCast(names_array.At(index)); | 
 | } | 
 |  | 
 | void TypeParameters::SetNameAt(intptr_t index, const String& value) const { | 
 |   const Array& names_array = Array::Handle(names()); | 
 |   names_array.SetAt(index, value); | 
 | } | 
 |  | 
 | void TypeParameters::set_flags(const Array& value) const { | 
 |   untag()->set_flags(value.ptr()); | 
 | } | 
 |  | 
 | void TypeParameters::set_bounds(const TypeArguments& value) const { | 
 |   // A null value represents a vector of dynamic. | 
 |   untag()->set_bounds(value.ptr()); | 
 | } | 
 |  | 
 | AbstractTypePtr TypeParameters::BoundAt(intptr_t index) const { | 
 |   const TypeArguments& upper_bounds = TypeArguments::Handle(bounds()); | 
 |   return upper_bounds.IsNull() ? Type::DynamicType() | 
 |                                : upper_bounds.TypeAt(index); | 
 | } | 
 |  | 
 | void TypeParameters::SetBoundAt(intptr_t index, | 
 |                                 const AbstractType& value) const { | 
 |   const TypeArguments& upper_bounds = TypeArguments::Handle(bounds()); | 
 |   upper_bounds.SetTypeAt(index, value); | 
 | } | 
 |  | 
 | bool TypeParameters::AllDynamicBounds() const { | 
 |   return bounds() == TypeArguments::null(); | 
 | } | 
 |  | 
 | void TypeParameters::set_defaults(const TypeArguments& value) const { | 
 |   // The null value represents a vector of dynamic. | 
 |   untag()->set_defaults(value.ptr()); | 
 | } | 
 |  | 
 | AbstractTypePtr TypeParameters::DefaultAt(intptr_t index) const { | 
 |   const TypeArguments& default_type_args = TypeArguments::Handle(defaults()); | 
 |   return default_type_args.IsNull() ? Type::DynamicType() | 
 |                                     : default_type_args.TypeAt(index); | 
 | } | 
 |  | 
 | void TypeParameters::SetDefaultAt(intptr_t index, | 
 |                                   const AbstractType& value) const { | 
 |   const TypeArguments& default_type_args = TypeArguments::Handle(defaults()); | 
 |   default_type_args.SetTypeAt(index, value); | 
 | } | 
 |  | 
 | bool TypeParameters::AllDynamicDefaults() const { | 
 |   return defaults() == TypeArguments::null(); | 
 | } | 
 |  | 
 | void TypeParameters::AllocateFlags(Heap::Space space) const { | 
 |   const intptr_t len = (Length() + kFlagsPerSmiMask) >> kFlagsPerSmiShift; | 
 |   const Array& flags_array = Array::Handle(Array::New(len, space)); | 
 |   // Initialize flags to 0. | 
 |   const Smi& zero = Smi::Handle(Smi::New(0)); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     flags_array.SetAt(i, zero); | 
 |   } | 
 |   set_flags(flags_array); | 
 | } | 
 |  | 
 | void TypeParameters::OptimizeFlags() const { | 
 |   if (untag()->flags() == Array::null()) return;  // Already optimized. | 
 |   const intptr_t len = (Length() + kFlagsPerSmiMask) >> kFlagsPerSmiShift; | 
 |   const Array& flags_array = Array::Handle(flags()); | 
 |   const Smi& zero = Smi::Handle(Smi::New(0)); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     if (flags_array.At(i) != zero.ptr()) return; | 
 |   } | 
 |   set_flags(Object::null_array()); | 
 | } | 
 |  | 
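// The flags array packs one flag bit per type parameter into Smis: the flag
// for `index` lives in array slot (index >> kFlagsPerSmiShift) at bit
// position (index & kFlagsPerSmiMask).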
 | bool TypeParameters::IsGenericCovariantImplAt(intptr_t index) const { | 
 |   if (untag()->flags() == Array::null()) return false; | 
 |   const intptr_t flag = Smi::Value( | 
 |       Smi::RawCast(Array::Handle(flags()).At(index >> kFlagsPerSmiShift))); | 
  return (flag & (1 << (index & kFlagsPerSmiMask))) != 0;
 | } | 
 |  | 
 | void TypeParameters::SetIsGenericCovariantImplAt(intptr_t index, | 
 |                                                  bool value) const { | 
 |   const Array& flg = Array::Handle(flags()); | 
 |   intptr_t flag = Smi::Value(Smi::RawCast(flg.At(index >> kFlagsPerSmiShift))); | 
  if (value) {
    flag |= 1 << (index & kFlagsPerSmiMask);
  } else {
    flag &= ~(1 << (index & kFlagsPerSmiMask));
  }
 |   flg.SetAt(index >> kFlagsPerSmiShift, Smi::Handle(Smi::New(flag))); | 
 | } | 
 |  | 
 | void TypeParameters::Print(Thread* thread, | 
 |                            Zone* zone, | 
 |                            bool are_class_type_parameters, | 
 |                            intptr_t base, | 
 |                            NameVisibility name_visibility, | 
 |                            BaseTextBuffer* printer) const { | 
 |   String& name = String::Handle(zone); | 
 |   AbstractType& type = AbstractType::Handle(zone); | 
 |   const intptr_t num_type_params = Length(); | 
 |   for (intptr_t i = 0; i < num_type_params; i++) { | 
 |     if (are_class_type_parameters) { | 
 |       name = NameAt(i); | 
 |       printer->AddString(name.ToCString()); | 
 |     } else { | 
 |       printer->AddString(TypeParameter::CanonicalNameCString( | 
 |           are_class_type_parameters, base, base + i)); | 
 |     } | 
 |     if (FLAG_show_internal_names || !AllDynamicBounds()) { | 
 |       type = BoundAt(i); | 
 |       // Do not print default bound. | 
 |       if (!type.IsNull() && (FLAG_show_internal_names || !type.IsObjectType() || | 
 |                              type.IsNonNullable())) { | 
 |         printer->AddString(" extends "); | 
 |         type.PrintName(name_visibility, printer); | 
 |         if (FLAG_show_internal_names && !AllDynamicDefaults()) { | 
 |           type = DefaultAt(i); | 
 |           if (!type.IsNull() && | 
 |               (FLAG_show_internal_names || !type.IsDynamicType())) { | 
 |             printer->AddString(" defaults to "); | 
 |             type.PrintName(name_visibility, printer); | 
 |           } | 
 |         } | 
 |       } | 
 |     } | 
 |     if (i != num_type_params - 1) { | 
 |       printer->AddString(", "); | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | const char* TypeParameters::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "TypeParameters: null"; | 
 |   } | 
 |   auto thread = Thread::Current(); | 
 |   auto zone = thread->zone(); | 
 |   ZoneTextBuffer buffer(zone); | 
 |   buffer.AddString("TypeParameters: "); | 
 |   Print(thread, zone, true, 0, kInternalName, &buffer); | 
 |   return buffer.buffer(); | 
 | } | 
 |  | 
 | TypeParametersPtr TypeParameters::New(Heap::Space space) { | 
 |   ASSERT(Object::type_parameters_class() != Class::null()); | 
 |   return Object::Allocate<TypeParameters>(space); | 
 | } | 
 |  | 
 | TypeParametersPtr TypeParameters::New(intptr_t count, Heap::Space space) { | 
 |   const TypeParameters& result = | 
 |       TypeParameters::Handle(TypeParameters::New(space)); | 
 |   // Create an [ Array ] of [ String ] objects to represent the names. | 
 |   // Create a [ TypeArguments ] vector representing the bounds. | 
 |   // Create a [ TypeArguments ] vector representing the defaults. | 
  // Create an [ Array ] of [ Smi ] objects to represent the flags.
 |   const Array& names_array = Array::Handle(Array::New(count, space)); | 
 |   result.set_names(names_array); | 
 |   TypeArguments& type_args = TypeArguments::Handle(); | 
 |   type_args = TypeArguments::New(count, Heap::kNew);  // Will get canonicalized. | 
 |   result.set_bounds(type_args); | 
 |   type_args = TypeArguments::New(count, Heap::kNew);  // Will get canonicalized. | 
 |   result.set_defaults(type_args); | 
 |   result.AllocateFlags(space);  // Will get optimized. | 
 |   return result.ptr(); | 
 | } | 
 |  | 
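// Computes a compact nullability signature for this vector, giving each type
// kNullabilityBitsPerType bits. For example, a two-element vector <T?, S>
// (T nullable, S non-nullable) is encoded as
//   kNullableBit | (kNonNullableBit << kNullabilityBitsPerType).
// Vectors longer than kNullabilityMaxTypes are conservatively encoded as 0.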
 | intptr_t TypeArguments::ComputeNullability() const { | 
 |   if (IsNull()) return 0; | 
 |   const intptr_t num_types = Length(); | 
 |   intptr_t result = 0; | 
 |   if (num_types <= kNullabilityMaxTypes) { | 
 |     AbstractType& type = AbstractType::Handle(); | 
 |     for (intptr_t i = 0; i < num_types; i++) { | 
 |       type = TypeAt(i); | 
 |       intptr_t type_bits = 0; | 
 |       if (!type.IsNull()) { | 
 |         switch (type.nullability()) { | 
 |           case Nullability::kNullable: | 
 |             type_bits = kNullableBit; | 
 |             break; | 
 |           case Nullability::kNonNullable: | 
 |             type_bits = kNonNullableBit; | 
 |             break; | 
 |           default: | 
 |             UNREACHABLE(); | 
 |         } | 
 |       } | 
 |       result |= (type_bits << (i * kNullabilityBitsPerType)); | 
 |     } | 
 |   } | 
 |   set_nullability(result); | 
 |   return result; | 
 | } | 
 |  | 
 | void TypeArguments::set_nullability(intptr_t value) const { | 
 |   untag()->set_nullability(Smi::New(value)); | 
 | } | 
 |  | 
 | uword TypeArguments::HashForRange(intptr_t from_index, intptr_t len) const { | 
 |   if (IsNull()) return kAllDynamicHash; | 
 |   if (IsRaw(from_index, len)) return kAllDynamicHash; | 
 |   uint32_t result = 0; | 
 |   AbstractType& type = AbstractType::Handle(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     type = TypeAt(from_index + i); | 
 |     ASSERT(!type.IsNull()); | 
 |     result = CombineHashes(result, type.Hash()); | 
 |   } | 
 |   result = FinalizeHash(result, kHashBits); | 
 |   return result; | 
 | } | 
 |  | 
 | uword TypeArguments::ComputeHash() const { | 
 |   if (IsNull()) return kAllDynamicHash; | 
 |   const uword result = HashForRange(0, Length()); | 
 |   ASSERT(result != 0); | 
 |   SetHash(result); | 
 |   return result; | 
 | } | 
 |  | 
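// Returns a canonicalized vector holding the first `other_length` types of
// `other` followed by the types of this vector. Illustratively, prepending
// other = <C> (other_length 1) to this = <A, B> with total_length 3 yields
// <C, A, B>; a null vector contributes dynamic for each of its slots.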
 | TypeArgumentsPtr TypeArguments::Prepend(Zone* zone, | 
 |                                         const TypeArguments& other, | 
 |                                         intptr_t other_length, | 
 |                                         intptr_t total_length) const { | 
 |   if (other_length == 0) { | 
 |     ASSERT(IsCanonical()); | 
 |     return ptr(); | 
 |   } else if (other_length == total_length) { | 
 |     ASSERT(other.IsCanonical()); | 
 |     return other.ptr(); | 
 |   } else if (IsNull() && other.IsNull()) { | 
 |     return TypeArguments::null(); | 
 |   } | 
 |   const TypeArguments& result = | 
 |       TypeArguments::Handle(zone, TypeArguments::New(total_length, Heap::kNew)); | 
 |   AbstractType& type = AbstractType::Handle(zone); | 
 |   for (intptr_t i = 0; i < other_length; i++) { | 
 |     type = other.IsNull() ? Type::DynamicType() : other.TypeAt(i); | 
 |     result.SetTypeAt(i, type); | 
 |   } | 
 |   for (intptr_t i = other_length; i < total_length; i++) { | 
 |     type = IsNull() ? Type::DynamicType() : TypeAt(i - other_length); | 
 |     result.SetTypeAt(i, type); | 
 |   } | 
 |   return result.Canonicalize(Thread::Current()); | 
 | } | 
 |  | 
 | TypeArgumentsPtr TypeArguments::ConcatenateTypeParameters( | 
 |     Zone* zone, | 
 |     const TypeArguments& other) const { | 
 |   ASSERT(!IsNull() && !other.IsNull()); | 
 |   const intptr_t this_len = Length(); | 
 |   const intptr_t other_len = other.Length(); | 
 |   const auto& result = TypeArguments::Handle( | 
 |       zone, TypeArguments::New(this_len + other_len, Heap::kNew)); | 
 |   auto& type = AbstractType::Handle(zone); | 
 |   for (intptr_t i = 0; i < this_len; ++i) { | 
 |     type = TypeAt(i); | 
 |     result.SetTypeAt(i, type); | 
 |   } | 
 |   for (intptr_t i = 0; i < other_len; ++i) { | 
 |     type = other.TypeAt(i); | 
 |     result.SetTypeAt(this_len + i, type); | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | InstantiationMode TypeArguments::GetInstantiationMode(Zone* zone, | 
 |                                                       const Function* function, | 
 |                                                       const Class* cls) const { | 
 |   if (IsNull() || IsInstantiated()) { | 
 |     return InstantiationMode::kIsInstantiated; | 
 |   } | 
 |   if (function != nullptr) { | 
 |     if (CanShareFunctionTypeArguments(*function)) { | 
 |       return InstantiationMode::kSharesFunctionTypeArguments; | 
 |     } | 
 |     if (cls == nullptr) { | 
 |       cls = &Class::Handle(zone, function->Owner()); | 
 |     } | 
 |   } | 
 |   if (cls != nullptr) { | 
 |     if (CanShareInstantiatorTypeArguments(*cls)) { | 
 |       return InstantiationMode::kSharesInstantiatorTypeArguments; | 
 |     } | 
 |   } | 
 |   return InstantiationMode::kNeedsInstantiation; | 
 | } | 
 |  | 
 | StringPtr TypeArguments::Name() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   PrintSubvectorName(0, Length(), kInternalName, &printer); | 
 |   return Symbols::New(thread, printer.buffer()); | 
 | } | 
 |  | 
 | StringPtr TypeArguments::UserVisibleName() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   PrintSubvectorName(0, Length(), kUserVisibleName, &printer); | 
 |   return Symbols::New(thread, printer.buffer()); | 
 | } | 
 |  | 
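// Prints `len` types starting at `from_index` in angle brackets; e.g. a
// two-element vector holding int and String prints as "<int, String>".
// Indices past Length() print as "dynamic", and null (unfinalized) entries
// print as "null".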
 | void TypeArguments::PrintSubvectorName(intptr_t from_index, | 
 |                                        intptr_t len, | 
 |                                        NameVisibility name_visibility, | 
 |                                        BaseTextBuffer* printer) const { | 
 |   printer->AddString("<"); | 
 |   AbstractType& type = AbstractType::Handle(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     if (from_index + i < Length()) { | 
 |       type = TypeAt(from_index + i); | 
 |       if (type.IsNull()) { | 
 |         printer->AddString("null");  // Unfinalized vector. | 
 |       } else { | 
 |         type.PrintName(name_visibility, printer); | 
 |       } | 
 |     } else { | 
 |       printer->AddString("dynamic"); | 
 |     } | 
 |     if (i < len - 1) { | 
 |       printer->AddString(", "); | 
 |     } | 
 |   } | 
 |   printer->AddString(">"); | 
 | } | 
 |  | 
 | void TypeArguments::PrintTo(BaseTextBuffer* buffer) const { | 
 |   buffer->AddString("TypeArguments: "); | 
 |   if (IsNull()) { | 
 |     return buffer->AddString("null"); | 
 |   } | 
 |   buffer->Printf("(H%" Px ")", Smi::Value(untag()->hash())); | 
 |   auto& type_at = AbstractType::Handle(); | 
 |   for (intptr_t i = 0; i < Length(); i++) { | 
 |     type_at = TypeAt(i); | 
 |     buffer->Printf(" [%s]", type_at.IsNull() ? "null" : type_at.ToCString()); | 
 |   } | 
 | } | 
 |  | 
 | bool TypeArguments::IsSubvectorEquivalent( | 
 |     const TypeArguments& other, | 
 |     intptr_t from_index, | 
 |     intptr_t len, | 
 |     TypeEquality kind, | 
 |     FunctionTypeMapping* function_type_equivalence) const { | 
 |   if (this->ptr() == other.ptr()) { | 
 |     return true; | 
 |   } | 
 |   if (kind == TypeEquality::kCanonical) { | 
 |     if (IsNull() || other.IsNull()) { | 
 |       return false; | 
 |     } | 
 |     if (Length() != other.Length()) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   AbstractType& type = AbstractType::Handle(); | 
 |   AbstractType& other_type = AbstractType::Handle(); | 
 |   for (intptr_t i = from_index; i < from_index + len; i++) { | 
 |     type = IsNull() ? Type::DynamicType() : TypeAt(i); | 
 |     ASSERT(!type.IsNull()); | 
 |     other_type = other.IsNull() ? Type::DynamicType() : other.TypeAt(i); | 
 |     ASSERT(!other_type.IsNull()); | 
 |     if (!type.IsEquivalent(other_type, kind, function_type_equivalence)) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | bool TypeArguments::IsDynamicTypes(bool raw_instantiated, | 
 |                                    intptr_t from_index, | 
 |                                    intptr_t len) const { | 
 |   ASSERT(Length() >= (from_index + len)); | 
 |   AbstractType& type = AbstractType::Handle(); | 
 |   Class& type_class = Class::Handle(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     type = TypeAt(from_index + i); | 
 |     if (type.IsNull()) { | 
 |       return false; | 
 |     } | 
 |     if (!type.HasTypeClass()) { | 
 |       if (raw_instantiated && type.IsTypeParameter()) { | 
 |         // An uninstantiated type parameter is equivalent to dynamic. | 
 |         continue; | 
 |       } | 
 |       return false; | 
 |     } | 
 |     type_class = type.type_class(); | 
 |     if (!type_class.IsDynamicClass()) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | TypeArguments::Cache::Cache(Zone* zone, const TypeArguments& source) | 
 |     : zone_(ASSERT_NOTNULL(zone)), | 
 |       cache_container_(&source), | 
 |       data_(Array::Handle(source.instantiations())), | 
 |       smi_handle_(Smi::Handle(zone)) { | 
 |   ASSERT(IsolateGroup::Current() | 
 |              ->type_arguments_canonicalization_mutex() | 
 |              ->IsOwnedByCurrentThread()); | 
 | } | 
 |  | 
 | TypeArguments::Cache::Cache(Zone* zone, const Array& array) | 
 |     : zone_(ASSERT_NOTNULL(zone)), | 
 |       cache_container_(nullptr), | 
 |       data_(Array::Handle(array.ptr())), | 
 |       smi_handle_(Smi::Handle(zone)) { | 
 |   ASSERT(IsolateGroup::Current() | 
 |              ->type_arguments_canonicalization_mutex() | 
 |              ->IsOwnedByCurrentThread()); | 
 | } | 
 |  | 
 | bool TypeArguments::Cache::IsHash(const Array& array) { | 
 |   return array.Length() > kMaxLinearCacheSize; | 
 | } | 
 |  | 
 | intptr_t TypeArguments::Cache::NumOccupied(const Array& array) { | 
 |   return NumOccupiedBits::decode( | 
 |       RawSmiValue(Smi::RawCast(array.AtAcquire(kMetadataIndex)))); | 
 | } | 
 |  | 
 | #if defined(DEBUG) | 
 | bool TypeArguments::Cache::IsValidStorageLocked(const Array& array) { | 
 |   // We only require the mutex be held so we don't need to use acquire/release | 
 |   // semantics to access and set the number of occupied entries in the header. | 
 |   ASSERT(IsolateGroup::Current() | 
 |              ->type_arguments_canonicalization_mutex() | 
 |              ->IsOwnedByCurrentThread()); | 
 |   // Quick check against the empty linear cache. | 
 |   if (array.ptr() == EmptyStorage().ptr()) return true; | 
 |   const intptr_t num_occupied = NumOccupied(array); | 
 |   // We should be using the same shared value for an empty cache. | 
 |   if (num_occupied == 0) return false; | 
 |   const intptr_t storage_len = array.Length(); | 
 |   // All caches have the metadata followed by a series of entries. | 
 |   if ((storage_len % kEntrySize) != kHeaderSize) return false; | 
 |   const intptr_t num_entries = NumEntries(array); | 
 |   // Linear caches contain at least one unoccupied entry, and hash-based caches | 
 |   // grow prior to hitting 100% occupancy. | 
 |   if (num_occupied >= num_entries) return false; | 
 |   // In a linear cache, all entries with indexes smaller than [num_occupied] | 
 |   // should be occupied and ones greater than or equal should be unoccupied. | 
 |   const bool is_linear_cache = IsLinear(array); | 
 |   // The capacity of a hash-based cache must be a power of two (see | 
 |   // EnsureCapacityLocked as to why). | 
 |   if (!is_linear_cache) { | 
 |     if (!Utils::IsPowerOfTwo(num_entries)) return false; | 
 |     const intptr_t metadata = | 
 |         RawSmiValue(Smi::RawCast(array.AtAcquire(kMetadataIndex))); | 
 |     if ((1 << EntryCountLog2Bits::decode(metadata)) != num_entries) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   for (intptr_t i = 0; i < num_entries; i++) { | 
 |     const intptr_t index = kHeaderSize + i * kEntrySize; | 
 |     if (array.At(index + kSentinelIndex) == Sentinel()) { | 
 |       if (is_linear_cache && i < num_occupied) return false; | 
 |       continue; | 
 |     } | 
 |     if (is_linear_cache && i >= num_occupied) return false; | 
 |     // The elements of an occupied entry are all TypeArguments values. | 
 |     for (intptr_t j = index; j < index + kEntrySize; j++) { | 
 |       if (!array.At(j)->IsHeapObject()) return false; | 
 |       if (array.At(j) == Object::null()) continue;  // null is a valid TAV. | 
 |       if (!array.At(j)->IsTypeArguments()) return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 | #endif | 
 |  | 
 | bool TypeArguments::Cache::IsOccupied(intptr_t entry) const { | 
 |   InstantiationsCacheTable table(data_); | 
 |   ASSERT(entry >= 0 && entry < table.Length()); | 
 |   return table.At(entry).Get<kSentinelIndex>() != Sentinel(); | 
 | } | 
 |  | 
 | TypeArgumentsPtr TypeArguments::Cache::Retrieve(intptr_t entry) const { | 
 |   ASSERT(IsOccupied(entry)); | 
 |   InstantiationsCacheTable table(data_); | 
 |   return table.At(entry).Get<kInstantiatedTypeArgsIndex>(); | 
 | } | 
 |  | 
 | intptr_t TypeArguments::Cache::NumEntries(const Array& array) { | 
 |   InstantiationsCacheTable table(array); | 
 |   return table.Length(); | 
 | } | 
 |  | 
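// Probes `array` for the given key pair. Linear caches are scanned from the
// front. Hash-based caches start at the key hash masked to the power-of-two
// capacity and use triangle-number probing: the probe distance grows by one
// after every collision, so the offsets visited from the starting slot are
// 0, 1, 3, 6, 10, ... modulo the capacity.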
 | TypeArguments::Cache::KeyLocation TypeArguments::Cache::FindKeyOrUnused( | 
 |     const Array& array, | 
 |     const TypeArguments& instantiator_tav, | 
 |     const TypeArguments& function_tav) { | 
 |   const bool is_hash = IsHash(array); | 
 |   InstantiationsCacheTable table(array); | 
 |   const intptr_t num_entries = table.Length(); | 
 |   // For a linear cache, start at the first entry and probe linearly. This can | 
 |   // be done because a linear cache always has at least one unoccupied entry | 
 |   // after all the occupied ones. | 
 |   intptr_t probe = 0; | 
 |   intptr_t probe_distance = 1; | 
 |   if (is_hash) { | 
 |     // For a hash-based cache, instead start at an entry determined by the hash | 
 |     // of the keys. | 
 |     auto hash = FinalizeHash( | 
 |         CombineHashes(instantiator_tav.Hash(), function_tav.Hash())); | 
 |     probe = hash & (num_entries - 1); | 
 |   } | 
 |   while (true) { | 
 |     const auto& tuple = table.At(probe); | 
 |     if (tuple.Get<kSentinelIndex>() == Sentinel()) break; | 
 |     if ((tuple.Get<kInstantiatorTypeArgsIndex>() == instantiator_tav.ptr()) && | 
 |         (tuple.Get<kFunctionTypeArgsIndex>() == function_tav.ptr())) { | 
 |       return {probe, true}; | 
 |     } | 
 |     // Advance probe by the current probing distance. | 
 |     probe = probe + probe_distance; | 
 |     if (is_hash) { | 
 |       // Wrap around if the probe goes off the end of the entries array. | 
 |       probe = probe & (num_entries - 1); | 
 |       // We had a collision, so increase the probe distance. See comment in | 
 |       // EnsureCapacityLocked for an explanation of how this hits all slots. | 
 |       probe_distance++; | 
 |     } | 
 |   } | 
 |   // We should always get the next slot for a linear cache. | 
 |   ASSERT(is_hash || probe == NumOccupied(array)); | 
 |   return {probe, false}; | 
 | } | 
 |  | 
 | TypeArguments::Cache::KeyLocation TypeArguments::Cache::AddEntry( | 
 |     intptr_t entry, | 
 |     const TypeArguments& instantiator_tav, | 
 |     const TypeArguments& function_tav, | 
 |     const TypeArguments& instantiated_tav) const { | 
  // Caches created in tests without a backing TypeArguments object are never
  // mutated, so cache_container_ must be set here.
 |   ASSERT(cache_container_ != nullptr); | 
 | #if defined(DEBUG) | 
 |   auto loc = FindKeyOrUnused(instantiator_tav, function_tav); | 
 |   ASSERT_EQUAL(loc.entry, entry); | 
 |   ASSERT(!loc.present); | 
 | #endif | 
 |   // Double-check we got the expected entry index when adding to a linear array. | 
 |   ASSERT(!IsLinear() || entry == NumOccupied()); | 
 |   const intptr_t new_occupied = NumOccupied() + 1; | 
  const bool storage_changed = EnsureCapacityLocked(new_occupied);
  // Note that this call to IsLinear() may return a different result than the
  // earlier one, since EnsureCapacityLocked() may have swapped to hash-based
  // storage.
 |   if (storage_changed && !IsLinear()) { | 
 |     // The capacity of the array has changed, and the capacity is used when | 
 |     // probing further into the array due to collisions. Thus, we need to redo | 
 |     // the entry index calculation. | 
 |     auto loc = FindKeyOrUnused(instantiator_tav, function_tav); | 
 |     ASSERT(!loc.present); | 
 |     entry = loc.entry; | 
 |   } | 
 |  | 
 |   // Go ahead and increment the number of occupied entries prior to adding the | 
 |   // entry. Use a store-release barrier in case of concurrent readers. | 
 |   const intptr_t metadata = RawSmiValue(Smi::RawCast(data_.At(kMetadataIndex))); | 
 |   smi_handle_ = Smi::New(NumOccupiedBits::update(new_occupied, metadata)); | 
 |   data_.SetAtRelease(kMetadataIndex, smi_handle_); | 
 |  | 
 |   InstantiationsCacheTable table(data_); | 
 |   const auto& tuple = table.At(entry); | 
 |   // The parts of the tuple that aren't used for sentinel checking are only | 
 |   // retrieved if the entry is occupied. Entries in the cache are never deleted, | 
 |   // so once the entry is marked as occupied, the contents of that entry never | 
 |   // change. Thus, we don't need store-release barriers here. | 
 |   tuple.Set<kFunctionTypeArgsIndex>(function_tav); | 
 |   tuple.Set<kInstantiatedTypeArgsIndex>(instantiated_tav); | 
 |   // For the sentinel position, though, we do. | 
 |   static_assert( | 
 |       kSentinelIndex == kInstantiatorTypeArgsIndex, | 
 |       "the sentinel position is not protected with a store-release barrier"); | 
 |   tuple.Set<kInstantiatorTypeArgsIndex, std::memory_order_release>( | 
 |       instantiator_tav); | 
 |  | 
 |   if (storage_changed) { | 
 |     // Only check for validity on growth, just to keep the overhead on DEBUG | 
 |     // builds down. | 
 |     DEBUG_ASSERT(IsValidStorageLocked(data_)); | 
 |     // Update the container of the original cache to point to the new one. | 
 |     cache_container_->set_instantiations(data_); | 
 |   } | 
 |  | 
 |   return {entry, true}; | 
 | } | 
 |  | 
 | SmiPtr TypeArguments::Cache::Sentinel() { | 
 |   return Smi::New(kSentinelValue); | 
 | } | 
 |  | 
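// Grows the backing store, if needed, to fit `new_occupied` entries and
// returns whether the storage changed. A linear cache grows by roughly 50%
// (plus one entry) until kMaxLinearCacheEntries is exceeded, at which point
// it is converted to a hash-based cache; a hash-based cache doubles its
// capacity, which always remains a power of two.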
bool TypeArguments::Cache::EnsureCapacityLocked(intptr_t new_occupied) const {
 |   ASSERT(new_occupied > NumOccupied()); | 
 |   // How many entries are in the current array (including unoccupied entries). | 
 |   const intptr_t current_capacity = NumEntries(); | 
 |  | 
 |   // Early returns for cases where no growth is needed. | 
 |   const bool is_linear = IsLinear(); | 
 |   if (is_linear) { | 
 |     // We need at least one unoccupied entry in addition to the occupied ones. | 
 |     if (current_capacity > new_occupied) return false; | 
 |   } else { | 
 |     if (LoadFactor(new_occupied, current_capacity) < kMaxLoadFactor) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |  | 
 |   if (new_occupied <= kMaxLinearCacheEntries) { | 
 |     ASSERT(is_linear); | 
 |     // Not enough room for both the new entry and at least one unoccupied | 
 |     // entry, so grow the tuple capacity of the linear cache by about 50%, | 
 |     // ensuring that space for at least one new tuple is added, capping the | 
 |     // total number of occupied entries to the max allowed. | 
 |     const intptr_t new_capacity = | 
 |         Utils::Minimum(current_capacity + (current_capacity >> 1), | 
 |                        kMaxLinearCacheEntries) + | 
 |         1; | 
 |     const intptr_t cache_size = kHeaderSize + new_capacity * kEntrySize; | 
 |     ASSERT(cache_size <= kMaxLinearCacheSize); | 
 |     data_ = Array::Grow(data_, cache_size, Heap::kOld); | 
 |     ASSERT(!data_.IsNull()); | 
 |     // No need to adjust the number of occupied entries or old entries, as they | 
 |     // are copied over by Array::Grow. Just mark any new entries as unoccupied. | 
 |     smi_handle_ = Sentinel(); | 
 |     InstantiationsCacheTable table(data_); | 
 |     for (intptr_t i = current_capacity; i < new_capacity; i++) { | 
 |       const auto& tuple = table.At(i); | 
 |       tuple.Set<kSentinelIndex>(smi_handle_); | 
 |     } | 
 |     return true; | 
 |   } | 
 |  | 
 |   // Either we're converting a linear cache into a hash-based cache, or the | 
 |   // load factor of the hash-based cache has increased to the point where we | 
 |   // need to grow it. | 
 |   const intptr_t new_capacity = | 
 |       is_linear ? kNumInitialHashCacheEntries : 2 * current_capacity; | 
 |   // Because we use quadratic (actually triangle number) probing it is | 
 |   // important that the size is a power of two (otherwise we could fail to | 
 |   // find an empty slot).  This is described in Knuth's The Art of Computer | 
 |   // Programming Volume 2, Chapter 6.4, exercise 20 (solution in the | 
 |   // appendix, 2nd edition). | 
 |   ASSERT(Utils::IsPowerOfTwo(new_capacity)); | 
 |   ASSERT(LoadFactor(new_occupied, new_capacity) < kMaxLoadFactor); | 
 |   const intptr_t new_size = kHeaderSize + new_capacity * kEntrySize; | 
 |   const auto& new_data = | 
 |       Array::Handle(zone_, Array::NewUninitialized(new_size, Heap::kOld)); | 
 |   ASSERT(!new_data.IsNull()); | 
 |   // First set up the metadata in new_data. | 
 |   const intptr_t metadata = RawSmiValue(Smi::RawCast(data_.At(kMetadataIndex))); | 
 |   smi_handle_ = Smi::New(EntryCountLog2Bits::update( | 
 |       Utils::ShiftForPowerOfTwo(new_capacity), metadata)); | 
 |   new_data.SetAt(kMetadataIndex, smi_handle_); | 
 |   // Then mark all the entries in new_data as unoccupied. | 
 |   smi_handle_ = Sentinel(); | 
 |   InstantiationsCacheTable to_table(new_data); | 
 |   for (const auto& tuple : to_table) { | 
 |     tuple.Set<kSentinelIndex>(smi_handle_); | 
 |   } | 
 |   // Finally, copy over the entries. | 
 |   auto& instantiator_tav = TypeArguments::Handle(zone_); | 
 |   auto& function_tav = TypeArguments::Handle(zone_); | 
 |   auto& result_tav = TypeArguments::Handle(zone_); | 
 |   const InstantiationsCacheTable from_table(data_); | 
 |   for (const auto& from_tuple : from_table) { | 
 |     // Skip unoccupied entries. | 
 |     if (from_tuple.Get<kSentinelIndex>() == Sentinel()) continue; | 
 |     instantiator_tav ^= from_tuple.Get<kInstantiatorTypeArgsIndex>(); | 
 |     function_tav = from_tuple.Get<kFunctionTypeArgsIndex>(); | 
 |     result_tav = from_tuple.Get<kInstantiatedTypeArgsIndex>(); | 
 |     // Since new_data has a different total capacity, we can't use the old | 
 |     // entry indexes, but must recalculate them. | 
 |     auto loc = FindKeyOrUnused(new_data, instantiator_tav, function_tav); | 
 |     ASSERT(!loc.present); | 
 |     const auto& to_tuple = to_table.At(loc.entry); | 
 |     to_tuple.Set<kInstantiatorTypeArgsIndex>(instantiator_tav); | 
 |     to_tuple.Set<kFunctionTypeArgsIndex>(function_tav); | 
 |     to_tuple.Set<kInstantiatedTypeArgsIndex>(result_tav); | 
 |   } | 
 |   data_ = new_data.ptr(); | 
 |   return true; | 
 | } | 
 |  | 
 | bool TypeArguments::HasInstantiations() const { | 
 |   return instantiations() != Cache::EmptyStorage().ptr(); | 
 | } | 
 |  | 
 | ArrayPtr TypeArguments::instantiations() const { | 
 |   // We rely on the fact that any loads from the array are dependent loads and | 
 |   // avoid the load-acquire barrier here. | 
 |   return untag()->instantiations(); | 
 | } | 
 |  | 
 | void TypeArguments::set_instantiations(const Array& value) const { | 
 |   // We have to ensure that initializing stores to the array are available | 
 |   // when releasing the pointer to the array pointer. | 
 |   // => We have to use store-release here. | 
 |   ASSERT(!value.IsNull()); | 
 |   untag()->set_instantiations<std::memory_order_release>(value.ptr()); | 
 | } | 
 |  | 
 | bool TypeArguments::HasCount(intptr_t count) const { | 
 |   if (IsNull()) { | 
 |     return true; | 
 |   } | 
 |   return Length() == count; | 
 | } | 
 |  | 
 | intptr_t TypeArguments::Length() const { | 
 |   if (IsNull()) { | 
 |     return 0; | 
 |   } | 
 |   return Smi::Value(untag()->length()); | 
 | } | 
 |  | 
 | intptr_t TypeArguments::nullability() const { | 
 |   if (IsNull()) { | 
 |     return 0; | 
 |   } | 
 |   return Smi::Value(untag()->nullability()); | 
 | } | 
 |  | 
 | AbstractTypePtr TypeArguments::TypeAt(intptr_t index) const { | 
 |   ASSERT(!IsNull()); | 
 |   ASSERT((index >= 0) && (index < Length())); | 
 |   return untag()->element(index); | 
 | } | 
 |  | 
 | AbstractTypePtr TypeArguments::TypeAtNullSafe(intptr_t index) const { | 
 |   if (IsNull()) { | 
    // A null vector represents an infinite list of dynamic types.
 |     return Type::dynamic_type().ptr(); | 
 |   } | 
 |   ASSERT((index >= 0) && (index < Length())); | 
 |   return TypeAt(index); | 
 | } | 
 |  | 
 | void TypeArguments::SetTypeAt(intptr_t index, const AbstractType& value) const { | 
 |   ASSERT(!IsCanonical()); | 
 |   ASSERT((index >= 0) && (index < Length())); | 
 |   return untag()->set_element(index, value.ptr()); | 
 | } | 
 |  | 
 | bool TypeArguments::IsSubvectorInstantiated( | 
 |     intptr_t from_index, | 
 |     intptr_t len, | 
 |     Genericity genericity, | 
 |     intptr_t num_free_fun_type_params) const { | 
 |   ASSERT(!IsNull()); | 
 |   AbstractType& type = AbstractType::Handle(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     type = TypeAt(from_index + i); | 
 |     // If this type argument T is null, the type A containing T in its flattened | 
 |     // type argument vector V is recursive and is still being finalized. | 
 |     // T is the type argument of a super type of A. T is being instantiated | 
 |     // during finalization of V, which is also the instantiator. T depends | 
 |     // solely on the type parameters of A and will be replaced by a non-null | 
 |     // type before A is marked as finalized. | 
 |     if (!type.IsNull() && | 
 |         !type.IsInstantiated(genericity, num_free_fun_type_params)) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | bool TypeArguments::IsUninstantiatedIdentity() const { | 
 |   AbstractType& type = AbstractType::Handle(); | 
 |   const intptr_t num_types = Length(); | 
 |   for (intptr_t i = 0; i < num_types; i++) { | 
 |     type = TypeAt(i); | 
 |     if (type.IsNull()) { | 
 |       return false;  // Still unfinalized, too early to tell. | 
 |     } | 
 |     if (!type.IsTypeParameter()) { | 
 |       return false; | 
 |     } | 
 |     const TypeParameter& type_param = TypeParameter::Cast(type); | 
 |     ASSERT(type_param.IsFinalized()); | 
 |     if ((type_param.index() != i) || type_param.IsFunctionTypeParameter()) { | 
 |       return false; | 
 |     } | 
 |     // Instantiating nullable type parameters may change | 
 |     // nullability of a type, so type arguments vector containing such type | 
 |     // parameters cannot be substituted with instantiator type arguments. | 
 |     if (type_param.IsNullable()) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 |   // Note that it is not necessary to verify at runtime that the instantiator | 
 |   // type vector is long enough, since this uninstantiated vector contains as | 
 |   // many different type parameters as it is long. | 
 | } | 
 |  | 
 | // Return true if this uninstantiated type argument vector, once instantiated | 
 | // at runtime, is a prefix of the type argument vector of its instantiator. | 
 | // A runtime check may be required, as indicated by with_runtime_check. | 
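// Illustrative example: given
//   class A<T> { }
//   class B<T> extends A<T> { }
// the flattened type argument vector of B is <T>, so an uninstantiated
// vector <T> used inside B instantiates to the instantiator's own vector and
// can be shared with it (possibly subject to the nullability runtime check
// described below).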
 | bool TypeArguments::CanShareInstantiatorTypeArguments( | 
 |     const Class& instantiator_class, | 
 |     bool* with_runtime_check) const { | 
 |   ASSERT(!IsInstantiated()); | 
 |   if (with_runtime_check != nullptr) { | 
 |     *with_runtime_check = false; | 
 |   } | 
 |   const intptr_t num_type_args = Length(); | 
 |   const intptr_t num_instantiator_type_args = | 
 |       instantiator_class.NumTypeArguments(); | 
 |   if (num_type_args > num_instantiator_type_args) { | 
 |     // This vector cannot be a prefix of a shorter vector. | 
 |     return false; | 
 |   } | 
 |   const intptr_t num_instantiator_type_params = | 
 |       instantiator_class.NumTypeParameters(); | 
 |   const intptr_t first_type_param_offset = | 
 |       num_instantiator_type_args - num_instantiator_type_params; | 
 |   // At compile time, the type argument vector of the instantiator consists of | 
 |   // the type argument vector of its super type, which may refer to the type | 
 |   // parameters of the instantiator class, followed by (or overlapping partially | 
 |   // or fully with) the type parameters of the instantiator class in declaration | 
 |   // order. | 
 |   // In other words, the only variables are the type parameters of the | 
 |   // instantiator class. | 
 |   // This uninstantiated type argument vector is also expressed in terms of the | 
 |   // type parameters of the instantiator class. Therefore, in order to be a | 
 |   // prefix once instantiated at runtime, every one of its type argument must be | 
 |   // equal to the type argument of the instantiator vector at the same index. | 
 |  | 
 |   // As a first requirement, the last num_instantiator_type_params type | 
 |   // arguments of this type argument vector must refer to the corresponding type | 
 |   // parameters of the instantiator class. | 
 |   AbstractType& type_arg = AbstractType::Handle(); | 
 |   for (intptr_t i = first_type_param_offset; i < num_type_args; i++) { | 
 |     type_arg = TypeAt(i); | 
 |     if (!type_arg.IsTypeParameter()) { | 
 |       return false; | 
 |     } | 
 |     const TypeParameter& type_param = TypeParameter::Cast(type_arg); | 
 |     ASSERT(type_param.IsFinalized()); | 
 |     if ((type_param.index() != i) || type_param.IsFunctionTypeParameter()) { | 
 |       return false; | 
 |     } | 
 |     // Instantiating nullable type parameters may change nullability | 
 |     // of a type, so type arguments vector containing such type parameters | 
 |     // cannot be substituted with instantiator type arguments, unless we check | 
 |     // at runtime the nullability of the first 1 or 2 type arguments of the | 
 |     // instantiator. | 
 |     // Note that the presence of non-overlapping super type arguments (i.e. | 
 |     // first_type_param_offset > 0) will prevent this optimization. | 
 |     if (type_param.IsNullable()) { | 
 |       if (with_runtime_check == nullptr || i >= kNullabilityMaxTypes) { | 
 |         return false; | 
 |       } | 
 |       *with_runtime_check = true; | 
 |     } | 
 |   } | 
 |   // As a second requirement, the type arguments corresponding to the super type | 
 |   // must be identical. Overlapping ones have already been checked starting at | 
 |   // first_type_param_offset. | 
 |   if (first_type_param_offset == 0) { | 
 |     return true; | 
 |   } | 
 |   Type& super_type = Type::Handle(instantiator_class.super_type()); | 
 |   const TypeArguments& super_type_args = | 
 |       TypeArguments::Handle(super_type.GetInstanceTypeArguments( | 
 |           Thread::Current(), /*canonicalize=*/false)); | 
 |   if (super_type_args.IsNull()) { | 
 |     ASSERT(!IsUninstantiatedIdentity()); | 
 |     return false; | 
 |   } | 
 |   AbstractType& super_type_arg = AbstractType::Handle(); | 
 |   for (intptr_t i = 0; (i < first_type_param_offset) && (i < num_type_args); | 
 |        i++) { | 
 |     type_arg = TypeAt(i); | 
 |     super_type_arg = super_type_args.TypeAt(i); | 
 |     if (!type_arg.Equals(super_type_arg)) { | 
 |       ASSERT(!IsUninstantiatedIdentity()); | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | // Return true if this uninstantiated type argument vector, once instantiated | 
 | // at runtime, is a prefix of the enclosing function type arguments. | 
 | // A runtime check may be required, as indicated by with_runtime_check. | 
 | bool TypeArguments::CanShareFunctionTypeArguments( | 
 |     const Function& function, | 
 |     bool* with_runtime_check) const { | 
 |   ASSERT(!IsInstantiated()); | 
 |   if (with_runtime_check != nullptr) { | 
 |     *with_runtime_check = false; | 
 |   } | 
 |   const intptr_t num_type_args = Length(); | 
 |   const intptr_t num_parent_type_args = function.NumParentTypeArguments(); | 
 |   const intptr_t num_function_type_params = function.NumTypeParameters(); | 
 |   const intptr_t num_function_type_args = | 
 |       num_parent_type_args + num_function_type_params; | 
 |   if (num_type_args > num_function_type_args) { | 
 |     // This vector cannot be a prefix of a shorter vector. | 
 |     return false; | 
 |   } | 
 |   AbstractType& type_arg = AbstractType::Handle(); | 
 |   for (intptr_t i = 0; i < num_type_args; i++) { | 
 |     type_arg = TypeAt(i); | 
 |     if (!type_arg.IsTypeParameter()) { | 
 |       return false; | 
 |     } | 
 |     const TypeParameter& type_param = TypeParameter::Cast(type_arg); | 
 |     ASSERT(type_param.IsFinalized()); | 
 |     if ((type_param.index() != i) || !type_param.IsFunctionTypeParameter()) { | 
 |       return false; | 
 |     } | 
 |     // Instantiating nullable type parameters may change nullability | 
 |     // of a type, so type arguments vector containing such type parameters | 
 |     // cannot be substituted with the enclosing function type arguments, unless | 
 |     // we check at runtime the nullability of the first 1 or 2 type arguments of | 
 |     // the enclosing function type arguments. | 
 |     if (type_param.IsNullable()) { | 
 |       if (with_runtime_check == nullptr || i >= kNullabilityMaxTypes) { | 
 |         return false; | 
 |       } | 
 |       *with_runtime_check = true; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | TypeArgumentsPtr TypeArguments::TruncatedTo(intptr_t length) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const TypeArguments& result = | 
 |       TypeArguments::Handle(zone, TypeArguments::New(length)); | 
 |   AbstractType& type = AbstractType::Handle(zone); | 
 |   for (intptr_t i = 0; i < length; i++) { | 
 |     type = TypeAt(i); | 
 |     result.SetTypeAt(i, type); | 
 |   } | 
 |   return result.Canonicalize(thread); | 
 | } | 
 |  | 
 | bool TypeArguments::IsFinalized() const { | 
 |   ASSERT(!IsNull()); | 
 |   AbstractType& type = AbstractType::Handle(); | 
 |   const intptr_t num_types = Length(); | 
 |   for (intptr_t i = 0; i < num_types; i++) { | 
 |     type = TypeAt(i); | 
 |     if (!type.IsFinalized()) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | TypeArgumentsPtr TypeArguments::InstantiateFrom( | 
 |     const TypeArguments& instantiator_type_arguments, | 
 |     const TypeArguments& function_type_arguments, | 
 |     intptr_t num_free_fun_type_params, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_mapping, | 
 |     intptr_t num_parent_type_args_adjustment) const { | 
 |   ASSERT(!IsInstantiated()); | 
 |   if ((instantiator_type_arguments.IsNull() || | 
 |        instantiator_type_arguments.Length() == Length()) && | 
 |       IsUninstantiatedIdentity()) { | 
 |     return instantiator_type_arguments.ptr(); | 
 |   } | 
 |   const intptr_t num_types = Length(); | 
 |   TypeArguments& instantiated_array = | 
 |       TypeArguments::Handle(TypeArguments::New(num_types, space)); | 
 |   AbstractType& type = AbstractType::Handle(); | 
 |   for (intptr_t i = 0; i < num_types; i++) { | 
 |     type = TypeAt(i); | 
 |     // If this type argument T is null, the type A containing T in its flattened | 
 |     // type argument vector V is recursive and is still being finalized. | 
 |     // T is the type argument of a super type of A. T is being instantiated | 
 |     // during finalization of V, which is also the instantiator. T depends | 
 |     // solely on the type parameters of A and will be replaced by a non-null | 
 |     // type before A is marked as finalized. | 
 |     if (!type.IsNull() && !type.IsInstantiated()) { | 
 |       type = type.InstantiateFrom( | 
 |           instantiator_type_arguments, function_type_arguments, | 
 |           num_free_fun_type_params, space, function_type_mapping, | 
 |           num_parent_type_args_adjustment); | 
 |       // A returned null type indicates a failed instantiation in dead code that | 
 |       // must be propagated up to the caller, the optimizing compiler. | 
 |       if (type.IsNull()) { | 
 |         return Object::empty_type_arguments().ptr(); | 
 |       } | 
 |     } | 
 |     instantiated_array.SetTypeAt(i, type); | 
 |   } | 
 |   return instantiated_array.ptr(); | 
 | } | 
 |  | 
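// Returns this vector with any nested function types rewritten via
// AbstractType::UpdateFunctionTypes. The update is copy-on-write: a new
// vector is allocated only once an element actually changes; otherwise the
// original vector is returned as-is.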
 | TypeArgumentsPtr TypeArguments::UpdateFunctionTypes( | 
 |     intptr_t num_parent_type_args_adjustment, | 
 |     intptr_t num_free_fun_type_params, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_mapping) const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   TypeArguments* updated_args = nullptr; | 
 |   AbstractType& type = AbstractType::Handle(zone); | 
 |   AbstractType& updated = AbstractType::Handle(zone); | 
 |   for (intptr_t i = 0, n = Length(); i < n; ++i) { | 
 |     type = TypeAt(i); | 
 |     updated = type.UpdateFunctionTypes(num_parent_type_args_adjustment, | 
 |                                        num_free_fun_type_params, space, | 
 |                                        function_type_mapping); | 
 |     if (type.ptr() != updated.ptr()) { | 
 |       if (updated_args == nullptr) { | 
 |         updated_args = | 
 |             &TypeArguments::Handle(zone, TypeArguments::New(n, space)); | 
 |         for (intptr_t j = 0; j < i; ++j) { | 
 |           type = TypeAt(j); | 
 |           updated_args->SetTypeAt(j, type); | 
 |         } | 
 |       } | 
 |     } | 
 |     if (updated_args != nullptr) { | 
 |       updated_args->SetTypeAt(i, updated); | 
 |     } | 
 |   } | 
 |   return (updated_args != nullptr) ? updated_args->ptr() : ptr(); | 
 | } | 
 |  | 
 | #if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME) | 
// A flag used only in object_test.cc that, when true, causes a failure
 | // when a cache entry for the given instantiator and function type arguments | 
 | // already exists. Used to check that the InstantiateTypeArguments stub found | 
 | // the cache entry instead of calling the runtime. | 
 | bool TESTING_runtime_fail_on_existing_cache_entry = false; | 
 | #endif | 
 |  | 
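// Cache-backed variant of InstantiateFrom: probes the per-vector
// instantiations() cache under the type arguments canonicalization mutex
// and, on a miss, instantiates, canonicalizes, and records the result in
// the cache.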
 | TypeArgumentsPtr TypeArguments::InstantiateAndCanonicalizeFrom( | 
 |     const TypeArguments& instantiator_type_arguments, | 
 |     const TypeArguments& function_type_arguments) const { | 
 |   auto thread = Thread::Current(); | 
 |   auto zone = thread->zone(); | 
 |   SafepointMutexLocker ml( | 
 |       thread->isolate_group()->type_arguments_canonicalization_mutex()); | 
 |  | 
 |   ASSERT(!IsInstantiated()); | 
 |   ASSERT(instantiator_type_arguments.IsNull() || | 
 |          instantiator_type_arguments.IsCanonical()); | 
 |   ASSERT(function_type_arguments.IsNull() || | 
 |          function_type_arguments.IsCanonical()); | 
  // Look up the instantiators and, if found, return the instantiated result.
 |   Cache cache(zone, *this); | 
 |   auto const loc = cache.FindKeyOrUnused(instantiator_type_arguments, | 
 |                                          function_type_arguments); | 
 |   if (loc.present) { | 
 | #if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME) | 
 |     if (TESTING_runtime_fail_on_existing_cache_entry) { | 
 |       TextBuffer buffer(1024); | 
 |       buffer.Printf("for\n"); | 
 |       buffer.Printf("  * uninstantiated type arguments %s\n", ToCString()); | 
 |       buffer.Printf("  * instantiation type arguments: %s (hash: %" Pu ")\n", | 
 |                     instantiator_type_arguments.ToCString(), | 
 |                     instantiator_type_arguments.Hash()); | 
 |       buffer.Printf("  * function type arguments: %s (hash: %" Pu ")\n", | 
 |                     function_type_arguments.ToCString(), | 
 |                     function_type_arguments.Hash()); | 
 |       buffer.Printf("  * number of occupied entries in cache: %" Pd "\n", | 
 |                     cache.NumOccupied()); | 
 |       buffer.Printf("  * number of total entries in cache: %" Pd "\n", | 
 |                     cache.NumEntries()); | 
 |       buffer.Printf("expected to find entry %" Pd | 
 |                     " of cache in stub, but reached runtime", | 
 |                     loc.entry); | 
 |       FATAL("%s", buffer.buffer()); | 
 |     } | 
 | #endif | 
 |     return cache.Retrieve(loc.entry); | 
 |   } | 
 |   // Cache lookup failed. Instantiate the type arguments. | 
 |   TypeArguments& result = TypeArguments::Handle(zone); | 
 |   result = InstantiateFrom(instantiator_type_arguments, function_type_arguments, | 
 |                            kAllFree, Heap::kOld); | 
 |   // Canonicalize type arguments. | 
 |   result = result.Canonicalize(thread); | 
  // InstantiateAndCanonicalizeFrom is not reentrant. It cannot have been
  // called indirectly, so the instantiations() cache array cannot have grown.
 |   ASSERT(cache.data_.ptr() == instantiations()); | 
 |   cache.AddEntry(loc.entry, instantiator_type_arguments, | 
 |                  function_type_arguments, result); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | TypeArgumentsPtr TypeArguments::New(intptr_t len, Heap::Space space) { | 
 |   if (len < 0 || len > kMaxElements) { | 
 |     // This should be caught before we reach here. | 
 |     FATAL("Fatal error in TypeArguments::New: invalid len %" Pd "\n", len); | 
 |   } | 
 |   TypeArguments& result = TypeArguments::Handle(); | 
 |   { | 
 |     auto raw = Object::Allocate<TypeArguments>(space, len); | 
 |     NoSafepointScope no_safepoint; | 
 |     result = raw; | 
 |     // Length must be set before we start storing into the array. | 
 |     result.SetLength(len); | 
 |     result.SetHash(0); | 
 |     result.set_nullability(0); | 
 |   } | 
 |   // The array used as storage for an empty linear cache should be initialized. | 
 |   ASSERT(Cache::EmptyStorage().ptr() != Array::null()); | 
 |   result.set_instantiations(Cache::EmptyStorage()); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void TypeArguments::SetLength(intptr_t value) const { | 
 |   ASSERT(!IsCanonical()); | 
 |   // This is only safe because we create a new Smi, which does not cause | 
 |   // heap allocation. | 
 |   untag()->set_length(Smi::New(value)); | 
 | } | 
 |  | 
 | TypeArgumentsPtr TypeArguments::Canonicalize(Thread* thread) const { | 
 |   if (IsNull() || IsCanonical()) { | 
 |     ASSERT(IsOld()); | 
 |     return this->ptr(); | 
 |   } | 
 |   const intptr_t num_types = Length(); | 
 |   if (num_types == 0) { | 
 |     return TypeArguments::empty_type_arguments().ptr(); | 
 |   } else if (IsRaw(0, num_types)) { | 
 |     return TypeArguments::null(); | 
 |   } | 
 |   Zone* zone = thread->zone(); | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   ObjectStore* object_store = isolate_group->object_store(); | 
 |   TypeArguments& result = TypeArguments::Handle(zone); | 
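  // First, probe the canonical table under the lock; a hit avoids
  // canonicalizing the individual type arguments below.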
 |   { | 
 |     SafepointMutexLocker ml(isolate_group->type_canonicalization_mutex()); | 
 |     CanonicalTypeArgumentsSet table(zone, | 
 |                                     object_store->canonical_type_arguments()); | 
 |     result ^= table.GetOrNull(CanonicalTypeArgumentsKey(*this)); | 
 |     object_store->set_canonical_type_arguments(table.Release()); | 
 |   } | 
 |   if (result.IsNull()) { | 
 |     // Canonicalize each type argument. | 
 |     AbstractType& type_arg = AbstractType::Handle(zone); | 
 |     GrowableHandlePtrArray<const AbstractType> canonicalized_types(zone, | 
 |                                                                    num_types); | 
 |     for (intptr_t i = 0; i < num_types; i++) { | 
 |       type_arg = TypeAt(i); | 
 |       type_arg = type_arg.Canonicalize(thread); | 
 |       canonicalized_types.Add(type_arg); | 
 |     } | 
 |     SafepointMutexLocker ml(isolate_group->type_canonicalization_mutex()); | 
 |     CanonicalTypeArgumentsSet table(zone, | 
 |                                     object_store->canonical_type_arguments()); | 
    // Since we canonicalized some type arguments above, we need to look up
    // the table again to make sure we don't already have an equivalent
    // canonical entry.
 |     result ^= table.GetOrNull(CanonicalTypeArgumentsKey(*this)); | 
 |     if (result.IsNull()) { | 
 |       for (intptr_t i = 0; i < num_types; i++) { | 
 |         SetTypeAt(i, canonicalized_types.At(i)); | 
 |       } | 
 |       // Make sure we have an old space object and add it to the table. | 
 |       if (this->IsNew()) { | 
 |         result ^= Object::Clone(*this, Heap::kOld); | 
 |       } else { | 
 |         result = this->ptr(); | 
 |       } | 
 |       ASSERT(result.IsOld()); | 
 |       result.ComputeNullability(); | 
 |       result.SetCanonical();  // Mark object as being canonical. | 
      // Now add this TypeArguments vector to the canonical table.
 |       bool present = table.Insert(result); | 
 |       ASSERT(!present); | 
 |     } | 
 |     object_store->set_canonical_type_arguments(table.Release()); | 
 |   } | 
 |   ASSERT(result.Equals(*this)); | 
 |   ASSERT(!result.IsNull()); | 
 |   ASSERT(result.IsTypeArguments()); | 
 |   ASSERT(result.IsCanonical()); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
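// Extracts the suffix of this flattened instance type argument vector that
// corresponds to the type parameters declared by cls itself, dropping the
// prefix contributed by its superclasses. Returns null if cls declares no
// type parameters, and this vector unchanged if there is no prefix to drop.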
 | TypeArgumentsPtr TypeArguments::FromInstanceTypeArguments( | 
 |     Thread* thread, | 
 |     const Class& cls) const { | 
 |   if (IsNull()) { | 
 |     return ptr(); | 
 |   } | 
 |   const intptr_t num_type_arguments = cls.NumTypeArguments(); | 
 |   const intptr_t num_type_parameters = cls.NumTypeParameters(thread); | 
 |   ASSERT(Length() >= num_type_arguments); | 
 |   if (Length() == num_type_parameters) { | 
 |     return ptr(); | 
 |   } | 
 |   if (num_type_parameters == 0) { | 
 |     return TypeArguments::null(); | 
 |   } | 
 |   Zone* zone = thread->zone(); | 
 |   const auto& args = | 
 |       TypeArguments::Handle(zone, TypeArguments::New(num_type_parameters)); | 
 |   const intptr_t offset = num_type_arguments - num_type_parameters; | 
 |   auto& type = AbstractType::Handle(zone); | 
 |   for (intptr_t i = 0; i < num_type_parameters; ++i) { | 
 |     type = TypeAt(offset + i); | 
 |     args.SetTypeAt(i, type); | 
 |   } | 
 |   return args.ptr(); | 
 | } | 
 |  | 
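// Inverse of FromInstanceTypeArguments: embeds this vector of cls's own type
// parameters at the tail of a vector of length cls.NumTypeArguments(),
// leaving the superclass prefix entries null.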
 | TypeArgumentsPtr TypeArguments::ToInstantiatorTypeArguments( | 
 |     Thread* thread, | 
 |     const Class& cls) const { | 
 |   if (IsNull()) { | 
 |     return ptr(); | 
 |   } | 
 |   const intptr_t num_type_arguments = cls.NumTypeArguments(); | 
 |   const intptr_t num_type_parameters = cls.NumTypeParameters(thread); | 
 |   ASSERT(Length() == num_type_parameters); | 
 |   if (num_type_arguments == num_type_parameters) { | 
 |     return ptr(); | 
 |   } | 
 |   Zone* zone = thread->zone(); | 
 |   const auto& args = | 
 |       TypeArguments::Handle(zone, TypeArguments::New(num_type_arguments)); | 
 |   const intptr_t offset = num_type_arguments - num_type_parameters; | 
 |   auto& type = AbstractType::Handle(zone); | 
 |   for (intptr_t i = 0; i < num_type_parameters; ++i) { | 
 |     type = TypeAt(i); | 
 |     args.SetTypeAt(offset + i, type); | 
 |   } | 
 |   return args.ptr(); | 
 | } | 
 |  | 
 | void TypeArguments::EnumerateURIs(URIs* uris) const { | 
 |   if (IsNull()) { | 
 |     return; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   AbstractType& type = AbstractType::Handle(zone); | 
 |   const intptr_t num_types = Length(); | 
 |   for (intptr_t i = 0; i < num_types; i++) { | 
 |     type = TypeAt(i); | 
 |     type.EnumerateURIs(uris); | 
 |   } | 
 | } | 
 |  | 
 | const char* TypeArguments::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "TypeArguments: null";  // Optimizing the frequent case. | 
 |   } | 
 |   ZoneTextBuffer buffer(Thread::Current()->zone()); | 
 |   PrintTo(&buffer); | 
 |   return buffer.buffer(); | 
 | } | 
 |  | 
 | const char* PatchClass::ToCString() const { | 
 |   const Class& cls = Class::Handle(wrapped_class()); | 
 |   const char* cls_name = cls.ToCString(); | 
 |   return OS::SCreate(Thread::Current()->zone(), "PatchClass for %s", cls_name); | 
 | } | 
 |  | 
 | PatchClassPtr PatchClass::New(const Class& wrapped_class, | 
 |                               const KernelProgramInfo& info, | 
 |                               const Script& script) { | 
 |   const PatchClass& result = PatchClass::Handle(PatchClass::New()); | 
 |   result.set_wrapped_class(wrapped_class); | 
 |   NOT_IN_PRECOMPILED_RUNTIME( | 
 |       result.untag()->set_kernel_program_info(info.ptr())); | 
 |   result.set_script(script); | 
 |   result.set_kernel_library_index(-1); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | PatchClassPtr PatchClass::New() { | 
 |   ASSERT(Object::patch_class_class() != Class::null()); | 
 |   return Object::Allocate<PatchClass>(Heap::kOld); | 
 | } | 
 |  | 
 | void PatchClass::set_wrapped_class(const Class& value) const { | 
 |   untag()->set_wrapped_class(value.ptr()); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | void PatchClass::set_kernel_program_info(const KernelProgramInfo& info) const { | 
 |   untag()->set_kernel_program_info(info.ptr()); | 
 | } | 
 | #endif | 
 |  | 
 | void PatchClass::set_script(const Script& value) const { | 
 |   untag()->set_script(value.ptr()); | 
 | } | 
 |  | 
 | uword Function::Hash() const { | 
 |   uword hash = String::HashRawSymbol(name()); | 
 |   if (IsClosureFunction()) { | 
 |     hash = hash ^ token_pos().Hash(); | 
 |   } | 
 |   if (Owner()->IsClass()) { | 
 |     hash = hash ^ Class::Hash(Class::RawCast(Owner())); | 
 |   } | 
 |   return hash; | 
 | } | 
 |  | 
 | bool Function::HasBreakpoint() const { | 
 | #if defined(PRODUCT) | 
 |   return false; | 
 | #else | 
 |   auto thread = Thread::Current(); | 
 |   return thread->isolate_group()->debugger()->HasBreakpoint(thread, *this); | 
 | #endif | 
 | } | 
 |  | 
 | void Function::InstallOptimizedCode(const Code& code) const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
  // We may not have previous code if FLAG_precompiled_mode is set.
 |   // Hot-reload may have already disabled the current code. | 
 |   if (HasCode() && !Code::Handle(CurrentCode()).IsDisabled()) { | 
 |     Code::Handle(CurrentCode()).DisableDartCode(); | 
 |   } | 
 |   AttachCode(code); | 
 | } | 
 |  | 
 | void Function::SetInstructions(const Code& value) const { | 
 |   // Ensure that nobody is executing this function when we install it. | 
 |   if (untag()->code() != Code::null() && HasCode()) { | 
 |     GcSafepointOperationScope safepoint(Thread::Current()); | 
 |     SetInstructionsSafe(value); | 
 |   } else { | 
 |     ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |     SetInstructionsSafe(value); | 
 |   } | 
 | } | 
 |  | 
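// Unlike SetInstructions, assumes the caller has already ensured that no
// thread is executing this function (e.g. at a safepoint, or before the
// function becomes reachable). Publishes the code pointer with a release
// store and updates the cached entry points.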
 | void Function::SetInstructionsSafe(const Code& value) const { | 
 |   untag()->set_code<std::memory_order_release>(value.ptr()); | 
 |   StoreNonPointer(&untag()->entry_point_, value.EntryPoint()); | 
 |   StoreNonPointer(&untag()->unchecked_entry_point_, | 
 |                   value.UncheckedEntryPoint()); | 
 | } | 
 |  | 
 | void Function::AttachCode(const Code& value) const { | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   // Finish setting up code before activating it. | 
 |   value.set_owner(*this); | 
 |   SetInstructions(value); | 
 |   ASSERT(Function::Handle(value.function()).IsNull() || | 
 |          (value.function() == this->ptr())); | 
 | } | 
 |  | 
 | bool Function::HasCode() const { | 
 |   NoSafepointScope no_safepoint; | 
 |   ASSERT(untag()->code() != Code::null()); | 
 |   return untag()->code() != StubCode::LazyCompile().ptr(); | 
 | } | 
 |  | 
 | bool Function::HasCode(FunctionPtr function) { | 
 |   NoSafepointScope no_safepoint; | 
 |   ASSERT(function->untag()->code() != Code::null()); | 
 |   return function->untag()->code() != StubCode::LazyCompile().ptr(); | 
 | } | 
 |  | 
 | void Function::ClearCode() const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   untag()->set_unoptimized_code(Code::null()); | 
 |   SetInstructions(StubCode::LazyCompile()); | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | void Function::ClearCodeSafe() const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   untag()->set_unoptimized_code(Code::null()); | 
 |   SetInstructionsSafe(StubCode::LazyCompile()); | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | void Function::EnsureHasCompiledUnoptimizedCode() const { | 
 |   ASSERT(!ForceOptimize()); | 
 |   Thread* thread = Thread::Current(); | 
 |   ASSERT(thread->IsDartMutatorThread()); | 
 |   // TODO(35224): DEBUG_ASSERT(thread->TopErrorHandlerIsExitFrame()); | 
 |   Zone* zone = thread->zone(); | 
 |  | 
 |   const Error& error = | 
 |       Error::Handle(zone, Compiler::EnsureUnoptimizedCode(thread, *this)); | 
 |   if (!error.IsNull()) { | 
 |     Exceptions::PropagateError(error); | 
 |   } | 
 | } | 
 |  | 
 | void Function::SwitchToUnoptimizedCode() const { | 
 |   ASSERT(HasOptimizedCode()); | 
 |   ASSERT(!ForceOptimize()); | 
 |   Thread* thread = Thread::Current(); | 
 |   DEBUG_ASSERT( | 
 |       thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); | 
 |   Zone* zone = thread->zone(); | 
 |   // TODO(35224): DEBUG_ASSERT(thread->TopErrorHandlerIsExitFrame()); | 
 |   const Code& current_code = Code::Handle(zone, CurrentCode()); | 
 |  | 
 |   if (FLAG_trace_deoptimization_verbose) { | 
 |     THR_Print("Disabling optimized code: '%s' entry: %#" Px "\n", | 
 |               ToFullyQualifiedCString(), current_code.EntryPoint()); | 
 |   } | 
 |   current_code.DisableDartCode(); | 
 |   const Error& error = | 
 |       Error::Handle(zone, Compiler::EnsureUnoptimizedCode(thread, *this)); | 
 |   if (!error.IsNull()) { | 
 |     Exceptions::PropagateError(error); | 
 |   } | 
 |   const Code& unopt_code = Code::Handle(zone, unoptimized_code()); | 
 |   unopt_code.Enable(); | 
 |   AttachCode(unopt_code); | 
 | } | 
 |  | 
 | void Function::SwitchToLazyCompiledUnoptimizedCode() const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   if (!HasOptimizedCode()) { | 
 |     return; | 
 |   } | 
 |  | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   ASSERT(thread->IsDartMutatorThread()); | 
 |  | 
 |   const Code& current_code = Code::Handle(zone, CurrentCode()); | 
 |   TIR_Print("Disabling optimized code for %s\n", ToCString()); | 
 |   current_code.DisableDartCode(); | 
 |  | 
 |   const Code& unopt_code = Code::Handle(zone, unoptimized_code()); | 
 |   if (unopt_code.IsNull()) { | 
 |     // Set the lazy compile stub code. | 
 |     TIR_Print("Switched to lazy compile stub for %s\n", ToCString()); | 
 |     SetInstructions(StubCode::LazyCompile()); | 
 |     return; | 
 |   } | 
 |  | 
 |   TIR_Print("Switched to unoptimized code for %s\n", ToCString()); | 
 |  | 
 |   AttachCode(unopt_code); | 
 |   unopt_code.Enable(); | 
 | #endif | 
 | } | 
 |  | 
 | void Function::set_unoptimized_code(const Code& value) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   DEBUG_ASSERT(IsMutatorOrAtDeoptSafepoint()); | 
 |   ASSERT(value.IsNull() || !value.is_optimized()); | 
 |   untag()->set_unoptimized_code(value.ptr()); | 
 | #endif | 
 | } | 
 |  | 
 | ContextScopePtr Function::context_scope() const { | 
 |   if (IsClosureFunction()) { | 
 |     const Object& obj = Object::Handle(untag()->data()); | 
 |     ASSERT(!obj.IsNull()); | 
 |     return ClosureData::Cast(obj).context_scope(); | 
 |   } | 
 |   return ContextScope::null(); | 
 | } | 
 |  | 
 | void Function::set_context_scope(const ContextScope& value) const { | 
 |   if (IsClosureFunction()) { | 
 |     const Object& obj = Object::Handle(untag()->data()); | 
 |     ASSERT(!obj.IsNull()); | 
 |     ClosureData::Cast(obj).set_context_scope(value); | 
 |     return; | 
 |   } | 
 |   UNREACHABLE(); | 
 | } | 
 |  | 
 | Function::AwaiterLink Function::awaiter_link() const { | 
 |   if (IsClosureFunction()) { | 
 |     const Object& obj = Object::Handle(untag()->data()); | 
 |     ASSERT(!obj.IsNull()); | 
 |     return ClosureData::Cast(obj).awaiter_link(); | 
 |   } | 
 |   UNREACHABLE(); | 
 |   return {}; | 
 | } | 
 |  | 
 | void Function::set_awaiter_link(Function::AwaiterLink link) const { | 
 |   if (IsClosureFunction()) { | 
 |     const Object& obj = Object::Handle(untag()->data()); | 
 |     ASSERT(!obj.IsNull()); | 
 |     ClosureData::Cast(obj).set_awaiter_link(link); | 
 |     return; | 
 |   } | 
 |   UNREACHABLE(); | 
 | } | 
 |  | 
 | ClosurePtr Function::implicit_static_closure() const { | 
 |   if (IsImplicitStaticClosureFunction()) { | 
 |     const Object& obj = Object::Handle(untag()->data()); | 
 |     ASSERT(!obj.IsNull()); | 
 |     return ClosureData::Cast(obj).implicit_static_closure(); | 
 |   } | 
 |   return Closure::null(); | 
 | } | 
 |  | 
 | void Function::set_implicit_static_closure(const Closure& closure) const { | 
 |   if (IsImplicitStaticClosureFunction()) { | 
 |     const Object& obj = Object::Handle(untag()->data()); | 
 |     ASSERT(!obj.IsNull()); | 
 |     ClosureData::Cast(obj).set_implicit_static_closure(closure); | 
 |     return; | 
 |   } | 
 |   UNREACHABLE(); | 
 | } | 
 |  | 
 | ScriptPtr Function::eval_script() const { | 
 |   const Object& obj = Object::Handle(untag()->data()); | 
 |   if (obj.IsScript()) { | 
 |     return Script::Cast(obj).ptr(); | 
 |   } | 
 |   return Script::null(); | 
 | } | 
 |  | 
 | void Function::set_eval_script(const Script& script) const { | 
 |   ASSERT(token_pos() == TokenPosition::kMinSource); | 
 |   ASSERT(untag()->data() == Object::null()); | 
 |   set_data(script); | 
 | } | 
 |  | 
 | FunctionPtr Function::extracted_method_closure() const { | 
 |   ASSERT(kind() == UntaggedFunction::kMethodExtractor); | 
 |   const Object& obj = Object::Handle(untag()->data()); | 
 |   ASSERT(obj.IsFunction()); | 
 |   return Function::Cast(obj).ptr(); | 
 | } | 
 |  | 
 | void Function::set_extracted_method_closure(const Function& value) const { | 
 |   ASSERT(kind() == UntaggedFunction::kMethodExtractor); | 
 |   ASSERT(untag()->data() == Object::null()); | 
 |   set_data(value); | 
 | } | 
 |  | 
 | ArrayPtr Function::saved_args_desc() const { | 
 |   if (kind() == UntaggedFunction::kDynamicInvocationForwarder) { | 
 |     return Array::null(); | 
 |   } | 
 |   ASSERT(kind() == UntaggedFunction::kNoSuchMethodDispatcher || | 
 |          kind() == UntaggedFunction::kInvokeFieldDispatcher); | 
 |   return Array::RawCast(untag()->data()); | 
 | } | 
 |  | 
 | void Function::set_saved_args_desc(const Array& value) const { | 
 |   ASSERT(kind() == UntaggedFunction::kNoSuchMethodDispatcher || | 
 |          kind() == UntaggedFunction::kInvokeFieldDispatcher); | 
 |   ASSERT(untag()->data() == Object::null()); | 
 |   set_data(value); | 
 | } | 
 |  | 
 | FieldPtr Function::accessor_field() const { | 
 |   ASSERT(kind() == UntaggedFunction::kImplicitGetter || | 
 |          kind() == UntaggedFunction::kImplicitSetter || | 
 |          kind() == UntaggedFunction::kImplicitStaticGetter || | 
 |          kind() == UntaggedFunction::kFieldInitializer); | 
 |   return Field::RawCast(untag()->data()); | 
 | } | 
 |  | 
 | void Function::set_accessor_field(const Field& value) const { | 
 |   ASSERT(kind() == UntaggedFunction::kImplicitGetter || | 
 |          kind() == UntaggedFunction::kImplicitSetter || | 
 |          kind() == UntaggedFunction::kImplicitStaticGetter || | 
 |          kind() == UntaggedFunction::kFieldInitializer); | 
  // Top-level classes may be finalized multiple times.
 |   ASSERT(untag()->data() == Object::null() || untag()->data() == value.ptr()); | 
 |   set_data(value); | 
 | } | 
 |  | 
 | FunctionPtr Function::parent_function() const { | 
 |   if (!IsClosureFunction()) return Function::null(); | 
 |   Object& obj = Object::Handle(untag()->data()); | 
 |   ASSERT(!obj.IsNull()); | 
 |   return ClosureData::Cast(obj).parent_function(); | 
 | } | 
 |  | 
 | void Function::set_parent_function(const Function& value) const { | 
 |   ASSERT(IsClosureFunction()); | 
 |   const Object& obj = Object::Handle(untag()->data()); | 
 |   ASSERT(!obj.IsNull()); | 
 |   ClosureData::Cast(obj).set_parent_function(value); | 
 | } | 
 |  | 
 | TypeArgumentsPtr Function::DefaultTypeArguments(Zone* zone) const { | 
 |   if (type_parameters() == TypeParameters::null()) { | 
 |     return Object::empty_type_arguments().ptr(); | 
 |   } | 
 |   return TypeParameters::Handle(zone, type_parameters()).defaults(); | 
 | } | 
 |  | 
 | InstantiationMode Function::default_type_arguments_instantiation_mode() const { | 
 |   if (!IsClosureFunction()) { | 
 |     UNREACHABLE(); | 
 |   } | 
 |   return ClosureData::DefaultTypeArgumentsInstantiationMode( | 
 |       ClosureData::RawCast(data())); | 
 | } | 
 |  | 
 | void Function::set_default_type_arguments_instantiation_mode( | 
 |     InstantiationMode value) const { | 
 |   if (!IsClosureFunction()) { | 
 |     UNREACHABLE(); | 
 |   } | 
 |   const auto& closure_data = ClosureData::Handle(ClosureData::RawCast(data())); | 
 |   ASSERT(!closure_data.IsNull()); | 
 |   closure_data.set_default_type_arguments_instantiation_mode(value); | 
 | } | 
 |  | 
 | // Enclosing outermost function of this local function. | 
 | FunctionPtr Function::GetOutermostFunction() const { | 
 |   FunctionPtr parent = parent_function(); | 
 |   if (parent == Object::null()) { | 
 |     return ptr(); | 
 |   } | 
 |   Function& function = Function::Handle(); | 
 |   do { | 
 |     function = parent; | 
 |     parent = function.parent_function(); | 
 |   } while (parent != Object::null()); | 
 |   return function.ptr(); | 
 | } | 
 |  | 
 | FunctionPtr Function::implicit_closure_function() const { | 
 |   if (IsClosureFunction() || IsDispatcherOrImplicitAccessor() || | 
 |       IsFieldInitializer() || IsFfiCallbackTrampoline() || | 
 |       IsMethodExtractor()) { | 
 |     return Function::null(); | 
 |   } | 
 |   const Object& obj = Object::Handle(data()); | 
 |   ASSERT(obj.IsNull() || obj.IsScript() || obj.IsFunction() || obj.IsArray()); | 
 |   if (obj.IsNull() || obj.IsScript()) { | 
 |     return Function::null(); | 
 |   } | 
 |   if (obj.IsFunction()) { | 
 |     return Function::Cast(obj).ptr(); | 
 |   } | 
 |   ASSERT(is_native()); | 
 |   ASSERT(obj.IsArray()); | 
 |   const Object& res = Object::Handle(Array::Cast(obj).AtAcquire(1)); | 
 |   return res.IsNull() ? Function::null() : Function::Cast(res).ptr(); | 
 | } | 
 |  | 
 | void Function::set_implicit_closure_function(const Function& value) const { | 
 |   DEBUG_ASSERT( | 
 |       IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   ASSERT(!IsClosureFunction()); | 
 |   const Object& old_data = Object::Handle(data()); | 
 |   if (is_old_native()) { | 
 |     ASSERT(old_data.IsArray()); | 
 |     const auto& pair = Array::Cast(old_data); | 
 |     ASSERT(pair.AtAcquire(NativeFunctionData::kTearOff) == Object::null() || | 
 |            value.IsNull()); | 
 |     pair.SetAtRelease(NativeFunctionData::kTearOff, value); | 
 |   } else { | 
 |     ASSERT(old_data.IsNull() || value.IsNull()); | 
 |     set_data(value); | 
 |   } | 
 | } | 
 |  | 
 | void Function::SetFfiCSignature(const FunctionType& sig) const { | 
 |   ASSERT(IsFfiCallbackTrampoline()); | 
 |   const Object& obj = Object::Handle(data()); | 
 |   ASSERT(!obj.IsNull()); | 
 |   FfiTrampolineData::Cast(obj).set_c_signature(sig); | 
 | } | 
 |  | 
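// The C signature is stored in one of three places depending on the kind of
// function: in FfiTrampolineData for callback trampolines, or as the single
// type argument of the annotation instance for FFI natives
// (GetNativeAnnotation) and FFI call closures (GetFfiCallClosurePragmaValue).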
 | FunctionTypePtr Function::FfiCSignature() const { | 
 |   auto* const zone = Thread::Current()->zone(); | 
 |   if (IsFfiCallbackTrampoline()) { | 
 |     const Object& obj = Object::Handle(zone, data()); | 
 |     ASSERT(!obj.IsNull()); | 
 |     return FfiTrampolineData::Cast(obj).c_signature(); | 
 |   } | 
 |   auto& pragma_value = Instance::Handle(zone); | 
 |   if (is_ffi_native()) { | 
 |     pragma_value = GetNativeAnnotation(); | 
 |   } else if (IsFfiCallClosure()) { | 
 |     pragma_value = GetFfiCallClosurePragmaValue(); | 
 |   } else { | 
 |     UNREACHABLE(); | 
 |   } | 
 |   const auto& type_args = | 
 |       TypeArguments::Handle(zone, pragma_value.GetTypeArguments()); | 
 |   ASSERT(type_args.Length() == 1); | 
 |   const auto& native_type = | 
 |       FunctionType::Cast(AbstractType::ZoneHandle(zone, type_args.TypeAt(0))); | 
 |   return native_type.ptr(); | 
 | } | 
 |  | 
 | bool Function::FfiCSignatureContainsHandles() const { | 
 |   const FunctionType& c_signature = FunctionType::Handle(FfiCSignature()); | 
 |   return c_signature.ContainsHandles(); | 
 | } | 
 |  | 
 | bool FunctionType::ContainsHandles() const { | 
 |   const intptr_t num_params = num_fixed_parameters(); | 
 |   for (intptr_t i = 0; i < num_params; i++) { | 
 |     const bool is_handle = | 
 |         AbstractType::Handle(ParameterTypeAt(i)).type_class_id() == | 
 |         kFfiHandleCid; | 
 |     if (is_handle) { | 
 |       return true; | 
 |     } | 
 |   } | 
 |   return AbstractType::Handle(result_type()).type_class_id() == kFfiHandleCid; | 
 | } | 
 |  | 
 | // Keep consistent with BaseMarshaller::IsCompound. | 
 | bool Function::FfiCSignatureReturnsStruct() const { | 
 |   ASSERT(IsFfiCallbackTrampoline()); | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const auto& c_signature = FunctionType::Handle(zone, FfiCSignature()); | 
 |   const auto& type = AbstractType::Handle(zone, c_signature.result_type()); | 
 |   if (IsFfiTypeClassId(type.type_class_id())) { | 
 |     return false; | 
 |   } | 
 |   const auto& cls = Class::Handle(zone, type.type_class()); | 
  const auto& super_class = Class::Handle(zone, cls.SuperClass());
  const bool is_abi_specific_int =
      String::Handle(zone, super_class.UserVisibleName())
          .Equals(Symbols::AbiSpecificInteger());
  if (is_abi_specific_int) {
    return false;
  }
#ifdef DEBUG
  const bool is_struct = String::Handle(zone, super_class.UserVisibleName())
                             .Equals(Symbols::Struct());
  const bool is_union = String::Handle(zone, super_class.UserVisibleName())
                            .Equals(Symbols::Union());
 |   ASSERT(is_struct || is_union); | 
 | #endif | 
 |   return true; | 
 | } | 
 |  | 
 | int32_t Function::FfiCallbackId() const { | 
 |   ASSERT(IsFfiCallbackTrampoline()); | 
 |  | 
 |   const auto& obj = Object::Handle(data()); | 
 |   ASSERT(!obj.IsNull()); | 
 |   const auto& trampoline_data = FfiTrampolineData::Cast(obj); | 
 |  | 
 |   ASSERT(trampoline_data.callback_id() != -1); | 
 |  | 
 |   return trampoline_data.callback_id(); | 
 | } | 
 |  | 
 | void Function::AssignFfiCallbackId(int32_t callback_id) const { | 
 |   ASSERT(IsFfiCallbackTrampoline()); | 
 |  | 
 |   const auto& obj = Object::Handle(data()); | 
 |   ASSERT(!obj.IsNull()); | 
 |   const auto& trampoline_data = FfiTrampolineData::Cast(obj); | 
 |  | 
 |   ASSERT(trampoline_data.callback_id() == -1); | 
 |   trampoline_data.set_callback_id(callback_id); | 
 | } | 
 |  | 
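// Reads the isLeaf flag from the same pragma instance used by
// FfiCSignature. The flag is stored in the last field of the pragma value's
// class, as checked by the ASSERT below.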
 | bool Function::FfiIsLeaf() const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   auto& pragma_value = Instance::Handle(zone); | 
 |   if (is_ffi_native()) { | 
 |     pragma_value = GetNativeAnnotation(); | 
 |   } else if (IsFfiCallClosure()) { | 
 |     pragma_value = GetFfiCallClosurePragmaValue(); | 
 |   } else { | 
 |     UNREACHABLE(); | 
 |   } | 
 |   const auto& pragma_value_class = Class::Handle(zone, pragma_value.clazz()); | 
 |   const auto& pragma_value_fields = | 
 |       Array::Handle(zone, pragma_value_class.fields()); | 
 |   ASSERT(pragma_value_fields.Length() >= 1); | 
 |   const auto& is_leaf_field = Field::Handle( | 
 |       zone, | 
 |       Field::RawCast(pragma_value_fields.At(pragma_value_fields.Length() - 1))); | 
 |   ASSERT(is_leaf_field.name() == Symbols::isLeaf().ptr()); | 
 |   return Bool::Handle(zone, Bool::RawCast(pragma_value.GetField(is_leaf_field))) | 
 |       .value(); | 
 | } | 
 |  | 
 | FunctionPtr Function::FfiCallbackTarget() const { | 
 |   ASSERT(IsFfiCallbackTrampoline()); | 
 |   const Object& obj = Object::Handle(data()); | 
 |   ASSERT(!obj.IsNull()); | 
 |   return FfiTrampolineData::Cast(obj).callback_target(); | 
 | } | 
 |  | 
 | void Function::SetFfiCallbackTarget(const Function& target) const { | 
 |   ASSERT(IsFfiCallbackTrampoline()); | 
 |   const Object& obj = Object::Handle(data()); | 
 |   ASSERT(!obj.IsNull()); | 
 |   FfiTrampolineData::Cast(obj).set_callback_target(target); | 
 | } | 
 |  | 
 | InstancePtr Function::FfiCallbackExceptionalReturn() const { | 
 |   ASSERT(IsFfiCallbackTrampoline()); | 
 |   const Object& obj = Object::Handle(data()); | 
 |   ASSERT(!obj.IsNull()); | 
 |   return FfiTrampolineData::Cast(obj).callback_exceptional_return(); | 
 | } | 
 |  | 
 | void Function::SetFfiCallbackExceptionalReturn(const Instance& value) const { | 
 |   ASSERT(IsFfiCallbackTrampoline()); | 
 |   const Object& obj = Object::Handle(data()); | 
 |   ASSERT(!obj.IsNull()); | 
 |   FfiTrampolineData::Cast(obj).set_callback_exceptional_return(value); | 
 | } | 
 |  | 
 | FfiCallbackKind Function::GetFfiCallbackKind() const { | 
 |   ASSERT(IsFfiCallbackTrampoline()); | 
 |   const Object& obj = Object::Handle(data()); | 
 |   ASSERT(!obj.IsNull()); | 
 |   return FfiTrampolineData::Cast(obj).ffi_function_kind(); | 
 | } | 
 |  | 
 | void Function::SetFfiCallbackKind(FfiCallbackKind value) const { | 
 |   ASSERT(IsFfiCallbackTrampoline()); | 
 |   const Object& obj = Object::Handle(data()); | 
 |   ASSERT(!obj.IsNull()); | 
 |   FfiTrampolineData::Cast(obj).set_ffi_function_kind(value); | 
 | } | 
 |  | 
 | const char* Function::KindToCString(UntaggedFunction::Kind kind) { | 
 |   return UntaggedFunction::KindToCString(kind); | 
 | } | 
 |  | 
 | FunctionPtr Function::ForwardingTarget() const { | 
 |   ASSERT(kind() == UntaggedFunction::kDynamicInvocationForwarder); | 
 |   return Function::RawCast(WeakSerializationReference::Unwrap(data())); | 
 | } | 
 |  | 
 | void Function::SetForwardingTarget(const Function& target) const { | 
 |   ASSERT(kind() == UntaggedFunction::kDynamicInvocationForwarder); | 
 |   set_data(target); | 
 | } | 
 |  | 
 | // This field is heavily overloaded: | 
 | //   kernel eval function:    Array[0] = Script | 
 | //                            Array[1] = KernelProgramInfo | 
 | //                            Array[2] = Kernel index of enclosing library | 
 | //   method extractor:        Function extracted closure function | 
 | //   implicit getter:         Field | 
 | //   implicit setter:         Field | 
//   implicit static getter:  Field
 | //   field initializer:       Field | 
 | //   noSuchMethod dispatcher: Array arguments descriptor | 
 | //   invoke-field dispatcher: Array arguments descriptor | 
 | //   closure function:        ClosureData | 
 | //   irregexp function:       Array[0] = RegExp | 
 | //                            Array[1] = Smi string specialization cid | 
 | //   native function:         Array[0] = String native name | 
 | //                            Array[1] = Function implicit closure function | 
 | //   regular function:        Function for implicit closure function | 
 | //   constructor, factory:    Function for implicit closure function | 
 | //   ffi trampoline function: FfiTrampolineData  (Dart->C) | 
 | //   dyn inv forwarder:       Forwarding target, a WSR pointing to it or null | 
 | //                            (null can only occur if forwarding target was | 
 | //                            dropped) | 
 | void Function::set_data(const Object& value) const { | 
 |   untag()->set_data<std::memory_order_release>(value.ptr()); | 
 | } | 
 |  | 
 | void Function::set_name(const String& value) const { | 
 |   ASSERT(value.IsSymbol()); | 
 |   untag()->set_name(value.ptr()); | 
 | } | 
 |  | 
 | void Function::set_owner(const Object& value) const { | 
 |   ASSERT(!value.IsNull()); | 
 |   untag()->set_owner(value.ptr()); | 
 | } | 
 |  | 
 | RegExpPtr Function::regexp() const { | 
 |   ASSERT(kind() == UntaggedFunction::kIrregexpFunction); | 
 |   const Array& pair = Array::Cast(Object::Handle(data())); | 
 |   return RegExp::RawCast(pair.At(0)); | 
 | } | 
 |  | 
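// For irregexp functions, data() holds [RegExp, Smi], where the Smi packs
// the sticky flag (bit 0) and the string specialization class id (the
// following kClassIdTagSize bits), as decoded below.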
 | class StickySpecialization : public BitField<intptr_t, bool, 0, 1> {}; | 
 | class StringSpecializationCid | 
 |     : public BitField<intptr_t, intptr_t, 1, UntaggedObject::kClassIdTagSize> { | 
 | }; | 
 |  | 
 | intptr_t Function::string_specialization_cid() const { | 
 |   ASSERT(kind() == UntaggedFunction::kIrregexpFunction); | 
 |   const Array& pair = Array::Cast(Object::Handle(data())); | 
 |   return StringSpecializationCid::decode(Smi::Value(Smi::RawCast(pair.At(1)))); | 
 | } | 
 |  | 
 | bool Function::is_sticky_specialization() const { | 
 |   ASSERT(kind() == UntaggedFunction::kIrregexpFunction); | 
 |   const Array& pair = Array::Cast(Object::Handle(data())); | 
 |   return StickySpecialization::decode(Smi::Value(Smi::RawCast(pair.At(1)))); | 
 | } | 
 |  | 
 | void Function::SetRegExpData(const RegExp& regexp, | 
 |                              intptr_t string_specialization_cid, | 
 |                              bool sticky) const { | 
 |   ASSERT(kind() == UntaggedFunction::kIrregexpFunction); | 
 |   ASSERT(IsStringClassId(string_specialization_cid)); | 
 |   ASSERT(data() == Object::null()); | 
 |   const Array& pair = Array::Handle(Array::New(2, Heap::kOld)); | 
 |   pair.SetAt(0, regexp); | 
 |   pair.SetAt(1, Smi::Handle(Smi::New(StickySpecialization::encode(sticky) | | 
 |                                      StringSpecializationCid::encode( | 
 |                                          string_specialization_cid)))); | 
 |   set_data(pair); | 
 | } | 
 |  | 
 | StringPtr Function::native_name() const { | 
 |   ASSERT(is_native()); | 
 |   const Object& obj = Object::Handle(data()); | 
 |   ASSERT(obj.IsArray()); | 
  return String::RawCast(Array::Cast(obj).At(NativeFunctionData::kNativeName));
 | } | 
 |  | 
 | void Function::set_native_name(const String& value) const { | 
 |   ASSERT(is_native()); | 
 |   const auto& pair = Array::Cast(Object::Handle(data())); | 
  ASSERT(pair.At(NativeFunctionData::kNativeName) == Object::null());
 |   pair.SetAt(NativeFunctionData::kNativeName, value); | 
 | } | 
 |  | 
 | InstancePtr Function::GetNativeAnnotation() const { | 
 |   ASSERT(is_ffi_native()); | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   auto& pragma_value = Object::Handle(zone); | 
 |   Library::FindPragma(dart::Thread::Current(), /*only_core=*/false, | 
 |                       Object::Handle(zone, ptr()), | 
 |                       String::Handle(zone, Symbols::vm_ffi_native().ptr()), | 
 |                       /*multiple=*/false, &pragma_value); | 
 |   auto const& native_instance = Instance::Cast(pragma_value); | 
 |   ASSERT(!native_instance.IsNull()); | 
 | #if defined(DEBUG) | 
 |   const auto& native_class = Class::Handle(zone, native_instance.clazz()); | 
 |   ASSERT(String::Handle(zone, native_class.UserVisibleName()) | 
 |              .Equals(Symbols::FfiNative())); | 
 | #endif | 
 |   return native_instance.ptr(); | 
 | } | 
 |  | 
 | bool Function::is_old_native() const { | 
 |   return is_native() && !is_external(); | 
 | } | 
 |  | 
 | bool Function::is_ffi_native() const { | 
 |   return is_native() && is_external(); | 
 | } | 
 |  | 
 | void Function::SetSignature(const FunctionType& value) const { | 
 |   set_signature(value); | 
 |   ASSERT(NumImplicitParameters() == value.num_implicit_parameters()); | 
 |   if (IsClosureFunction() && value.IsGeneric()) { | 
 |     Zone* zone = Thread::Current()->zone(); | 
 |     const TypeParameters& type_params = | 
 |         TypeParameters::Handle(zone, value.type_parameters()); | 
 |     const TypeArguments& defaults = | 
 |         TypeArguments::Handle(zone, type_params.defaults()); | 
 |     auto mode = defaults.GetInstantiationMode(zone, this); | 
 |     set_default_type_arguments_instantiation_mode(mode); | 
 |   } | 
 | } | 
 |  | 
 | TypeParameterPtr FunctionType::TypeParameterAt(intptr_t index, | 
 |                                                Nullability nullability) const { | 
 |   ASSERT(index >= 0 && index < NumTypeParameters()); | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   TypeParameter& type_param = TypeParameter::Handle( | 
 |       zone, TypeParameter::New(*this, NumParentTypeArguments(), | 
 |                                NumParentTypeArguments() + index, nullability)); | 
 |   type_param.SetIsFinalized(); | 
 |   if (IsFinalized()) { | 
 |     type_param ^= type_param.Canonicalize(thread); | 
 |   } | 
 |   return type_param.ptr(); | 
 | } | 
 |  | 
 | void FunctionType::set_result_type(const AbstractType& value) const { | 
 |   ASSERT(!value.IsNull()); | 
 |   untag()->set_result_type(value.ptr()); | 
 | } | 
 |  | 
 | AbstractTypePtr Function::ParameterTypeAt(intptr_t index) const { | 
 |   const Array& types = Array::Handle(parameter_types()); | 
 |   return AbstractType::RawCast(types.At(index)); | 
 | } | 
 |  | 
 | AbstractTypePtr FunctionType::ParameterTypeAt(intptr_t index) const { | 
 |   const Array& parameter_types = Array::Handle(untag()->parameter_types()); | 
 |   return AbstractType::RawCast(parameter_types.At(index)); | 
 | } | 
 |  | 
 | void FunctionType::SetParameterTypeAt(intptr_t index, | 
 |                                       const AbstractType& value) const { | 
 |   ASSERT(!value.IsNull()); | 
 |   const Array& parameter_types = Array::Handle(untag()->parameter_types()); | 
 |   parameter_types.SetAt(index, value); | 
 | } | 
 |  | 
 | void FunctionType::set_parameter_types(const Array& value) const { | 
 |   ASSERT(value.IsNull() || value.Length() > 0); | 
 |   untag()->set_parameter_types(value.ptr()); | 
 | } | 
 |  | 
 | StringPtr Function::ParameterNameAt(intptr_t index) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (signature() == FunctionType::null()) { | 
 |     // Without the signature, we're guaranteed not to have any name information. | 
 |     return Symbols::OptimizedOut().ptr(); | 
 |   } | 
 | #endif | 
 |   const intptr_t num_fixed = num_fixed_parameters(); | 
 |   if (HasOptionalNamedParameters() && index >= num_fixed) { | 
 |     const Array& parameter_names = | 
 |         Array::Handle(signature()->untag()->named_parameter_names()); | 
 |     return String::RawCast(parameter_names.At(index - num_fixed)); | 
 |   } | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   return Symbols::OptimizedOut().ptr(); | 
 | #else | 
 |   const Array& names = Array::Handle(untag()->positional_parameter_names()); | 
 |   return String::RawCast(names.At(index)); | 
 | #endif | 
 | } | 
 |  | 
 | void Function::SetParameterNameAt(intptr_t index, const String& value) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   ASSERT(!value.IsNull() && value.IsSymbol()); | 
 |   if (HasOptionalNamedParameters() && index >= num_fixed_parameters()) { | 
 |     // These should be set on the signature, not the function. | 
 |     UNREACHABLE(); | 
 |   } | 
 |   const Array& parameter_names = | 
 |       Array::Handle(untag()->positional_parameter_names()); | 
 |   parameter_names.SetAt(index, value); | 
 | #endif | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | void Function::set_positional_parameter_names(const Array& value) const { | 
 |   ASSERT(value.ptr() == Object::empty_array().ptr() || value.Length() > 0); | 
 |   untag()->set_positional_parameter_names(value.ptr()); | 
 | } | 
 | #endif | 
 |  | 
 | StringPtr FunctionType::ParameterNameAt(intptr_t index) const { | 
 |   const intptr_t num_fixed = num_fixed_parameters(); | 
 |   if (!HasOptionalNamedParameters() || index < num_fixed) { | 
 |     // The positional parameter names are stored on the function, not here. | 
 |     UNREACHABLE(); | 
 |   } | 
 |   const Array& parameter_names = | 
 |       Array::Handle(untag()->named_parameter_names()); | 
 |   return String::RawCast(parameter_names.At(index - num_fixed)); | 
 | } | 
 |  | 
 | void FunctionType::SetParameterNameAt(intptr_t index, | 
 |                                       const String& value) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   ASSERT(!value.IsNull() && value.IsSymbol()); | 
 |   const intptr_t num_fixed = num_fixed_parameters(); | 
 |   if (!HasOptionalNamedParameters() || index < num_fixed) { | 
 |     UNREACHABLE(); | 
 |   } | 
 |   const Array& parameter_names = | 
 |       Array::Handle(untag()->named_parameter_names()); | 
 |   parameter_names.SetAt(index - num_fixed, value); | 
 | #endif | 
 | } | 
 |  | 
 | void FunctionType::set_named_parameter_names(const Array& value) const { | 
 |   ASSERT(value.ptr() == Object::empty_array().ptr() || value.Length() > 0); | 
 |   untag()->set_named_parameter_names(value.ptr()); | 
 | } | 
 |  | 
 | void Function::CreateNameArray(Heap::Space space) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   const intptr_t num_positional_params = | 
 |       num_fixed_parameters() + NumOptionalPositionalParameters(); | 
 |   if (num_positional_params == 0) { | 
 |     set_positional_parameter_names(Object::empty_array()); | 
 |   } else { | 
 |     set_positional_parameter_names( | 
 |         Array::Handle(Array::New(num_positional_params, space))); | 
 |   } | 
 | #endif | 
 | } | 
 |  | 
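// Allocates the named_parameter_names array with layout
// [name_0, ..., name_n-1, flags_0, ..., flags_m-1], where each trailing Smi
// flags slot covers kNumParameterFlagsPerElement parameters. Flag slots are
// zero-initialized here; trailing all-zero slots are later truncated by
// FinalizeNameArray.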
 | void FunctionType::CreateNameArrayIncludingFlags(Heap::Space space) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   const intptr_t num_named_parameters = NumOptionalNamedParameters(); | 
 |   if (num_named_parameters == 0) { | 
 |     return set_named_parameter_names(Object::empty_array()); | 
 |   } | 
 |   // Currently, we only store flags for named parameters. | 
 |   const intptr_t last_index = (num_named_parameters - 1) / | 
 |                               compiler::target::kNumParameterFlagsPerElement; | 
 |   const intptr_t num_flag_slots = last_index + 1; | 
 |   intptr_t num_total_slots = num_named_parameters + num_flag_slots; | 
 |   auto& array = Array::Handle(Array::New(num_total_slots, space)); | 
 |   // Set flag slots to Smi 0 before handing off. | 
 |   auto& empty_flags_smi = Smi::Handle(Smi::New(0)); | 
 |   for (intptr_t i = num_named_parameters; i < num_total_slots; i++) { | 
 |     array.SetAt(i, empty_flags_smi); | 
 |   } | 
 |   set_named_parameter_names(array); | 
 | #endif | 
 | } | 
 |  | 
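// Maps a parameter index to the trailing flags slot and bit mask holding its
// kRequiredNamedParameterFlag (see the array layout described above
// CreateNameArrayIncludingFlags).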
 | intptr_t FunctionType::GetRequiredFlagIndex(intptr_t index, | 
 |                                             intptr_t* flag_mask) const { | 
 |   // If these calculations change, also change | 
 |   // FlowGraphBuilder::BuildClosureCallHasRequiredNamedArgumentsCheck. | 
 |   ASSERT(HasOptionalNamedParameters()); | 
 |   ASSERT(flag_mask != nullptr); | 
 |   ASSERT(index >= num_fixed_parameters()); | 
 |   index -= num_fixed_parameters(); | 
 |   *flag_mask = (1 << compiler::target::kRequiredNamedParameterFlag) | 
 |                << ((static_cast<uintptr_t>(index) % | 
 |                     compiler::target::kNumParameterFlagsPerElement) * | 
 |                    compiler::target::kNumParameterFlags); | 
 |   return NumOptionalNamedParameters() + | 
 |          index / compiler::target::kNumParameterFlagsPerElement; | 
 | } | 
 |  | 
 | bool Function::HasRequiredNamedParameters() const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (signature() == FunctionType::null()) { | 
 |     // Signatures for functions with required named parameters are not dropped. | 
 |     return false; | 
 |   } | 
 | #endif | 
 |   return FunctionType::Handle(signature()).HasRequiredNamedParameters(); | 
 | } | 
 |  | 
 | bool Function::IsRequiredAt(intptr_t index) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (signature() == FunctionType::null()) { | 
    // The signature is not dropped in AOT when a named parameter is required.
 |     return false; | 
 |   } | 
 | #endif | 
 |   if (!HasOptionalNamedParameters() || index < num_fixed_parameters()) { | 
 |     return false; | 
 |   } | 
 |   const FunctionType& sig = FunctionType::Handle(signature()); | 
 |   return sig.IsRequiredAt(index); | 
 | } | 
 |  | 
 | bool FunctionType::IsRequiredAt(intptr_t index) const { | 
 |   if (!HasOptionalNamedParameters() || index < num_fixed_parameters()) { | 
 |     return false; | 
 |   } | 
 |   intptr_t flag_mask; | 
 |   const intptr_t flag_index = GetRequiredFlagIndex(index, &flag_mask); | 
 |   const Array& parameter_names = | 
 |       Array::Handle(untag()->named_parameter_names()); | 
 |   if (flag_index >= parameter_names.Length()) { | 
 |     return false; | 
 |   } | 
 |   const intptr_t flags = | 
 |       Smi::Value(Smi::RawCast(parameter_names.At(flag_index))); | 
 |   return (flags & flag_mask) != 0; | 
 | } | 
 |  | 
 | void FunctionType::SetIsRequiredAt(intptr_t index) const { | 
#if defined(DART_PRECOMPILED_RUNTIME)
 |   UNREACHABLE(); | 
 | #else | 
 |   intptr_t flag_mask; | 
 |   const intptr_t flag_index = GetRequiredFlagIndex(index, &flag_mask); | 
 |   const Array& parameter_names = | 
 |       Array::Handle(untag()->named_parameter_names()); | 
 |   ASSERT(flag_index < parameter_names.Length()); | 
 |   const intptr_t flags = | 
 |       Smi::Value(Smi::RawCast(parameter_names.At(flag_index))); | 
 |   parameter_names.SetAt(flag_index, Smi::Handle(Smi::New(flags | flag_mask))); | 
 | #endif | 
 | } | 
 |  | 
 | void FunctionType::FinalizeNameArray() const { | 
#if defined(DART_PRECOMPILED_RUNTIME)
 |   UNREACHABLE(); | 
 | #else | 
 |   const intptr_t num_named_parameters = NumOptionalNamedParameters(); | 
 |   if (num_named_parameters == 0) { | 
 |     ASSERT(untag()->named_parameter_names() == Object::empty_array().ptr()); | 
 |     return; | 
 |   } | 
 |   const Array& parameter_names = | 
 |       Array::Handle(untag()->named_parameter_names()); | 
 |   // Truncate the parameter names array to remove unused flags from the end. | 
 |   intptr_t last_used = parameter_names.Length() - 1; | 
 |   for (; last_used >= num_named_parameters; --last_used) { | 
 |     if (Smi::Value(Smi::RawCast(parameter_names.At(last_used))) != 0) { | 
 |       break; | 
 |     } | 
 |   } | 
 |   parameter_names.Truncate(last_used + 1); | 
 | #endif | 
 | } | 
 |  | 
 | bool FunctionType::HasRequiredNamedParameters() const { | 
 |   const intptr_t num_named_params = NumOptionalNamedParameters(); | 
 |   if (num_named_params == 0) return false; | 
 |   // Check for flag slots in the named parameter names array. | 
 |   const auto& parameter_names = Array::Handle(named_parameter_names()); | 
 |   ASSERT(!parameter_names.IsNull()); | 
 |   return parameter_names.Length() > num_named_params; | 
 | } | 
 |  | 
 | static void ReportTooManyTypeParameters(const FunctionType& sig) { | 
 |   Report::MessageF(Report::kError, Script::Handle(), TokenPosition::kNoSource, | 
 |                    Report::AtLocation, | 
 |                    "too many type parameters declared in signature '%s' or in " | 
 |                    "its enclosing signatures", | 
 |                    sig.ToUserVisibleCString()); | 
 |   UNREACHABLE(); | 
 | } | 
 |  | 
 | void FunctionType::SetTypeParameters(const TypeParameters& value) const { | 
 |   untag()->set_type_parameters(value.ptr()); | 
 |   const intptr_t count = value.Length(); | 
 |   if (!UntaggedFunctionType::PackedNumTypeParameters::is_valid(count)) { | 
 |     ReportTooManyTypeParameters(*this); | 
 |   } | 
 |   untag()->packed_type_parameter_counts_.Update<PackedNumTypeParameters>(count); | 
 | } | 
 |  | 
 | void FunctionType::SetNumParentTypeArguments(intptr_t value) const { | 
 |   ASSERT(value >= 0); | 
 |   if (!PackedNumParentTypeArguments::is_valid(value)) { | 
 |     ReportTooManyTypeParameters(*this); | 
 |   } | 
 |   untag()->packed_type_parameter_counts_.Update<PackedNumParentTypeArguments>( | 
 |       value); | 
 | } | 
 |  | 
 | bool Function::IsGeneric() const { | 
 |   return FunctionType::IsGeneric(signature()); | 
 | } | 
 | intptr_t Function::NumTypeParameters() const { | 
 |   return FunctionType::NumTypeParametersOf(signature()); | 
 | } | 
 | intptr_t Function::NumParentTypeArguments() const { | 
 |   return FunctionType::NumParentTypeArgumentsOf(signature()); | 
 | } | 
 | intptr_t Function::NumTypeArguments() const { | 
 |   return FunctionType::NumTypeArgumentsOf(signature()); | 
 | } | 
 | intptr_t Function::num_fixed_parameters() const { | 
 |   return FunctionType::NumFixedParametersOf(signature()); | 
 | } | 
 | bool Function::HasOptionalParameters() const { | 
 |   return FunctionType::HasOptionalParameters(signature()); | 
 | } | 
 | bool Function::HasOptionalNamedParameters() const { | 
 |   return FunctionType::HasOptionalNamedParameters(signature()); | 
 | } | 
 | bool Function::HasOptionalPositionalParameters() const { | 
 |   return FunctionType::HasOptionalPositionalParameters(signature()); | 
 | } | 
 | intptr_t Function::NumOptionalParameters() const { | 
 |   return FunctionType::NumOptionalParametersOf(signature()); | 
 | } | 
 | intptr_t Function::NumOptionalPositionalParameters() const { | 
 |   return FunctionType::NumOptionalPositionalParametersOf(signature()); | 
 | } | 
 | intptr_t Function::NumOptionalNamedParameters() const { | 
 |   return FunctionType::NumOptionalNamedParametersOf(signature()); | 
 | } | 
 | intptr_t Function::NumParameters() const { | 
 |   return FunctionType::NumParametersOf(signature()); | 
 | } | 
 |  | 
 | TypeParameterPtr Function::TypeParameterAt(intptr_t index, | 
 |                                            Nullability nullability) const { | 
 |   const FunctionType& sig = FunctionType::Handle(signature()); | 
 |   return sig.TypeParameterAt(index, nullability); | 
 | } | 
 |  | 
 | void Function::set_kind(UntaggedFunction::Kind value) const { | 
 |   untag()->kind_tag_.Update<KindBits>(value); | 
 | } | 
 |  | 
 | void Function::set_modifier(UntaggedFunction::AsyncModifier value) const { | 
 |   untag()->kind_tag_.Update<ModifierBits>(value); | 
 | } | 
 |  | 
 | void Function::set_recognized_kind(MethodRecognizer::Kind value) const { | 
  // The recognized kind can be set only once, or reset to kUnknown.
 |   ASSERT((value == MethodRecognizer::kUnknown) || !IsRecognized()); | 
 |   untag()->kind_tag_.Update<RecognizedBits>(value); | 
 | } | 
 |  | 
 | void Function::set_token_pos(TokenPosition token_pos) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   ASSERT(!token_pos.IsClassifying() || IsMethodExtractor()); | 
 |   StoreNonPointer(&untag()->token_pos_, token_pos); | 
 | #endif | 
 | } | 
 |  | 
 | void Function::set_kind_tag(uint32_t value) const { | 
 |   untag()->kind_tag_ = value; | 
 | } | 
 |  | 
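// Kernel eval functions store an Array of EvalFunctionData::kLength elements
// in data() (see the overloading table above set_data); the length check
// below relies on this to distinguish them from the other Array payloads.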
 | bool Function::is_eval_function() const { | 
 |   if (data()->IsArray()) { | 
 |     const intptr_t len = Array::LengthOf(Array::RawCast(data())); | 
 |     return len == static_cast<intptr_t>(EvalFunctionData::kLength); | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | void Function::set_packed_fields(uint32_t packed_fields) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   StoreNonPointer(&untag()->packed_fields_, packed_fields); | 
 | #endif | 
 | } | 
 |  | 
 | bool Function::IsOptimizable() const { | 
 |   if (FLAG_precompiled_mode) { | 
 |     return true; | 
 |   } | 
 |   if (ForceOptimize()) return true; | 
 |   if (is_old_native()) { | 
 |     // Native methods don't need to be optimized. | 
 |     return false; | 
 |   } | 
 |   if (is_optimizable() && (script() != Script::null())) { | 
 |     // Additional check needed for implicit getters. | 
 |     return (unoptimized_code() == Object::null()) || | 
 |            (Code::Handle(unoptimized_code()).Size() < | 
 |             FLAG_huge_method_cutoff_in_code_size); | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | void Function::SetIsOptimizable(bool value) const { | 
 |   ASSERT(!is_native()); | 
 |   set_is_optimizable(value); | 
 |   if (!value) { | 
 |     set_is_inlinable(false); | 
 |     set_usage_counter(INT32_MIN); | 
 |   } | 
 | } | 
 |  | 
 | bool Function::IsTypedDataViewFactory() const { | 
 |   switch (recognized_kind()) { | 
 |     case MethodRecognizer::kTypedData_ByteDataView_factory: | 
 |     case MethodRecognizer::kTypedData_Int8ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_Uint8ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_Uint8ClampedArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_Int16ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_Uint16ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_Int32ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_Uint32ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_Int64ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_Uint64ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_Float32ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_Float64ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_Float32x4ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_Int32x4ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_Float64x2ArrayView_factory: | 
 |       return true; | 
 |     default: | 
 |       return false; | 
 |   } | 
 | } | 
 |  | 
 | bool Function::IsUnmodifiableTypedDataViewFactory() const { | 
 |   switch (recognized_kind()) { | 
 |     case MethodRecognizer::kTypedData_UnmodifiableByteDataView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableInt8ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableUint8ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableUint8ClampedArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableInt16ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableUint16ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableInt32ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableUint32ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableInt64ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableUint64ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableFloat32ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableFloat64ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableFloat32x4ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableInt32x4ArrayView_factory: | 
 |     case MethodRecognizer::kTypedData_UnmodifiableFloat64x2ArrayView_factory: | 
 |       return true; | 
 |     default: | 
 |       return false; | 
 |   } | 
 | } | 
 |  | 
 | static bool InVmTests(const Function& function) { | 
 | #if defined(TESTING) | 
 |   return true; | 
 | #else | 
 |   auto* zone = Thread::Current()->zone(); | 
 |   const auto& cls = Class::Handle(zone, function.Owner()); | 
 |   const auto& lib = Library::Handle(zone, cls.library()); | 
 |   const auto& url = String::Handle(zone, lib.url()); | 
 |   const bool in_vm_tests = | 
 |       strstr(url.ToCString(), "runtime/tests/vm/") != nullptr; | 
 |   return in_vm_tests; | 
 | #endif | 
 | } | 
 |  | 
 | bool Function::ForceOptimize() const { | 
 |   if (RecognizedKindForceOptimize() || IsFfiCallClosure() || | 
 |       IsFfiCallbackTrampoline() || is_ffi_native() || | 
 |       IsTypedDataViewFactory() || IsUnmodifiableTypedDataViewFactory()) { | 
 |     return true; | 
 |   } | 
 |  | 
 |   if (!has_pragma()) return false; | 
 |  | 
 |   const bool has_vm_pragma = Library::FindPragma( | 
 |       Thread::Current(), false, *this, Symbols::vm_force_optimize()); | 
 |   if (!has_vm_pragma) return false; | 
 |  | 
  // For run_vm_tests and tests under runtime/tests/vm, allow marking
  // arbitrary functions as force-optimized via
  // `@pragma('vm:force-optimize')`.
 |   return InVmTests(*this); | 
 | } | 
 |  | 
 | bool Function::IsPreferInline() const { | 
 |   if (!has_pragma()) return false; | 
 |  | 
 |   return Library::FindPragma(Thread::Current(), /*only_core=*/false, *this, | 
 |                              Symbols::vm_prefer_inline()); | 
 | } | 
 |  | 
 | bool Function::IsIdempotent() const { | 
 |   if (!has_pragma()) return false; | 
 |  | 
 | #if defined(TESTING) | 
 |   const bool kAllowOnlyForCoreLibFunctions = false; | 
 | #else | 
 |   const bool kAllowOnlyForCoreLibFunctions = true; | 
 | #endif  // defined(TESTING) | 
 |  | 
 |   return Library::FindPragma(Thread::Current(), kAllowOnlyForCoreLibFunctions, | 
 |                              *this, Symbols::vm_idempotent()); | 
 | } | 
 |  | 
 | bool Function::IsCachableIdempotent() const { | 
 |   if (!has_pragma()) return false; | 
 |  | 
 |   const bool has_vm_pragma = | 
 |       Library::FindPragma(Thread::Current(), /*only_core=*/false, *this, | 
 |                           Symbols::vm_cachable_idempotent()); | 
 |   if (!has_vm_pragma) return false; | 
 |  | 
  // For run_vm_tests and tests under runtime/tests/vm, allow marking
  // arbitrary functions.
 |   return InVmTests(*this); | 
 | } | 
 |  | 
 | bool Function::IsFfiCallClosure() const { | 
 |   if (!IsNonImplicitClosureFunction()) return false; | 
 |   if (!has_pragma()) return false; | 
 |   return Library::FindPragma(Thread::Current(), /*only_core=*/false, *this, | 
 |                              Symbols::vm_ffi_call_closure()); | 
 | } | 
 |  | 
 | InstancePtr Function::GetFfiCallClosurePragmaValue() const { | 
 |   ASSERT(IsFfiCallClosure()); | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   auto& pragma_value = Object::Handle(zone); | 
 |   Library::FindPragma(thread, /*only_core=*/false, *this, | 
 |                       Symbols::vm_ffi_call_closure(), | 
 |                       /*multiple=*/false, &pragma_value); | 
 |   ASSERT(!pragma_value.IsNull()); | 
 |   return Instance::Cast(pragma_value).ptr(); | 
 | } | 
 |  | 
 | bool Function::RecognizedKindForceOptimize() const { | 
 |   switch (recognized_kind()) { | 
    // Uses unboxed/untagged data not supported in unoptimized code, or uses
    // LoadIndexed/StoreIndexed/MemoryCopy instructions with typed data
    // arrays, which require optimization for payload extraction.
 |     case MethodRecognizer::kObjectArrayGetIndexed: | 
 |     case MethodRecognizer::kGrowableArrayGetIndexed: | 
 | #define TYPED_DATA_GET_INDEXED_CASES(clazz)                                    \ | 
 |   case MethodRecognizer::k##clazz##ArrayGetIndexed:                            \ | 
 |     FALL_THROUGH;                                                              \ | 
 |   case MethodRecognizer::kExternal##clazz##ArrayGetIndexed:                    \ | 
 |     FALL_THROUGH;                                                              \ | 
 |   case MethodRecognizer::k##clazz##ArrayViewGetIndexed:                        \ | 
 |     FALL_THROUGH; | 
 |       DART_CLASS_LIST_TYPED_DATA(TYPED_DATA_GET_INDEXED_CASES) | 
 | #undef TYPED_DATA_GET_INDEXED_CASES | 
 |     case MethodRecognizer::kCopyRangeFromUint8ListToOneByteString: | 
 |     case MethodRecognizer::kFinalizerBase_getIsolateFinalizers: | 
 |     case MethodRecognizer::kFinalizerBase_setIsolate: | 
 |     case MethodRecognizer::kFinalizerBase_setIsolateFinalizers: | 
 |     case MethodRecognizer::kFinalizerEntry_getExternalSize: | 
 |     case MethodRecognizer::kExtensionStreamHasListener: | 
 |     case MethodRecognizer::kFfiLoadInt8: | 
 |     case MethodRecognizer::kFfiLoadInt16: | 
 |     case MethodRecognizer::kFfiLoadInt32: | 
 |     case MethodRecognizer::kFfiLoadInt64: | 
 |     case MethodRecognizer::kFfiLoadUint8: | 
 |     case MethodRecognizer::kFfiLoadUint16: | 
 |     case MethodRecognizer::kFfiLoadUint32: | 
 |     case MethodRecognizer::kFfiLoadUint64: | 
 |     case MethodRecognizer::kFfiLoadFloat: | 
 |     case MethodRecognizer::kFfiLoadFloatUnaligned: | 
 |     case MethodRecognizer::kFfiLoadDouble: | 
 |     case MethodRecognizer::kFfiLoadDoubleUnaligned: | 
 |     case MethodRecognizer::kFfiLoadPointer: | 
 |     case MethodRecognizer::kFfiStoreInt8: | 
 |     case MethodRecognizer::kFfiStoreInt16: | 
 |     case MethodRecognizer::kFfiStoreInt32: | 
 |     case MethodRecognizer::kFfiStoreInt64: | 
 |     case MethodRecognizer::kFfiStoreUint8: | 
 |     case MethodRecognizer::kFfiStoreUint16: | 
 |     case MethodRecognizer::kFfiStoreUint32: | 
 |     case MethodRecognizer::kFfiStoreUint64: | 
 |     case MethodRecognizer::kFfiStoreFloat: | 
 |     case MethodRecognizer::kFfiStoreFloatUnaligned: | 
 |     case MethodRecognizer::kFfiStoreDouble: | 
 |     case MethodRecognizer::kFfiStoreDoubleUnaligned: | 
 |     case MethodRecognizer::kFfiStorePointer: | 
 |     case MethodRecognizer::kFfiFromAddress: | 
 |     case MethodRecognizer::kFfiGetAddress: | 
 |     case MethodRecognizer::kFfiAsExternalTypedDataInt8: | 
 |     case MethodRecognizer::kFfiAsExternalTypedDataInt16: | 
 |     case MethodRecognizer::kFfiAsExternalTypedDataInt32: | 
 |     case MethodRecognizer::kFfiAsExternalTypedDataInt64: | 
 |     case MethodRecognizer::kFfiAsExternalTypedDataUint8: | 
 |     case MethodRecognizer::kFfiAsExternalTypedDataUint16: | 
 |     case MethodRecognizer::kFfiAsExternalTypedDataUint32: | 
 |     case MethodRecognizer::kFfiAsExternalTypedDataUint64: | 
 |     case MethodRecognizer::kFfiAsExternalTypedDataFloat: | 
 |     case MethodRecognizer::kFfiAsExternalTypedDataDouble: | 
 |     case MethodRecognizer::kGetNativeField: | 
 |     case MethodRecognizer::kRecord_fieldNames: | 
 |     case MethodRecognizer::kRecord_numFields: | 
 |     case MethodRecognizer::kStringBaseCodeUnitAt: | 
 |     case MethodRecognizer::kUtf8DecoderScan: | 
 |     case MethodRecognizer::kDouble_hashCode: | 
 |     case MethodRecognizer::kTypedList_GetInt8: | 
 |     case MethodRecognizer::kTypedList_SetInt8: | 
 |     case MethodRecognizer::kTypedList_GetUint8: | 
 |     case MethodRecognizer::kTypedList_SetUint8: | 
 |     case MethodRecognizer::kTypedList_GetInt16: | 
 |     case MethodRecognizer::kTypedList_SetInt16: | 
 |     case MethodRecognizer::kTypedList_GetUint16: | 
 |     case MethodRecognizer::kTypedList_SetUint16: | 
 |     case MethodRecognizer::kTypedList_GetInt32: | 
 |     case MethodRecognizer::kTypedList_SetInt32: | 
 |     case MethodRecognizer::kTypedList_GetUint32: | 
 |     case MethodRecognizer::kTypedList_SetUint32: | 
 |     case MethodRecognizer::kTypedList_GetInt64: | 
 |     case MethodRecognizer::kTypedList_SetInt64: | 
 |     case MethodRecognizer::kTypedList_GetUint64: | 
 |     case MethodRecognizer::kTypedList_SetUint64: | 
 |     case MethodRecognizer::kTypedList_GetFloat32: | 
 |     case MethodRecognizer::kTypedList_SetFloat32: | 
 |     case MethodRecognizer::kTypedList_GetFloat64: | 
 |     case MethodRecognizer::kTypedList_SetFloat64: | 
 |     case MethodRecognizer::kTypedList_GetInt32x4: | 
 |     case MethodRecognizer::kTypedList_SetInt32x4: | 
 |     case MethodRecognizer::kTypedList_GetFloat32x4: | 
 |     case MethodRecognizer::kTypedList_SetFloat32x4: | 
 |     case MethodRecognizer::kTypedList_GetFloat64x2: | 
 |     case MethodRecognizer::kTypedList_SetFloat64x2: | 
 |     case MethodRecognizer::kTypedData_memMove1: | 
 |     case MethodRecognizer::kTypedData_memMove2: | 
 |     case MethodRecognizer::kTypedData_memMove4: | 
 |     case MethodRecognizer::kTypedData_memMove8: | 
 |     case MethodRecognizer::kTypedData_memMove16: | 
 |     case MethodRecognizer::kMemCopy: | 
    // Prevent the GC from running so that the operation is atomic from
    // a GC point of view. Always double-check the implementation in
    // kernel_to_il.cc to ensure that no GC can happen between the relevant
    // IL instructions.
 |     // TODO(https://dartbug.com/48527): Support inlining. | 
 |     case MethodRecognizer::kFinalizerBase_exchangeEntriesCollectedWithNull: | 
 |     // Both unboxed/untagged data and atomic-to-GC operation. | 
 |     case MethodRecognizer::kFinalizerEntry_allocate: | 
 |       return true; | 
 |     default: | 
 |       return false; | 
 |   } | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | bool Function::CanBeInlined() const { | 
 |   if (ForceOptimize()) { | 
 |     if (IsFfiCallClosure() || IsFfiCallbackTrampoline() || is_ffi_native()) { | 
 |       // We currently don't support inlining FFI trampolines. Some of them | 
 |       // are naturally non-inlinable because they contain a try/catch block, | 
 |       // but this condition is broader than strictly necessary. | 
 |       // The work necessary for inlining FFI trampolines is tracked by | 
 |       // http://dartbug.com/45055. | 
 |       return false; | 
 |     } | 
 |     if (CompilerState::Current().is_aot()) { | 
 |       return true; | 
 |     } | 
    // Inlining of force-optimized functions requires the target function to
    // be idempotent, because if deoptimization is needed in the inlined
    // body, the execution of the force-optimized function will be restarted
    // at the beginning of the function.
 |     ASSERT(!IsPreferInline() || IsIdempotent()); | 
 |     return IsIdempotent(); | 
 |   } | 
 |  | 
 |   if (HasBreakpoint()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   return is_inlinable(); | 
 | } | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | intptr_t Function::NumImplicitParameters() const { | 
 |   const UntaggedFunction::Kind k = kind(); | 
 |   if (k == UntaggedFunction::kConstructor) { | 
 |     // Type arguments for factory; instance for generative constructor. | 
 |     return 1; | 
 |   } | 
 |   if ((k == UntaggedFunction::kClosureFunction) || | 
 |       (k == UntaggedFunction::kImplicitClosureFunction) || | 
 |       (k == UntaggedFunction::kFfiTrampoline)) { | 
 |     return 1;  // Closure object. | 
 |   } | 
 |   if (!is_static()) { | 
 |     // Closure functions defined inside instance (i.e. non-static) functions are | 
 |     // marked as non-static, but they do not have a receiver. | 
 |     // Closures are handled above. | 
 |     ASSERT((k != UntaggedFunction::kClosureFunction) && | 
 |            (k != UntaggedFunction::kImplicitClosureFunction)); | 
 |     return 1;  // Receiver. | 
 |   } | 
 |   return 0;  // No implicit parameters. | 
 | } | 
 |  | 
 | bool Function::AreValidArgumentCounts(intptr_t num_type_arguments, | 
 |                                       intptr_t num_arguments, | 
 |                                       intptr_t num_named_arguments, | 
 |                                       String* error_message) const { | 
 |   if ((num_type_arguments != 0) && | 
 |       (num_type_arguments != NumTypeParameters())) { | 
 |     if (error_message != nullptr) { | 
 |       const intptr_t kMessageBufferSize = 64; | 
 |       char message_buffer[kMessageBufferSize]; | 
 |       Utils::SNPrint(message_buffer, kMessageBufferSize, | 
 |                      "%" Pd " type arguments passed, but %" Pd " expected", | 
 |                      num_type_arguments, NumTypeParameters()); | 
      // Allocate in old space because this can be invoked during background
      // optimizing compilation.
 |       *error_message = String::New(message_buffer, Heap::kOld); | 
 |     } | 
 |     return false;  // Too many type arguments. | 
 |   } | 
 |   if (num_named_arguments > NumOptionalNamedParameters()) { | 
 |     if (error_message != nullptr) { | 
 |       const intptr_t kMessageBufferSize = 64; | 
 |       char message_buffer[kMessageBufferSize]; | 
 |       Utils::SNPrint(message_buffer, kMessageBufferSize, | 
 |                      "%" Pd " named passed, at most %" Pd " expected", | 
 |                      num_named_arguments, NumOptionalNamedParameters()); | 
      // Allocate in old space because this can be invoked during background
      // optimizing compilation.
 |       *error_message = String::New(message_buffer, Heap::kOld); | 
 |     } | 
 |     return false;  // Too many named arguments. | 
 |   } | 
 |   const intptr_t num_pos_args = num_arguments - num_named_arguments; | 
 |   const intptr_t num_opt_pos_params = NumOptionalPositionalParameters(); | 
 |   const intptr_t num_pos_params = num_fixed_parameters() + num_opt_pos_params; | 
 |   if (num_pos_args > num_pos_params) { | 
 |     if (error_message != nullptr) { | 
 |       const intptr_t kMessageBufferSize = 64; | 
 |       char message_buffer[kMessageBufferSize]; | 
      // Hide implicit parameters from the user.
 |       const intptr_t num_hidden_params = NumImplicitParameters(); | 
 |       Utils::SNPrint(message_buffer, kMessageBufferSize, | 
 |                      "%" Pd "%s passed, %s%" Pd " expected", | 
 |                      num_pos_args - num_hidden_params, | 
 |                      num_opt_pos_params > 0 ? " positional" : "", | 
 |                      num_opt_pos_params > 0 ? "at most " : "", | 
 |                      num_pos_params - num_hidden_params); | 
      // Allocate in old space because this can be invoked during background
      // optimizing compilation.
 |       *error_message = String::New(message_buffer, Heap::kOld); | 
 |     } | 
 |     return false;  // Too many fixed and/or positional arguments. | 
 |   } | 
 |   if (num_pos_args < num_fixed_parameters()) { | 
 |     if (error_message != nullptr) { | 
 |       const intptr_t kMessageBufferSize = 64; | 
 |       char message_buffer[kMessageBufferSize]; | 
      // Hide implicit parameters from the user.
 |       const intptr_t num_hidden_params = NumImplicitParameters(); | 
 |       Utils::SNPrint(message_buffer, kMessageBufferSize, | 
 |                      "%" Pd "%s passed, %s%" Pd " expected", | 
 |                      num_pos_args - num_hidden_params, | 
 |                      num_opt_pos_params > 0 ? " positional" : "", | 
 |                      num_opt_pos_params > 0 ? "at least " : "", | 
 |                      num_fixed_parameters() - num_hidden_params); | 
      // Allocate in old space because this can be invoked during background
      // optimizing compilation.
 |       *error_message = String::New(message_buffer, Heap::kOld); | 
 |     } | 
 |     return false;  // Too few fixed and/or positional arguments. | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | bool Function::AreValidArguments(intptr_t num_type_arguments, | 
 |                                  intptr_t num_arguments, | 
 |                                  const Array& argument_names, | 
 |                                  String* error_message) const { | 
 |   const Array& args_desc_array = Array::Handle(ArgumentsDescriptor::NewBoxed( | 
 |       num_type_arguments, num_arguments, argument_names, Heap::kNew)); | 
 |   ArgumentsDescriptor args_desc(args_desc_array); | 
 |   return AreValidArguments(args_desc, error_message); | 
 | } | 
 |  | 
 | bool Function::AreValidArguments(const ArgumentsDescriptor& args_desc, | 
 |                                  String* error_message) const { | 
 |   const intptr_t num_type_arguments = args_desc.TypeArgsLen(); | 
 |   const intptr_t num_arguments = args_desc.Count(); | 
 |   const intptr_t num_named_arguments = args_desc.NamedCount(); | 
 |  | 
 |   if (!AreValidArgumentCounts(num_type_arguments, num_arguments, | 
 |                               num_named_arguments, error_message)) { | 
 |     return false; | 
 |   } | 
 |   // Verify that all argument names are valid parameter names. | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   String& argument_name = String::Handle(zone); | 
 |   String& parameter_name = String::Handle(zone); | 
 |   const intptr_t num_positional_args = num_arguments - num_named_arguments; | 
 |   const intptr_t num_parameters = NumParameters(); | 
 |   for (intptr_t i = 0; i < num_named_arguments; i++) { | 
 |     argument_name = args_desc.NameAt(i); | 
 |     ASSERT(argument_name.IsSymbol()); | 
 |     bool found = false; | 
 |     for (intptr_t j = num_positional_args; j < num_parameters; j++) { | 
 |       parameter_name = ParameterNameAt(j); | 
 |       ASSERT(parameter_name.IsSymbol()); | 
 |       if (argument_name.Equals(parameter_name)) { | 
 |         found = true; | 
 |         break; | 
 |       } | 
 |     } | 
 |     if (!found) { | 
 |       if (error_message != nullptr) { | 
 |         const intptr_t kMessageBufferSize = 64; | 
 |         char message_buffer[kMessageBufferSize]; | 
 |         Utils::SNPrint(message_buffer, kMessageBufferSize, | 
 |                        "no optional formal parameter named '%s'", | 
 |                        argument_name.ToCString()); | 
 |         *error_message = String::New(message_buffer); | 
 |       } | 
 |       return false; | 
 |     } | 
 |   } | 
  // Verify that all required named parameters are provided.
 |   for (intptr_t j = num_parameters - NumOptionalNamedParameters(); | 
 |        j < num_parameters; j++) { | 
 |     if (IsRequiredAt(j)) { | 
 |       parameter_name = ParameterNameAt(j); | 
 |       ASSERT(parameter_name.IsSymbol()); | 
 |       bool found = false; | 
 |       for (intptr_t i = 0; i < num_named_arguments; i++) { | 
 |         argument_name = args_desc.NameAt(i); | 
 |         ASSERT(argument_name.IsSymbol()); | 
 |         if (argument_name.Equals(parameter_name)) { | 
 |           found = true; | 
 |           break; | 
 |         } | 
 |       } | 
 |       if (!found) { | 
 |         if (error_message != nullptr) { | 
 |           const intptr_t kMessageBufferSize = 64; | 
 |           char message_buffer[kMessageBufferSize]; | 
 |           Utils::SNPrint(message_buffer, kMessageBufferSize, | 
 |                          "missing required named parameter '%s'", | 
 |                          parameter_name.ToCString()); | 
 |           *error_message = String::New(message_buffer); | 
 |         } | 
 |         return false; | 
 |       } | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
// Retrieves the function type arguments, if any. These could be the type
// arguments explicitly passed in the arguments array, the delayed type
// arguments of a closure, or the instantiated bounds of the type parameters
// if no other source of function type arguments is found.
 | static TypeArgumentsPtr RetrieveFunctionTypeArguments( | 
 |     Thread* thread, | 
 |     Zone* zone, | 
 |     const Function& function, | 
 |     const Instance& receiver, | 
 |     const TypeArguments& instantiator_type_args, | 
 |     const Array& args, | 
 |     const ArgumentsDescriptor& args_desc) { | 
 |   ASSERT(!function.IsNull()); | 
 |  | 
 |   const intptr_t kNumCurrentTypeArgs = function.NumTypeParameters(); | 
 |   const intptr_t kNumParentTypeArgs = function.NumParentTypeArguments(); | 
 |   const intptr_t kNumTypeArgs = kNumCurrentTypeArgs + kNumParentTypeArgs; | 
 |   // Non-generic functions don't receive type arguments. | 
 |   if (kNumTypeArgs == 0) return Object::empty_type_arguments().ptr(); | 
  // Closure functions require the receiver to be provided (and to be a
  // closure).
 |   ASSERT(!function.IsClosureFunction() || receiver.IsClosure()); | 
 |  | 
 |   // Only closure functions should have possibly generic parents. | 
 |   ASSERT(function.IsClosureFunction() || kNumParentTypeArgs == 0); | 
 |   const auto& parent_type_args = | 
 |       function.IsClosureFunction() | 
 |           ? TypeArguments::Handle( | 
 |                 zone, Closure::Cast(receiver).function_type_arguments()) | 
 |           : Object::empty_type_arguments(); | 
  // If the parent type arguments are not provided, we don't try to
  // instantiate the parent type parameters to their bounds, nor do we check
  // any closed-over type arguments against the parent type parameter bounds
  // (they have already been type checked).
 |   if (kNumCurrentTypeArgs == 0) return parent_type_args.ptr(); | 
 |  | 
 |   auto& function_type_args = TypeArguments::Handle(zone); | 
 |   // First check for delayed type arguments before using either provided or | 
 |   // default type arguments. | 
 |   bool has_delayed_type_args = false; | 
 |   if (function.IsClosureFunction()) { | 
 |     const auto& closure = Closure::Cast(receiver); | 
 |     function_type_args = closure.delayed_type_arguments(); | 
 |     has_delayed_type_args = | 
 |         function_type_args.ptr() != Object::empty_type_arguments().ptr(); | 
 |   } | 
 |  | 
 |   if (args_desc.TypeArgsLen() > 0) { | 
 |     // We should never end up here when the receiver is a closure with delayed | 
 |     // type arguments unless this dynamically called closure function was | 
 |     // retrieved directly from the closure instead of going through | 
 |     // DartEntry::ResolveCallable, which appropriately checks for this case. | 
 |     ASSERT(!has_delayed_type_args); | 
 |     function_type_args ^= args.At(0); | 
 |   } else if (!has_delayed_type_args) { | 
 |     // We have no explicitly provided function type arguments, so instantiate | 
 |     // the type parameters to bounds or replace as appropriate. | 
 |     function_type_args = function.DefaultTypeArguments(zone); | 
 |     auto const mode = | 
 |         function.IsClosureFunction() | 
 |             ? function.default_type_arguments_instantiation_mode() | 
 |             : function_type_args.GetInstantiationMode(zone, &function); | 
 |     switch (mode) { | 
 |       case InstantiationMode::kIsInstantiated: | 
 |         // Nothing left to do. | 
 |         break; | 
 |       case InstantiationMode::kNeedsInstantiation: | 
 |         function_type_args = function_type_args.InstantiateAndCanonicalizeFrom( | 
 |             instantiator_type_args, parent_type_args); | 
 |         break; | 
 |       case InstantiationMode::kSharesInstantiatorTypeArguments: | 
 |         function_type_args = instantiator_type_args.ptr(); | 
 |         break; | 
 |       case InstantiationMode::kSharesFunctionTypeArguments: | 
 |         function_type_args = parent_type_args.ptr(); | 
 |         break; | 
 |     } | 
 |   } | 
 |  | 
 |   return function_type_args.Prepend(zone, parent_type_args, kNumParentTypeArgs, | 
 |                                     kNumTypeArgs); | 
 | } | 
 |  | 
 | // Retrieves the instantiator type arguments, if any, from the receiver. | 
 | static TypeArgumentsPtr RetrieveInstantiatorTypeArguments( | 
 |     Zone* zone, | 
 |     const Function& function, | 
 |     const Instance& receiver) { | 
 |   if (function.IsClosureFunction()) { | 
 |     ASSERT(receiver.IsClosure()); | 
 |     const auto& closure = Closure::Cast(receiver); | 
 |     return closure.instantiator_type_arguments(); | 
 |   } | 
 |   if (!receiver.IsNull()) { | 
 |     const auto& cls = Class::Handle(zone, receiver.clazz()); | 
 |     if (cls.NumTypeArguments() > 0) { | 
 |       return receiver.GetTypeArguments(); | 
 |     } | 
 |   } | 
 |   return Object::empty_type_arguments().ptr(); | 
 | } | 
 |  | 
 | ObjectPtr Function::DoArgumentTypesMatch( | 
 |     const Array& args, | 
 |     const ArgumentsDescriptor& args_desc) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (signature() == FunctionType::null()) { | 
 |     // Precompiler deleted signature because of missing entry point pragma. | 
 |     return EntryPointMemberInvocationError(*this); | 
 |   } | 
 | #endif | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |  | 
 |   auto& receiver = Instance::Handle(zone); | 
 |   if (IsClosureFunction() || HasThisParameter()) { | 
 |     receiver ^= args.At(args_desc.FirstArgIndex()); | 
 |   } | 
 |   const auto& instantiator_type_arguments = TypeArguments::Handle( | 
 |       zone, RetrieveInstantiatorTypeArguments(zone, *this, receiver)); | 
 |   return Function::DoArgumentTypesMatch(args, args_desc, | 
 |                                         instantiator_type_arguments); | 
 | } | 
 |  | 
 | ObjectPtr Function::DoArgumentTypesMatch( | 
 |     const Array& args, | 
 |     const ArgumentsDescriptor& args_desc, | 
 |     const TypeArguments& instantiator_type_arguments) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (signature() == FunctionType::null()) { | 
 |     // Precompiler deleted signature because of missing entry point pragma. | 
 |     return EntryPointMemberInvocationError(*this); | 
 |   } | 
 | #endif | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |  | 
 |   auto& receiver = Instance::Handle(zone); | 
 |   if (IsClosureFunction() || HasThisParameter()) { | 
 |     receiver ^= args.At(args_desc.FirstArgIndex()); | 
 |   } | 
 |  | 
 |   const auto& function_type_arguments = TypeArguments::Handle( | 
 |       zone, RetrieveFunctionTypeArguments(thread, zone, *this, receiver, | 
 |                                           instantiator_type_arguments, args, | 
 |                                           args_desc)); | 
 |   return Function::DoArgumentTypesMatch( | 
 |       args, args_desc, instantiator_type_arguments, function_type_arguments); | 
 | } | 
 |  | 
 | ObjectPtr Function::DoArgumentTypesMatch( | 
 |     const Array& args, | 
 |     const ArgumentsDescriptor& args_desc, | 
 |     const TypeArguments& instantiator_type_arguments, | 
 |     const TypeArguments& function_type_arguments) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (signature() == FunctionType::null()) { | 
 |     // Precompiler deleted signature because of missing entry point pragma. | 
 |     return EntryPointMemberInvocationError(*this); | 
 |   } | 
 | #endif | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |  | 
 |   // Perform any non-covariant bounds checks on the provided function type | 
 |   // arguments to make sure they are appropriate subtypes of the bounds. | 
 |   const intptr_t kNumLocalTypeArgs = NumTypeParameters(); | 
 |   if (kNumLocalTypeArgs > 0) { | 
 |     const intptr_t kNumParentTypeArgs = NumParentTypeArguments(); | 
 |     ASSERT(function_type_arguments.HasCount(kNumParentTypeArgs + | 
 |                                             kNumLocalTypeArgs)); | 
 |     const auto& params = TypeParameters::Handle(zone, type_parameters()); | 
 |     // No checks are needed if all bounds are dynamic. | 
 |     if (!params.AllDynamicBounds()) { | 
 |       auto& param = AbstractType::Handle(zone); | 
 |       auto& bound = AbstractType::Handle(zone); | 
 |       for (intptr_t i = 0; i < kNumLocalTypeArgs; i++) { | 
 |         bound = params.BoundAt(i); | 
 |         // Only perform non-covariant checks where the bound is not | 
 |         // the top type. | 
 |         if (params.IsGenericCovariantImplAt(i) || | 
 |             bound.IsTopTypeForSubtyping()) { | 
 |           continue; | 
 |         } | 
 |         param = TypeParameterAt(i); | 
 |         if (!AbstractType::InstantiateAndTestSubtype( | 
 |                 ¶m, &bound, instantiator_type_arguments, | 
 |                 function_type_arguments)) { | 
 |           const auto& names = Array::Handle(zone, params.names()); | 
 |           auto& name = String::Handle(zone); | 
 |           name ^= names.At(i); | 
 |           return Error::RawCast( | 
 |               ThrowTypeError(token_pos(), param, bound, name)); | 
 |         } | 
 |       } | 
 |     } | 
 |   } else { | 
 |     ASSERT(function_type_arguments.HasCount(NumParentTypeArguments())); | 
 |   } | 
 |  | 
 |   AbstractType& type = AbstractType::Handle(zone); | 
 |   Instance& argument = Instance::Handle(zone); | 
 |  | 
 |   auto check_argument = [](const Instance& argument, const AbstractType& type, | 
 |                            const TypeArguments& instantiator_type_args, | 
 |                            const TypeArguments& function_type_args) -> bool { | 
 |     // If the argument type is the top type, no need to check. | 
 |     if (type.IsTopTypeForSubtyping()) return true; | 
 |     if (argument.IsNull()) { | 
 |       return Instance::NullIsAssignableTo(type, instantiator_type_args, | 
 |                                           function_type_args); | 
 |     } | 
 |     return argument.IsAssignableTo(type, instantiator_type_args, | 
 |                                    function_type_args); | 
 |   }; | 
 |  | 
 |   // Check types of the provided arguments against the expected parameter types. | 
 |   const intptr_t arg_offset = args_desc.FirstArgIndex(); | 
 |   // Only check explicit arguments. | 
 |   const intptr_t arg_start = arg_offset + NumImplicitParameters(); | 
 |   const intptr_t end_positional_args = arg_offset + args_desc.PositionalCount(); | 
 |   for (intptr_t arg_index = arg_start; arg_index < end_positional_args; | 
 |        ++arg_index) { | 
 |     argument ^= args.At(arg_index); | 
 |     // Adjust for type arguments when they're present. | 
 |     const intptr_t param_index = arg_index - arg_offset; | 
 |     type = ParameterTypeAt(param_index); | 
 |     if (!check_argument(argument, type, instantiator_type_arguments, | 
 |                         function_type_arguments)) { | 
 |       auto& name = String::Handle(zone, ParameterNameAt(param_index)); | 
 |       if (!type.IsInstantiated()) { | 
 |         type = | 
 |             type.InstantiateFrom(instantiator_type_arguments, | 
 |                                  function_type_arguments, kAllFree, Heap::kNew); | 
 |       } | 
 |       return ThrowTypeError(token_pos(), argument, type, name); | 
 |     } | 
 |   } | 
 |  | 
 |   const intptr_t num_named_arguments = args_desc.NamedCount(); | 
 |   if (num_named_arguments == 0) { | 
 |     return Error::null(); | 
 |   } | 
 |  | 
 |   const int num_parameters = NumParameters(); | 
 |   const int num_fixed_params = num_fixed_parameters(); | 
 |  | 
 |   String& argument_name = String::Handle(zone); | 
 |   String& parameter_name = String::Handle(zone); | 
 |  | 
  // Check the types of named arguments against the expected parameter types.
 |   for (intptr_t named_index = 0; named_index < num_named_arguments; | 
 |        named_index++) { | 
 |     argument_name = args_desc.NameAt(named_index); | 
 |     ASSERT(argument_name.IsSymbol()); | 
 |     argument ^= args.At(arg_offset + args_desc.PositionAt(named_index)); | 
 |  | 
 |     // Try to find the named parameter that matches the provided argument. | 
 |     // Even when annotated with @required, named parameters are still stored | 
 |     // as if they were optional and so come after the fixed parameters. | 
 |     // Currently O(n^2) as there's no guarantee from either the CFE or the | 
 |     // VM that named parameters and named arguments are sorted in the same way. | 
 |     intptr_t param_index = num_fixed_params; | 
 |     for (; param_index < num_parameters; param_index++) { | 
 |       parameter_name = ParameterNameAt(param_index); | 
 |       ASSERT(parameter_name.IsSymbol()); | 
 |  | 
 |       if (!parameter_name.Equals(argument_name)) continue; | 
 |  | 
 |       type = ParameterTypeAt(param_index); | 
 |       if (!check_argument(argument, type, instantiator_type_arguments, | 
 |                           function_type_arguments)) { | 
 |         auto& name = String::Handle(zone, ParameterNameAt(param_index)); | 
 |         if (!type.IsInstantiated()) { | 
 |           type = type.InstantiateFrom(instantiator_type_arguments, | 
 |                                       function_type_arguments, kAllFree, | 
 |                                       Heap::kNew); | 
 |         } | 
 |         return ThrowTypeError(token_pos(), argument, type, name); | 
 |       } | 
 |       break; | 
 |     } | 
    // This should only fail if AreValidArguments returned a false positive.
 |     ASSERT(param_index < num_parameters); | 
 |   } | 
 |   return Error::null(); | 
 | } | 
 |  | 
// Helper allocating a C string buffer in the zone, printing the fully
// qualified name of a function into it, and replacing ':' with '_' to make
// sure the constructed name is a valid C++ identifier for debugging purposes.
// Sets 'chars' to the allocated buffer and returns the number of characters
// written.
 |  | 
 | enum QualifiedFunctionLibKind { | 
 |   kQualifiedFunctionLibKindLibUrl, | 
 |   kQualifiedFunctionLibKindLibName | 
 | }; | 
 |  | 
 | static intptr_t ConstructFunctionFullyQualifiedCString( | 
 |     const Function& function, | 
 |     char** chars, | 
 |     intptr_t reserve_len, | 
 |     bool with_lib, | 
 |     QualifiedFunctionLibKind lib_kind) { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const char* name = String::Handle(zone, function.name()).ToCString(); | 
 |   const char* function_format = (reserve_len == 0) ? "%s" : "%s_"; | 
 |   reserve_len += Utils::SNPrint(nullptr, 0, function_format, name); | 
 |   const Function& parent = Function::Handle(zone, function.parent_function()); | 
 |   intptr_t written = 0; | 
 |   if (parent.IsNull()) { | 
 |     const Class& function_class = Class::Handle(zone, function.Owner()); | 
 |     ASSERT(!function_class.IsNull()); | 
 |     const char* class_name = | 
 |         String::Handle(zone, function_class.Name()).ToCString(); | 
 |     ASSERT(class_name != nullptr); | 
 |     const char* library_name = nullptr; | 
 |     const char* lib_class_format = nullptr; | 
 |     if (with_lib) { | 
 |       const Library& library = Library::Handle(zone, function_class.library()); | 
 |       ASSERT(!library.IsNull()); | 
 |       switch (lib_kind) { | 
 |         case kQualifiedFunctionLibKindLibUrl: | 
 |           library_name = String::Handle(zone, library.url()).ToCString(); | 
 |           break; | 
 |         case kQualifiedFunctionLibKindLibName: | 
 |           library_name = String::Handle(zone, library.name()).ToCString(); | 
 |           break; | 
 |         default: | 
 |           UNREACHABLE(); | 
 |       } | 
 |       ASSERT(library_name != nullptr); | 
 |       lib_class_format = (library_name[0] == '\0') ? "%s%s_" : "%s_%s_"; | 
 |     } else { | 
 |       library_name = ""; | 
 |       lib_class_format = "%s%s."; | 
 |     } | 
 |     reserve_len += | 
 |         Utils::SNPrint(nullptr, 0, lib_class_format, library_name, class_name); | 
 |     ASSERT(chars != nullptr); | 
 |     *chars = zone->Alloc<char>(reserve_len + 1); | 
 |     written = Utils::SNPrint(*chars, reserve_len + 1, lib_class_format, | 
 |                              library_name, class_name); | 
 |   } else { | 
 |     written = ConstructFunctionFullyQualifiedCString(parent, chars, reserve_len, | 
 |                                                      with_lib, lib_kind); | 
 |   } | 
 |   ASSERT(*chars != nullptr); | 
 |   char* next = *chars + written; | 
 |   written += Utils::SNPrint(next, reserve_len + 1, function_format, name); | 
 |   // Replace ":" with "_". | 
 |   while (true) { | 
 |     next = strchr(next, ':'); | 
 |     if (next == nullptr) break; | 
 |     *next = '_'; | 
 |   } | 
 |   return written; | 
 | } | 
 |  | 
 | const char* Function::ToFullyQualifiedCString() const { | 
 |   char* chars = nullptr; | 
 |   ConstructFunctionFullyQualifiedCString(*this, &chars, 0, true, | 
 |                                          kQualifiedFunctionLibKindLibUrl); | 
 |   return chars; | 
 | } | 
 |  | 
 | const char* Function::ToLibNamePrefixedQualifiedCString() const { | 
 |   char* chars = nullptr; | 
 |   ConstructFunctionFullyQualifiedCString(*this, &chars, 0, true, | 
 |                                          kQualifiedFunctionLibKindLibName); | 
 |   return chars; | 
 | } | 
 |  | 
 | const char* Function::ToQualifiedCString() const { | 
 |   char* chars = nullptr; | 
 |   ConstructFunctionFullyQualifiedCString(*this, &chars, 0, false, | 
 |                                          kQualifiedFunctionLibKindLibUrl); | 
 |   return chars; | 
 | } | 
 |  | 
 | AbstractTypePtr FunctionType::InstantiateFrom( | 
 |     const TypeArguments& instantiator_type_arguments, | 
 |     const TypeArguments& function_type_arguments, | 
 |     intptr_t num_free_fun_type_params, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_mapping, | 
 |     intptr_t num_parent_type_args_adjustment) const { | 
 |   ASSERT(IsFinalized()); | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const intptr_t num_parent_type_args = NumParentTypeArguments(); | 
 |   bool delete_type_parameters = false; | 
 |   if (num_free_fun_type_params == kCurrentAndEnclosingFree) { | 
 |     // See the comment on kCurrentAndEnclosingFree to understand why we don't | 
 |     // adjust 'num_free_fun_type_params' downward in this case. | 
 |     num_free_fun_type_params = kAllFree; | 
 |     delete_type_parameters = true; | 
 |   } else { | 
 |     ASSERT(!IsInstantiated(kAny, num_free_fun_type_params)); | 
 |     // We only consider the function type parameters declared by the parents | 
 |     // of this signature function as free. | 
 |     if (num_parent_type_args < num_free_fun_type_params) { | 
 |       num_free_fun_type_params = num_parent_type_args; | 
 |     } | 
 |   } | 
 |  | 
 |   // The number of parent type parameters that remain uninstantiated. | 
 |   const intptr_t remaining_parent_type_params = | 
 |       num_free_fun_type_params < num_parent_type_args | 
 |           ? num_parent_type_args - num_free_fun_type_params | 
 |           : 0; | 
 |  | 
 |   // Adjust number of parent type arguments for all nested substituted types. | 
 |   num_parent_type_args_adjustment = | 
 |       remaining_parent_type_params + | 
 |       (delete_type_parameters ? 0 : NumTypeParameters()); | 
 |  | 
 |   FunctionType& sig = FunctionType::Handle( | 
 |       FunctionType::New(remaining_parent_type_params, nullability(), space)); | 
 |   AbstractType& type = AbstractType::Handle(zone); | 
 |  | 
 |   FunctionTypeMapping scope(zone, &function_type_mapping, *this, sig); | 
 |  | 
 |   // Copy the type parameters and instantiate their bounds and defaults. | 
 |   if (!delete_type_parameters) { | 
 |     const TypeParameters& type_params = | 
 |         TypeParameters::Handle(zone, type_parameters()); | 
 |     if (!type_params.IsNull()) { | 
 |       const TypeParameters& sig_type_params = | 
 |           TypeParameters::Handle(zone, TypeParameters::New()); | 
      // There is no need to set the names, which are ignored in a signature;
      // however, the length of the names array defines the number of type
      // parameters.
 |       sig_type_params.set_names(Array::Handle(zone, type_params.names())); | 
 |       sig_type_params.set_flags(Array::Handle(zone, type_params.flags())); | 
 |       sig.SetTypeParameters(sig_type_params); | 
 |       TypeArguments& type_args = TypeArguments::Handle(zone); | 
 |       type_args = type_params.bounds(); | 
 |       if (!type_args.IsNull() && !type_args.IsInstantiated()) { | 
 |         type_args = type_args.InstantiateFrom( | 
 |             instantiator_type_arguments, function_type_arguments, | 
 |             num_free_fun_type_params, space, function_type_mapping, | 
 |             num_parent_type_args_adjustment); | 
 |       } | 
 |       sig_type_params.set_bounds(type_args); | 
 |       type_args = type_params.defaults(); | 
 |       if (!type_args.IsNull() && !type_args.IsInstantiated()) { | 
 |         type_args = type_args.InstantiateFrom( | 
 |             instantiator_type_arguments, function_type_arguments, | 
 |             num_free_fun_type_params, space, function_type_mapping, | 
 |             num_parent_type_args_adjustment); | 
 |       } | 
 |       sig_type_params.set_defaults(type_args); | 
 |     } | 
 |   } | 
 |  | 
 |   type = result_type(); | 
 |   if (!type.IsInstantiated()) { | 
 |     type = type.InstantiateFrom( | 
 |         instantiator_type_arguments, function_type_arguments, | 
 |         num_free_fun_type_params, space, function_type_mapping, | 
 |         num_parent_type_args_adjustment); | 
    // A null type returned here indicates a failed instantiation in dead
    // code; the failure must be propagated up to the caller, the optimizing
    // compiler.
 |     if (type.IsNull()) { | 
 |       return FunctionType::null(); | 
 |     } | 
 |   } | 
 |   sig.set_result_type(type); | 
 |   const intptr_t num_params = NumParameters(); | 
 |   sig.set_num_implicit_parameters(num_implicit_parameters()); | 
 |   sig.set_num_fixed_parameters(num_fixed_parameters()); | 
 |   sig.SetNumOptionalParameters(NumOptionalParameters(), | 
 |                                HasOptionalPositionalParameters()); | 
 |   sig.set_parameter_types(Array::Handle(Array::New(num_params, space))); | 
 |   for (intptr_t i = 0; i < num_params; i++) { | 
 |     type = ParameterTypeAt(i); | 
 |     if (!type.IsInstantiated()) { | 
 |       type = type.InstantiateFrom( | 
 |           instantiator_type_arguments, function_type_arguments, | 
 |           num_free_fun_type_params, space, function_type_mapping, | 
 |           num_parent_type_args_adjustment); | 
      // A null type returned here indicates a failed instantiation in dead
      // code; the failure must be propagated up to the caller, the
      // optimizing compiler.
 |       if (type.IsNull()) { | 
 |         return FunctionType::null(); | 
 |       } | 
 |     } | 
 |     sig.SetParameterTypeAt(i, type); | 
 |   } | 
 |   sig.set_named_parameter_names(Array::Handle(zone, named_parameter_names())); | 
 |  | 
 |   if (delete_type_parameters) { | 
 |     ASSERT(sig.IsInstantiated(kFunctions)); | 
 |   } | 
 |  | 
 |   sig.SetIsFinalized(); | 
 |  | 
 |   // Canonicalization is not part of instantiation. | 
 |   return sig.ptr(); | 
 | } | 
 |  | 
 | AbstractTypePtr FunctionType::UpdateFunctionTypes( | 
 |     intptr_t num_parent_type_args_adjustment, | 
 |     intptr_t num_free_fun_type_params, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_mapping) const { | 
 |   ASSERT(num_parent_type_args_adjustment >= 0); | 
 |   ASSERT(IsFinalized()); | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |  | 
 |   const intptr_t old_num_parent_type_args = NumParentTypeArguments(); | 
  // From now on, adjust all type parameter types that belong to this
  // function type or to nested function types.
 |   if (num_free_fun_type_params > old_num_parent_type_args) { | 
 |     num_free_fun_type_params = old_num_parent_type_args; | 
 |   } | 
 |  | 
 |   FunctionType& new_type = FunctionType::Handle( | 
 |       zone, FunctionType::New( | 
 |                 NumParentTypeArguments() + num_parent_type_args_adjustment, | 
 |                 nullability(), space)); | 
 |   AbstractType& type = AbstractType::Handle(zone); | 
 |  | 
 |   FunctionTypeMapping scope(zone, &function_type_mapping, *this, new_type); | 
 |  | 
 |   const TypeParameters& type_params = | 
 |       TypeParameters::Handle(zone, type_parameters()); | 
 |   if (!type_params.IsNull()) { | 
 |     const TypeParameters& new_type_params = | 
 |         TypeParameters::Handle(zone, TypeParameters::New()); | 
    // There is no need to set the names, which are ignored in a signature;
    // however, the length of the names array defines the number of type
    // parameters.
 |     new_type_params.set_names(Array::Handle(zone, type_params.names())); | 
 |     new_type_params.set_flags(Array::Handle(zone, type_params.flags())); | 
 |     TypeArguments& type_args = TypeArguments::Handle(zone); | 
 |     type_args = type_params.bounds(); | 
 |     if (!type_args.IsNull()) { | 
 |       type_args = type_args.UpdateFunctionTypes(num_parent_type_args_adjustment, | 
 |                                                 num_free_fun_type_params, space, | 
 |                                                 function_type_mapping); | 
 |     } | 
 |     new_type_params.set_bounds(type_args); | 
 |     type_args = type_params.defaults(); | 
 |     if (!type_args.IsNull()) { | 
 |       type_args = type_args.UpdateFunctionTypes(num_parent_type_args_adjustment, | 
 |                                                 num_free_fun_type_params, space, | 
 |                                                 function_type_mapping); | 
 |     } | 
 |     new_type_params.set_defaults(type_args); | 
 |     new_type.SetTypeParameters(new_type_params); | 
 |   } | 
 |  | 
 |   type = result_type(); | 
 |   type = type.UpdateFunctionTypes(num_parent_type_args_adjustment, | 
 |                                   num_free_fun_type_params, space, | 
 |                                   function_type_mapping); | 
 |   new_type.set_result_type(type); | 
 |  | 
 |   const intptr_t num_params = NumParameters(); | 
 |   new_type.set_num_implicit_parameters(num_implicit_parameters()); | 
 |   new_type.set_num_fixed_parameters(num_fixed_parameters()); | 
 |   new_type.SetNumOptionalParameters(NumOptionalParameters(), | 
 |                                     HasOptionalPositionalParameters()); | 
 |   new_type.set_parameter_types(Array::Handle(Array::New(num_params, space))); | 
 |   for (intptr_t i = 0; i < num_params; i++) { | 
 |     type = ParameterTypeAt(i); | 
 |     type = type.UpdateFunctionTypes(num_parent_type_args_adjustment, | 
 |                                     num_free_fun_type_params, space, | 
 |                                     function_type_mapping); | 
 |     new_type.SetParameterTypeAt(i, type); | 
 |   } | 
 |   new_type.set_named_parameter_names( | 
 |       Array::Handle(zone, named_parameter_names())); | 
 |   new_type.SetIsFinalized(); | 
 |  | 
 |   return new_type.ptr(); | 
 | } | 
 |  | 
// Checks whether the type of the specified parameter of this signature is a
// supertype of the type of the specified parameter of the other signature
// (i.e., checks parameter contravariance).
// Note that types marked as covariant are already dealt with in the front-end.
 | bool FunctionType::IsContravariantParameter( | 
 |     intptr_t parameter_position, | 
 |     const FunctionType& other, | 
 |     intptr_t other_parameter_position, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_equivalence) const { | 
 |   const AbstractType& param_type = | 
 |       AbstractType::Handle(ParameterTypeAt(parameter_position)); | 
 |   if (param_type.IsTopTypeForSubtyping()) { | 
 |     return true; | 
 |   } | 
 |   const AbstractType& other_param_type = | 
 |       AbstractType::Handle(other.ParameterTypeAt(other_parameter_position)); | 
 |   return other_param_type.IsSubtypeOf(param_type, space, | 
 |                                       function_type_equivalence); | 
 | } | 
 |  | 
 | bool FunctionType::HasSameTypeParametersAndBounds( | 
 |     const FunctionType& other, | 
 |     TypeEquality kind, | 
 |     FunctionTypeMapping* function_type_equivalence) const { | 
 |   Zone* const zone = Thread::Current()->zone(); | 
 |   TRACE_TYPE_CHECKS_VERBOSE( | 
 |       "  FunctionType::HasSameTypeParametersAndBounds(%s, %s)\n", ToCString(), | 
 |       other.ToCString()); | 
 |  | 
 |   const intptr_t num_type_params = NumTypeParameters(); | 
 |   if (num_type_params != other.NumTypeParameters()) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE( | 
 |         "   - result: false (number of type parameters)\n"); | 
 |     return false; | 
 |   } | 
 |   if (num_type_params > 0) { | 
 |     const TypeParameters& type_params = | 
 |         TypeParameters::Handle(zone, type_parameters()); | 
 |     ASSERT(!type_params.IsNull()); | 
 |     const TypeParameters& other_type_params = | 
 |         TypeParameters::Handle(zone, other.type_parameters()); | 
 |     ASSERT(!other_type_params.IsNull()); | 
 |     if (kind == TypeEquality::kInSubtypeTest) { | 
 |       if (!type_params.AllDynamicBounds() || | 
 |           !other_type_params.AllDynamicBounds()) { | 
 |         AbstractType& bound = AbstractType::Handle(zone); | 
 |         AbstractType& other_bound = AbstractType::Handle(zone); | 
 |         for (intptr_t i = 0; i < num_type_params; i++) { | 
 |           bound = type_params.BoundAt(i); | 
 |           other_bound = other_type_params.BoundAt(i); | 
 |           // Bounds that are mutual subtypes are considered equal. | 
 |           if (!bound.IsSubtypeOf(other_bound, Heap::kOld, | 
 |                                  function_type_equivalence) || | 
 |               !other_bound.IsSubtypeOf(bound, Heap::kOld, | 
 |                                        function_type_equivalence)) { | 
 |             TRACE_TYPE_CHECKS_VERBOSE( | 
 |                 "   - result: false (bounds are not mutual subtypes)\n"); | 
 |             return false; | 
 |           } | 
 |         } | 
 |       } | 
 |     } else { | 
 |       if (NumParentTypeArguments() != other.NumParentTypeArguments()) { | 
 |         TRACE_TYPE_CHECKS_VERBOSE( | 
 |             "   - result: false (mismatch in number of type arguments)\n"); | 
 |         return false; | 
 |       } | 
 |       const TypeArguments& bounds = | 
 |           TypeArguments::Handle(zone, type_params.bounds()); | 
 |       const TypeArguments& other_bounds = | 
 |           TypeArguments::Handle(zone, other_type_params.bounds()); | 
 |       if (!bounds.IsEquivalent(other_bounds, kind, function_type_equivalence)) { | 
 |         TRACE_TYPE_CHECKS_VERBOSE( | 
 |             "   - result: false (bounds are not equivalent)\n"); | 
 |         return false; | 
 |       } | 
 |       if (kind == TypeEquality::kCanonical) { | 
 |         // Compare default arguments. | 
 |         const TypeArguments& defaults = | 
 |             TypeArguments::Handle(zone, type_params.defaults()); | 
 |         const TypeArguments& other_defaults = | 
 |             TypeArguments::Handle(zone, other_type_params.defaults()); | 
 |         if (defaults.IsNull()) { | 
 |           if (!other_defaults.IsNull()) { | 
 |             TRACE_TYPE_CHECKS_VERBOSE( | 
 |                 "   - result: false (mismatch in defaults)\n"); | 
 |             return false; | 
 |           } | 
 |         } else if (!defaults.IsEquivalent(other_defaults, kind, | 
 |                                           function_type_equivalence)) { | 
 |           TRACE_TYPE_CHECKS_VERBOSE( | 
 |               "   - result: false (default types are not equivalent)\n"); | 
 |           return false; | 
 |         } | 
 |       } | 
 |     } | 
 |     if (kind != TypeEquality::kInSubtypeTest) { | 
 |       // Compare flags (IsGenericCovariantImpl). | 
 |       if (!Array::Equals(type_params.flags(), other_type_params.flags())) { | 
 |         TRACE_TYPE_CHECKS_VERBOSE("   - result: false (flags are not equal)\n"); | 
 |         return false; | 
 |       } | 
 |     } | 
 |   } | 
 |   TRACE_TYPE_CHECKS_VERBOSE("   - result: true\n"); | 
 |   return true; | 
 | } | 
 |  | 
 | bool FunctionType::IsSubtypeOf( | 
 |     const FunctionType& other, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_equivalence) const { | 
 |   TRACE_TYPE_CHECKS_VERBOSE("  FunctionType::IsSubtypeOf(%s, %s)\n", | 
 |                             ToCString(), other.ToCString()); | 
 |   const intptr_t num_fixed_params = num_fixed_parameters(); | 
 |   const intptr_t num_opt_pos_params = NumOptionalPositionalParameters(); | 
 |   const intptr_t num_opt_named_params = NumOptionalNamedParameters(); | 
 |   const intptr_t other_num_fixed_params = other.num_fixed_parameters(); | 
 |   const intptr_t other_num_opt_pos_params = | 
 |       other.NumOptionalPositionalParameters(); | 
 |   const intptr_t other_num_opt_named_params = | 
 |       other.NumOptionalNamedParameters(); | 
  // This signature requires the same arguments or fewer and accepts the same
  // arguments or more. We can ignore implicit parameters.
 |   const intptr_t num_ignored_params = num_implicit_parameters(); | 
 |   const intptr_t other_num_ignored_params = other.num_implicit_parameters(); | 
 |   if (((num_fixed_params - num_ignored_params) > | 
 |        (other_num_fixed_params - other_num_ignored_params)) || | 
 |       ((num_fixed_params - num_ignored_params + num_opt_pos_params) < | 
 |        (other_num_fixed_params - other_num_ignored_params + | 
 |         other_num_opt_pos_params)) || | 
 |       (num_opt_named_params < other_num_opt_named_params)) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE( | 
 |         "   - result: false (mismatch in number of parameters)\n"); | 
 |     return false; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   FunctionTypeMapping scope(zone, &function_type_equivalence, *this, other); | 
 |  | 
 |   // Check the type parameters and bounds of generic functions. | 
 |   if (!HasSameTypeParametersAndBounds(other, TypeEquality::kInSubtypeTest, | 
 |                                       function_type_equivalence)) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE( | 
 |         "   - result: false (mismatch in type parameters)\n"); | 
 |     return false; | 
 |   } | 
 |   // Check the result type. | 
 |   const AbstractType& other_res_type = | 
 |       AbstractType::Handle(zone, other.result_type()); | 
 |   // 'void Function()' is a subtype of 'Object Function()'. | 
 |   if (!other_res_type.IsTopTypeForSubtyping()) { | 
 |     const AbstractType& res_type = AbstractType::Handle(zone, result_type()); | 
 |     if (!res_type.IsSubtypeOf(other_res_type, space, | 
 |                               function_type_equivalence)) { | 
 |       TRACE_TYPE_CHECKS_VERBOSE("   - result: false (result type)\n"); | 
 |       return false; | 
 |     } | 
 |   } | 
 |   // Check the types of fixed and optional positional parameters. | 
 |   for (intptr_t i = 0; i < (other_num_fixed_params - other_num_ignored_params + | 
 |                             other_num_opt_pos_params); | 
 |        i++) { | 
 |     if (!IsContravariantParameter(i + num_ignored_params, other, | 
 |                                   i + other_num_ignored_params, space, | 
 |                                   function_type_equivalence)) { | 
 |       TRACE_TYPE_CHECKS_VERBOSE("   - result: false (parameter type)\n"); | 
 |       return false; | 
 |     } | 
 |   } | 
 |   // Check that for each optional named parameter of type T of the other | 
 |   // function type, there exists an optional named parameter of this function | 
 |   // type with an identical name and with a type S that is a supertype of T. | 
 |   // Note that SetParameterNameAt() guarantees that names are symbols, so we | 
 |   // can compare their raw pointers. | 
 |   const int num_params = num_fixed_params + num_opt_named_params; | 
 |   const int other_num_params = | 
 |       other_num_fixed_params + other_num_opt_named_params; | 
 |   bool found_param_name; | 
 |   String& other_param_name = String::Handle(zone); | 
 |   for (intptr_t i = other_num_fixed_params; i < other_num_params; i++) { | 
 |     other_param_name = other.ParameterNameAt(i); | 
 |     ASSERT(other_param_name.IsSymbol()); | 
 |     found_param_name = false; | 
 |     for (intptr_t j = num_fixed_params; j < num_params; j++) { | 
 |       ASSERT(String::Handle(zone, ParameterNameAt(j)).IsSymbol()); | 
 |       if (ParameterNameAt(j) == other_param_name.ptr()) { | 
 |         found_param_name = true; | 
 |         if (!IsContravariantParameter(j, other, i, space, | 
 |                                       function_type_equivalence)) { | 
 |           TRACE_TYPE_CHECKS_VERBOSE( | 
 |               "   - result: false (optional parameter type)\n"); | 
 |           return false; | 
 |         } | 
 |         break; | 
 |       } | 
 |     } | 
 |     if (!found_param_name) { | 
 |       TRACE_TYPE_CHECKS_VERBOSE( | 
 |           "   - result: false (named parameter not found)\n"); | 
 |       return false; | 
 |     } | 
 |   } | 
 |   // Check that for each required named parameter in this function, there's a | 
 |   // corresponding required named parameter in the other function. | 
 |   String& param_name = other_param_name; | 
 |   for (intptr_t j = num_params - num_opt_named_params; j < num_params; j++) { | 
 |     if (IsRequiredAt(j)) { | 
 |       param_name = ParameterNameAt(j); | 
 |       ASSERT(param_name.IsSymbol()); | 
 |       bool found = false; | 
 |       for (intptr_t i = other_num_fixed_params; i < other_num_params; i++) { | 
 |         ASSERT(String::Handle(zone, other.ParameterNameAt(i)).IsSymbol()); | 
 |         if (other.ParameterNameAt(i) == param_name.ptr()) { | 
 |           found = true; | 
 |           if (!other.IsRequiredAt(i)) { | 
 |             TRACE_TYPE_CHECKS_VERBOSE( | 
 |                 "   - result: false (mismatch in required named " | 
 |                 "parameters)\n"); | 
 |             return false; | 
 |           } | 
 |         } | 
 |       } | 
 |       if (!found) { | 
 |         TRACE_TYPE_CHECKS_VERBOSE( | 
 |             "   - result: false (required named parameter not found)\n"); | 
 |         return false; | 
 |       } | 
 |     } | 
 |   } | 
 |   TRACE_TYPE_CHECKS_VERBOSE("   - result: true\n"); | 
 |   return true; | 
 | } | 
 |  | 
 | // The compiler generates an implicit constructor if a class definition | 
 | // does not contain an explicit constructor or factory. The implicit | 
 | // constructor has the same token position as the owner class. | 
 | bool Function::IsImplicitConstructor() const { | 
 |   return IsGenerativeConstructor() && (token_pos() == end_token_pos()); | 
 | } | 
 |  | 
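// Whether |func| is the implicit closure function of a static function
// (a static tear-off). Decodes the kind tag straight from the raw pointer
// so no handle allocation is needed; the NoSafepointScope guards the
// unwrapped FunctionPtr.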
 | bool Function::IsImplicitStaticClosureFunction(FunctionPtr func) { | 
 |   NoSafepointScope no_safepoint; | 
 |   uint32_t kind_tag = func->untag()->kind_tag_.load(std::memory_order_relaxed); | 
 |   return (KindBits::decode(kind_tag) == | 
 |           UntaggedFunction::kImplicitClosureFunction) && | 
 |          StaticBit::decode(kind_tag); | 
 | } | 
 |  | 
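// As above, but for the implicit closure function of an instance method
// (a tear-off that binds a receiver).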
 | bool Function::IsImplicitInstanceClosureFunction(FunctionPtr func) { | 
 |   NoSafepointScope no_safepoint; | 
 |   uint32_t kind_tag = func->untag()->kind_tag_.load(std::memory_order_relaxed); | 
 |   return (KindBits::decode(kind_tag) == | 
 |           UntaggedFunction::kImplicitClosureFunction) && | 
 |          !StaticBit::decode(kind_tag); | 
 | } | 
 |  | 
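// Allocates a raw, uninitialized Function object; the fields are filled in
// by the larger Function::New overload below.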
 | FunctionPtr Function::New(Heap::Space space) { | 
 |   ASSERT(Object::function_class() != Class::null()); | 
 |   return Object::Allocate<Function>(space); | 
 | } | 
 |  | 
 | FunctionPtr Function::New(const FunctionType& signature, | 
 |                           const String& name, | 
 |                           UntaggedFunction::Kind kind, | 
 |                           bool is_static, | 
 |                           bool is_const, | 
 |                           bool is_abstract, | 
 |                           bool is_external, | 
 |                           bool is_native, | 
 |                           const Object& owner, | 
 |                           TokenPosition token_pos, | 
 |                           Heap::Space space) { | 
 |   ASSERT(!owner.IsNull()); | 
 |   ASSERT(!signature.IsNull()); | 
 |   const Function& result = Function::Handle(Function::New(space)); | 
  result.set_kind_tag(0);  // Ensure determinism of uninitialized bits.
  result.set_packed_fields(0);
  result.set_name(name);
 |   result.set_kind(kind); | 
 |   result.set_recognized_kind(MethodRecognizer::kUnknown); | 
 |   result.set_modifier(UntaggedFunction::kNoModifier); | 
 |   result.set_is_static(is_static); | 
 |   result.set_is_const(is_const); | 
 |   result.set_is_abstract(is_abstract); | 
 |   result.set_is_external(is_external); | 
 |   result.set_is_native(is_native); | 
 |   result.set_is_reflectable(true);  // Will be computed later. | 
 |   result.set_is_visible(true);      // Will be computed later. | 
 |   result.set_is_debuggable(true);   // Will be computed later. | 
 |   result.set_is_intrinsic(false); | 
 |   result.set_has_pragma(false); | 
 |   result.set_is_polymorphic_target(false); | 
 |   result.set_is_synthetic(false); | 
 |   NOT_IN_PRECOMPILED(result.set_state_bits(0)); | 
 |   result.set_owner(owner); | 
 |   NOT_IN_PRECOMPILED(result.set_token_pos(token_pos)); | 
 |   NOT_IN_PRECOMPILED(result.set_end_token_pos(token_pos)); | 
 |   NOT_IN_PRECOMPILED(result.set_usage_counter(0)); | 
 |   NOT_IN_PRECOMPILED(result.set_deoptimization_counter(0)); | 
 |   NOT_IN_PRECOMPILED(result.set_optimized_instruction_count(0)); | 
 |   NOT_IN_PRECOMPILED(result.set_optimized_call_site_count(0)); | 
 |   NOT_IN_PRECOMPILED(result.set_inlining_depth(0)); | 
 |   NOT_IN_PRECOMPILED(result.set_kernel_offset(0)); | 
  result.set_is_optimizable(!is_native);
 |   result.set_is_inlinable(true); | 
 |   result.reset_unboxed_parameters_and_return(); | 
 |   result.SetInstructionsSafe(StubCode::LazyCompile()); | 
 |  | 
 |   // See Function::set_data() for more information. | 
 |   if (kind == UntaggedFunction::kClosureFunction || | 
 |       kind == UntaggedFunction::kImplicitClosureFunction) { | 
 |     ASSERT(space == Heap::kOld); | 
 |     const ClosureData& data = ClosureData::Handle(ClosureData::New()); | 
 |     data.set_awaiter_link({}); | 
 |     result.set_data(data); | 
 |   } else if (kind == UntaggedFunction::kFfiTrampoline) { | 
 |     const FfiTrampolineData& data = | 
 |         FfiTrampolineData::Handle(FfiTrampolineData::New()); | 
 |     result.set_data(data); | 
 |   } else if (result.is_old_native()) { | 
 |     const auto& data = | 
 |         Array::Handle(Array::New(NativeFunctionData::kLength, Heap::kOld)); | 
 |     result.set_data(data); | 
 |   } else { | 
 |     // Functions other than signature functions have no reason to be allocated | 
 |     // in new space. | 
 |     ASSERT(space == Heap::kOld); | 
 |   } | 
 |  | 
 |   // Force-optimized functions are not debuggable because they cannot | 
 |   // deoptimize. | 
 |   if (result.ForceOptimize()) { | 
 |     result.set_is_debuggable(false); | 
 |   } | 
 |   signature.set_num_implicit_parameters(result.NumImplicitParameters()); | 
 |   result.SetSignature(signature); | 
 |   NOT_IN_PRECOMPILED( | 
 |       result.set_positional_parameter_names(Object::empty_array())); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | FunctionPtr Function::NewClosureFunctionWithKind(UntaggedFunction::Kind kind, | 
 |                                                  const String& name, | 
 |                                                  const Function& parent, | 
 |                                                  bool is_static, | 
 |                                                  TokenPosition token_pos, | 
 |                                                  const Object& owner) { | 
 |   ASSERT((kind == UntaggedFunction::kClosureFunction) || | 
 |          (kind == UntaggedFunction::kImplicitClosureFunction)); | 
 |   ASSERT(!parent.IsNull()); | 
 |   ASSERT(!owner.IsNull()); | 
 |   const FunctionType& signature = FunctionType::Handle(FunctionType::New( | 
 |       kind == UntaggedFunction::kClosureFunction ? parent.NumTypeArguments() | 
 |                                                  : 0)); | 
 |   const Function& result = Function::Handle( | 
 |       Function::New(signature, name, kind, | 
 |                     /* is_static = */ is_static, | 
 |                     /* is_const = */ false, | 
 |                     /* is_abstract = */ false, | 
 |                     /* is_external = */ false, | 
 |                     /* is_native = */ false, owner, token_pos)); | 
 |   result.set_parent_function(parent); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | FunctionPtr Function::NewClosureFunction(const String& name, | 
 |                                          const Function& parent, | 
 |                                          TokenPosition token_pos) { | 
 |   // Use the owner defining the parent function and not the class containing it. | 
 |   const Object& parent_owner = Object::Handle(parent.RawOwner()); | 
 |   return NewClosureFunctionWithKind(UntaggedFunction::kClosureFunction, name, | 
 |                                     parent, parent.is_static(), token_pos, | 
 |                                     parent_owner); | 
 | } | 
 |  | 
 | FunctionPtr Function::NewImplicitClosureFunction(const String& name, | 
 |                                                  const Function& parent, | 
 |                                                  TokenPosition token_pos) { | 
 |   // Use the owner defining the parent function and not the class containing it. | 
 |   const Object& parent_owner = Object::Handle(parent.RawOwner()); | 
 |   return NewClosureFunctionWithKind( | 
 |       UntaggedFunction::kImplicitClosureFunction, name, parent, | 
 |       parent.is_static() || parent.IsConstructor(), token_pos, parent_owner); | 
 | } | 
 |  | 
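// In the precompiled runtime, implicit closure functions cannot be created
// lazily (see ImplicitClosureFunction below), so a function may only be
// closurized if its implicit closure function was created at compile time.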
 | bool Function::SafeToClosurize() const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   return HasImplicitClosureFunction(); | 
 | #else | 
 |   return true; | 
 | #endif | 
 | } | 
 |  | 
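// Whether this function is the invoke-field dispatcher named
// Symbols::DynamicCall() on the _Closure class, which implements dynamic
// calls of closures.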
 | bool Function::IsDynamicClosureCallDispatcher(Thread* thread) const { | 
 |   if (!IsInvokeFieldDispatcher()) return false; | 
 |   if (thread->isolate_group()->object_store()->closure_class() != Owner()) { | 
 |     return false; | 
 |   } | 
 |   const auto& handle = String::Handle(thread->zone(), name()); | 
 |   return handle.Equals(Symbols::DynamicCall()); | 
 | } | 
 |  | 
 | FunctionPtr Function::ImplicitClosureFunction() const { | 
 |   // Return the existing implicit closure function if any. | 
 |   if (implicit_closure_function() != Function::null()) { | 
 |     return implicit_closure_function(); | 
 |   } | 
 |  | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   // In AOT mode all implicit closures are pre-created. | 
 |   FATAL("Cannot create implicit closure in AOT!"); | 
 |   return Function::null(); | 
 | #else | 
 |   ASSERT(!IsClosureFunction()); | 
 |   Thread* thread = Thread::Current(); | 
 |   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |  | 
 |   if (implicit_closure_function() != Function::null()) { | 
 |     return implicit_closure_function(); | 
 |   } | 
 |  | 
 |   // Create closure function. | 
 |   Zone* zone = thread->zone(); | 
 |   const String& closure_name = String::Handle(zone, name()); | 
 |   const Function& closure_function = Function::Handle( | 
 |       zone, NewImplicitClosureFunction(closure_name, *this, token_pos())); | 
 |  | 
 |   // Set closure function's context scope. | 
 |   if (is_static() || IsConstructor()) { | 
 |     closure_function.set_context_scope(Object::empty_context_scope()); | 
 |   } else { | 
 |     const ContextScope& context_scope = ContextScope::Handle( | 
 |         zone, LocalScope::CreateImplicitClosureScope(*this)); | 
 |     closure_function.set_context_scope(context_scope); | 
 |   } | 
 |  | 
 |   FunctionType& closure_signature = | 
 |       FunctionType::Handle(zone, closure_function.signature()); | 
 |  | 
 |   const auto& cls = Class::Handle(zone, Owner()); | 
 |  | 
 |   if (!is_static() && !IsConstructor() && | 
 |       StackTraceUtils::IsPossibleAwaiterLink(cls)) { | 
 |     closure_function.set_awaiter_link({0, 0}); | 
 |   } | 
 |  | 
 |   const intptr_t num_type_params = | 
 |       IsConstructor() ? cls.NumTypeParameters() : NumTypeParameters(); | 
 |  | 
 |   TypeArguments& instantiator_type_arguments = TypeArguments::Handle(zone); | 
 |   TypeArguments& function_type_arguments = TypeArguments::Handle(zone); | 
 |  | 
 |   FunctionTypeMapping* function_type_mapping = nullptr; | 
 |   FunctionTypeMapping scope(zone, &function_type_mapping, | 
 |                             FunctionType::Handle(zone, signature()), | 
 |                             closure_signature); | 
 |  | 
 |   auto transform_type = [&](AbstractType& type) { | 
 |     if (num_type_params > 0) { | 
 |       if (IsConstructor()) { | 
 |         type = type.UpdateFunctionTypes(num_type_params, kAllFree, Heap::kOld, | 
 |                                         nullptr); | 
 |         if (!type.IsInstantiated(kCurrentClass)) { | 
 |           type = type.InstantiateFrom( | 
 |               instantiator_type_arguments, function_type_arguments, | 
 |               kNoneFree /* avoid truncating parent type args */, Heap::kOld); | 
 |         } | 
 |       } else { | 
 |         type = type.UpdateFunctionTypes(0, kNoneFree, Heap::kOld, | 
 |                                         function_type_mapping); | 
 |       } | 
 |     } | 
 |   }; | 
 |  | 
 |   auto transform_type_args = [&](TypeArguments& type_args) { | 
 |     ASSERT(num_type_params > 0); | 
 |     if (!type_args.IsNull()) { | 
 |       if (IsConstructor()) { | 
 |         type_args = type_args.UpdateFunctionTypes(num_type_params, kAllFree, | 
 |                                                   Heap::kOld, nullptr); | 
 |         if (!type_args.IsInstantiated(kCurrentClass)) { | 
 |           type_args = type_args.InstantiateFrom( | 
 |               instantiator_type_arguments, function_type_arguments, | 
 |               kNoneFree /* avoid truncating parent type args */, Heap::kOld); | 
 |         } | 
 |       } else { | 
 |         type_args = type_args.UpdateFunctionTypes(0, kNoneFree, Heap::kOld, | 
 |                                                   function_type_mapping); | 
 |       } | 
 |     } | 
 |   }; | 
 |  | 
 |   // Set closure function's type parameters. | 
 |   if (num_type_params > 0) { | 
 |     const TypeParameters& old_type_params = TypeParameters::Handle( | 
 |         zone, IsConstructor() ? cls.type_parameters() : type_parameters()); | 
 |     const TypeParameters& new_type_params = | 
 |         TypeParameters::Handle(zone, TypeParameters::New()); | 
    // The names are ignored in a signature, but the length of the names
    // array defines the number of type parameters, so it still needs to be
    // set.
 |     new_type_params.set_names(Array::Handle(zone, old_type_params.names())); | 
 |     new_type_params.set_flags(Array::Handle(zone, old_type_params.flags())); | 
 |  | 
 |     closure_signature.SetTypeParameters(new_type_params); | 
 |     ASSERT(closure_signature.NumTypeParameters() == num_type_params); | 
 |  | 
 |     TypeArguments& type_args = TypeArguments::Handle(zone); | 
 |     type_args = TypeArguments::New(num_type_params); | 
 |     TypeParameter& type_param = TypeParameter::Handle(zone); | 
 |     for (intptr_t i = 0; i < num_type_params; i++) { | 
 |       type_param = closure_signature.TypeParameterAt(i); | 
 |       type_args.SetTypeAt(i, type_param); | 
 |     } | 
 |  | 
 |     if (IsConstructor()) { | 
 |       instantiator_type_arguments = | 
 |           type_args.ToInstantiatorTypeArguments(thread, cls); | 
 |     } else { | 
 |       ASSERT(NumTypeArguments() == type_args.Length()); | 
 |       function_type_arguments = type_args.ptr(); | 
 |     } | 
 |  | 
 |     type_args = old_type_params.bounds(); | 
 |     transform_type_args(type_args); | 
 |     new_type_params.set_bounds(type_args); | 
 |  | 
 |     type_args = old_type_params.defaults(); | 
 |     transform_type_args(type_args); | 
 |     new_type_params.set_defaults(type_args); | 
 |   } | 
 |  | 
 |   // Set closure function's result type. | 
 |   AbstractType& result_type = AbstractType::Handle(zone); | 
 |   if (IsConstructor()) { | 
 |     result_type = cls.DeclarationType(); | 
 |   } else { | 
 |     result_type = this->result_type(); | 
 |   } | 
 |   transform_type(result_type); | 
 |   closure_signature.set_result_type(result_type); | 
 |  | 
 |   // Set closure function's end token to this end token. | 
 |   closure_function.set_end_token_pos(end_token_pos()); | 
 |  | 
 |   // The closurized method stub just calls into the original method and should | 
 |   // therefore be skipped by the debugger and in stack traces. | 
 |   closure_function.set_is_debuggable(false); | 
 |   closure_function.set_is_visible(false); | 
 |  | 
  // Set the closure function's formal parameters to this function's formal
  // parameters, removing the receiver if this is an instance method and
  // adding the closure object as the first parameter.
 |   const int kClosure = 1; | 
 |   const int num_implicit_params = NumImplicitParameters(); | 
 |   const int num_fixed_params = | 
 |       kClosure - num_implicit_params + num_fixed_parameters(); | 
 |   const int num_opt_params = NumOptionalParameters(); | 
 |   const bool has_opt_pos_params = HasOptionalPositionalParameters(); | 
 |   const int num_params = num_fixed_params + num_opt_params; | 
 |   const int num_pos_params = has_opt_pos_params ? num_params : num_fixed_params; | 
 |   closure_signature.set_num_fixed_parameters(num_fixed_params); | 
 |   closure_signature.SetNumOptionalParameters(num_opt_params, | 
 |                                              has_opt_pos_params); | 
 |   closure_signature.set_parameter_types( | 
 |       Array::Handle(zone, Array::New(num_params, Heap::kOld))); | 
 |   closure_function.CreateNameArray(); | 
 |   closure_signature.CreateNameArrayIncludingFlags(); | 
 |   AbstractType& param_type = AbstractType::Handle(zone); | 
 |   String& param_name = String::Handle(zone); | 
 |   // Add implicit closure object parameter. | 
 |   param_type = Type::DynamicType(); | 
 |   closure_signature.SetParameterTypeAt(0, param_type); | 
 |   closure_function.SetParameterNameAt(0, Symbols::ClosureParameter()); | 
 |   for (int i = kClosure; i < num_pos_params; i++) { | 
 |     param_type = ParameterTypeAt(num_implicit_params - kClosure + i); | 
 |     transform_type(param_type); | 
 |     closure_signature.SetParameterTypeAt(i, param_type); | 
 |     param_name = ParameterNameAt(num_implicit_params - kClosure + i); | 
 |     // Set the name in the function for positional parameters. | 
 |     closure_function.SetParameterNameAt(i, param_name); | 
 |   } | 
 |   for (int i = num_pos_params; i < num_params; i++) { | 
 |     param_type = ParameterTypeAt(num_implicit_params - kClosure + i); | 
 |     transform_type(param_type); | 
 |     closure_signature.SetParameterTypeAt(i, param_type); | 
 |     param_name = ParameterNameAt(num_implicit_params - kClosure + i); | 
 |     // Set the name in the signature for named parameters. | 
 |     closure_signature.SetParameterNameAt(i, param_name); | 
 |     if (IsRequiredAt(num_implicit_params - kClosure + i)) { | 
 |       closure_signature.SetIsRequiredAt(i); | 
 |     } | 
 |   } | 
 |   closure_signature.FinalizeNameArray(); | 
 |   closure_function.InheritKernelOffsetFrom(*this); | 
 |  | 
 |   if (!is_static() && !IsConstructor()) { | 
 |     // Change covariant parameter types to Object?. | 
 |     BitVector is_covariant(zone, NumParameters()); | 
 |     BitVector is_generic_covariant_impl(zone, NumParameters()); | 
 |     kernel::ReadParameterCovariance(*this, &is_covariant, | 
 |                                     &is_generic_covariant_impl); | 
 |  | 
 |     ObjectStore* object_store = IsolateGroup::Current()->object_store(); | 
 |     const auto& object_type = | 
 |         Type::Handle(zone, object_store->nullable_object_type()); | 
 |     ASSERT(object_type.IsCanonical()); | 
 |     for (intptr_t i = kClosure; i < num_params; ++i) { | 
 |       const intptr_t original_param_index = num_implicit_params - kClosure + i; | 
 |       if (is_covariant.Contains(original_param_index) || | 
 |           is_generic_covariant_impl.Contains(original_param_index)) { | 
 |         closure_signature.SetParameterTypeAt(i, object_type); | 
 |       } | 
 |     } | 
 |   } | 
 |   ASSERT(!closure_signature.IsFinalized()); | 
 |   closure_signature ^= ClassFinalizer::FinalizeType(closure_signature); | 
 |   closure_function.SetSignature(closure_signature); | 
 |   set_implicit_closure_function(closure_function); | 
 |   ASSERT(closure_function.IsImplicitClosureFunction()); | 
 |   ASSERT(HasImplicitClosureFunction()); | 
 |   return closure_function.ptr(); | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
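// Clears the cached implicit closure function, but only if it was never
// compiled, so that installed code is not lost.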
 | void Function::DropUncompiledImplicitClosureFunction() const { | 
 |   if (implicit_closure_function() != Function::null()) { | 
 |     const Function& func = Function::Handle(implicit_closure_function()); | 
 |     if (!func.HasCode()) { | 
 |       set_implicit_closure_function(Function::Handle()); | 
 |     } | 
 |   } | 
 | } | 
 |  | 
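// Returns the signature using internal (unscrubbed) names. In the
// precompiled runtime the signature may have been dropped, in which case
// null is returned.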
 | StringPtr Function::InternalSignature() const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (signature() == FunctionType::null()) { | 
 |     return String::null(); | 
 |   } | 
 | #endif | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   const FunctionType& sig = FunctionType::Handle(signature()); | 
 |   sig.Print(kInternalName, &printer); | 
 |   return Symbols::New(thread, printer.buffer()); | 
 | } | 
 |  | 
 | StringPtr Function::UserVisibleSignature() const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (signature() == FunctionType::null()) { | 
 |     return String::null(); | 
 |   } | 
 | #endif | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   const FunctionType& sig = FunctionType::Handle(signature()); | 
 |   sig.Print(kUserVisibleName, &printer); | 
 |   return Symbols::New(thread, printer.buffer()); | 
 | } | 
 |  | 
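// Prints the parameter list of this signature, hiding implicit parameters
// for user-visible names and wrapping optional parameters in '[' or '{'.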
 | void FunctionType::PrintParameters(Thread* thread, | 
 |                                    Zone* zone, | 
 |                                    NameVisibility name_visibility, | 
 |                                    BaseTextBuffer* printer) const { | 
 |   AbstractType& param_type = AbstractType::Handle(zone); | 
 |   const intptr_t num_params = NumParameters(); | 
 |   const intptr_t num_fixed_params = num_fixed_parameters(); | 
 |   const intptr_t num_opt_pos_params = NumOptionalPositionalParameters(); | 
 |   const intptr_t num_opt_named_params = NumOptionalNamedParameters(); | 
 |   const intptr_t num_opt_params = num_opt_pos_params + num_opt_named_params; | 
 |   ASSERT((num_fixed_params + num_opt_params) == num_params); | 
 |   intptr_t i = 0; | 
 |   if (name_visibility == kUserVisibleName) { | 
 |     // Hide implicit parameters. | 
 |     i = num_implicit_parameters(); | 
 |   } | 
 |   String& name = String::Handle(zone); | 
 |   while (i < num_fixed_params) { | 
 |     param_type = ParameterTypeAt(i); | 
 |     ASSERT(!param_type.IsNull()); | 
 |     param_type.PrintName(name_visibility, printer); | 
 |     if (i != (num_params - 1)) { | 
 |       printer->AddString(", "); | 
 |     } | 
 |     i++; | 
 |   } | 
 |   if (num_opt_params > 0) { | 
 |     if (num_opt_pos_params > 0) { | 
 |       printer->AddString("["); | 
 |     } else { | 
 |       printer->AddString("{"); | 
 |     } | 
 |     for (intptr_t i = num_fixed_params; i < num_params; i++) { | 
 |       if (num_opt_named_params > 0 && IsRequiredAt(i)) { | 
 |         printer->AddString("required "); | 
 |       } | 
 |       param_type = ParameterTypeAt(i); | 
 |       ASSERT(!param_type.IsNull()); | 
 |       param_type.PrintName(name_visibility, printer); | 
 |       // The parameter name of an optional positional parameter does not need | 
 |       // to be part of the signature, since it is not used. | 
 |       if (num_opt_named_params > 0) { | 
 |         name = ParameterNameAt(i); | 
 |         printer->AddString(" "); | 
 |         printer->AddString(name.ToCString()); | 
 |       } | 
 |       if (i != (num_params - 1)) { | 
 |         printer->AddString(", "); | 
 |       } | 
 |     } | 
 |     if (num_opt_pos_params > 0) { | 
 |       printer->AddString("]"); | 
 |     } else { | 
 |       printer->AddString("}"); | 
 |     } | 
 |   } | 
 | } | 
 |  | 
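// Returns the canonical closure instance for a static tear-off, allocating
// it on first use. The cache is re-checked under the program lock
// (double-checked locking) so racing threads agree on a single closure.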
 | ClosurePtr Function::ImplicitStaticClosure() const { | 
 |   ASSERT(IsImplicitStaticClosureFunction()); | 
 |   if (implicit_static_closure() != Closure::null()) { | 
 |     return implicit_static_closure(); | 
 |   } | 
 |  | 
 |   auto thread = Thread::Current(); | 
 |   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |  | 
 |   if (implicit_static_closure() != Closure::null()) { | 
 |     return implicit_static_closure(); | 
 |   } | 
 |  | 
 |   Zone* zone = thread->zone(); | 
 |   const auto& closure = | 
 |       Closure::Handle(zone, Closure::New(Object::null_type_arguments(), | 
 |                                          Object::null_type_arguments(), *this, | 
 |                                          Object::null_object(), Heap::kOld)); | 
 |   set_implicit_static_closure(closure); | 
 |   return implicit_static_closure(); | 
 | } | 
 |  | 
 | ClosurePtr Function::ImplicitInstanceClosure(const Instance& receiver) const { | 
 |   ASSERT(IsImplicitClosureFunction()); | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   TypeArguments& instantiator_type_arguments = TypeArguments::Handle(zone); | 
 |   if (!HasInstantiatedSignature(kCurrentClass)) { | 
 |     instantiator_type_arguments = receiver.GetTypeArguments(); | 
 |   } | 
 |   ASSERT(!HasGenericParent());  // No generic parent function. | 
 |   return Closure::New(instantiator_type_arguments, | 
 |                       Object::null_type_arguments(), *this, receiver); | 
 | } | 
 |  | 
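// Resolves the tear-off back to the underlying method on its owner class.
// After a hot reload the resolved function can differ from the recorded
// parent; if it disagrees in staticness or kind, null is returned.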
 | FunctionPtr Function::ImplicitClosureTarget(Zone* zone) const { | 
 |   const auto& parent = Function::Handle(zone, parent_function()); | 
 |   const auto& func_name = String::Handle(zone, parent.name()); | 
 |   const auto& owner = Class::Handle(zone, parent.Owner()); | 
 |   Thread* thread = Thread::Current(); | 
 |   const auto& error = owner.EnsureIsFinalized(thread); | 
 |   ASSERT(error == Error::null()); | 
 |   auto& target = | 
 |       Function::Handle(zone, Resolver::ResolveFunction(zone, owner, func_name)); | 
 |  | 
 |   if (!target.IsNull() && (target.ptr() != parent.ptr())) { | 
 |     DEBUG_ASSERT(IsolateGroup::Current()->HasAttemptedReload()); | 
 |     if ((target.is_static() != parent.is_static()) || | 
 |         (target.kind() != parent.kind())) { | 
 |       target = Function::null(); | 
 |     } | 
 |   } | 
 |  | 
 |   return target.ptr(); | 
 | } | 
 |  | 
 | void FunctionType::Print(NameVisibility name_visibility, | 
 |                          BaseTextBuffer* printer) const { | 
 |   if (IsNull()) { | 
 |     printer->AddString("null");  // Signature optimized out in precompiler. | 
 |     return; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const TypeParameters& type_params = | 
 |       TypeParameters::Handle(zone, type_parameters()); | 
 |   if (!type_params.IsNull()) { | 
 |     printer->AddString("<"); | 
 |     const intptr_t base = NumParentTypeArguments(); | 
 |     const bool kIsClassTypeParameter = false; | 
 |     // Type parameter names are meaningless after canonicalization. | 
 |     type_params.Print(thread, zone, kIsClassTypeParameter, base, | 
 |                       name_visibility, printer); | 
 |     printer->AddString(">"); | 
 |   } | 
 |   printer->AddString("("); | 
 |   PrintParameters(thread, zone, name_visibility, printer); | 
 |   printer->AddString(") => "); | 
 |   const AbstractType& res_type = AbstractType::Handle(zone, result_type()); | 
 |   if (!res_type.IsNull()) { | 
 |     res_type.PrintName(name_visibility, printer); | 
 |   } else { | 
 |     printer->AddString("null"); | 
 |   } | 
 | } | 
 |  | 
 | bool Function::HasInstantiatedSignature( | 
 |     Genericity genericity, | 
 |     intptr_t num_free_fun_type_params) const { | 
 |   return FunctionType::Handle(signature()) | 
 |       .IsInstantiated(genericity, num_free_fun_type_params); | 
 | } | 
 |  | 
 | bool FunctionType::IsInstantiated(Genericity genericity, | 
 |                                   intptr_t num_free_fun_type_params) const { | 
 |   if (num_free_fun_type_params == kCurrentAndEnclosingFree) { | 
 |     num_free_fun_type_params = kAllFree; | 
 |   } else if (genericity != kCurrentClass) { | 
 |     const intptr_t num_parent_type_args = NumParentTypeArguments(); | 
 |     if (num_parent_type_args > 0 && num_free_fun_type_params > 0) { | 
 |       // The number of parent type arguments is cached in the FunctionType, so | 
 |       // we can't consider any FunctionType with free parent type arguments as | 
 |       // fully instantiated. Instead, the FunctionType must be instantiated to | 
 |       // reduce the number of parent type arguments, even if they're unused in | 
 |       // its component types. | 
 |       return false; | 
 |     } | 
 |     // Don't consider local function type parameters as free. | 
 |     if (num_free_fun_type_params > num_parent_type_args) { | 
 |       num_free_fun_type_params = num_parent_type_args; | 
 |     } | 
 |   } | 
 |   AbstractType& type = AbstractType::Handle(result_type()); | 
 |   if (!type.IsInstantiated(genericity, num_free_fun_type_params)) { | 
 |     return false; | 
 |   } | 
 |   const intptr_t num_parameters = NumParameters(); | 
 |   for (intptr_t i = 0; i < num_parameters; i++) { | 
 |     type = ParameterTypeAt(i); | 
 |     if (!type.IsInstantiated(genericity, num_free_fun_type_params)) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   const intptr_t num_type_params = NumTypeParameters(); | 
 |   if (num_type_params > 0) { | 
 |     TypeParameters& type_params = TypeParameters::Handle(type_parameters()); | 
 |     if (!type_params.AllDynamicBounds()) { | 
 |       for (intptr_t i = 0; i < type_params.Length(); ++i) { | 
 |         type = type_params.BoundAt(i); | 
 |         if (!type.IsInstantiated(genericity, num_free_fun_type_params)) { | 
 |           return false; | 
 |         } | 
 |       } | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | bool Function::IsPrivate() const { | 
 |   return Library::IsPrivate(String::Handle(name())); | 
 | } | 
 |  | 
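// Returns the owning class, unwrapping a PatchClass when the function was
// defined in a patch.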
 | ClassPtr Function::Owner() const { | 
 |   ASSERT(untag()->owner() != Object::null()); | 
 |   if (untag()->owner()->IsClass()) { | 
 |     return Class::RawCast(untag()->owner()); | 
 |   } | 
 |   const Object& obj = Object::Handle(untag()->owner()); | 
 |   ASSERT(obj.IsPatchClass()); | 
 |   return PatchClass::Cast(obj).wrapped_class(); | 
 | } | 
 |  | 
 | void Function::InheritKernelOffsetFrom(const Function& src) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   StoreNonPointer(&untag()->kernel_offset_, src.untag()->kernel_offset_); | 
 | #endif | 
 | } | 
 |  | 
 | void Function::InheritKernelOffsetFrom(const Field& src) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   set_kernel_offset(src.kernel_offset()); | 
 | #endif | 
 | } | 
 |  | 
 | void Function::SetKernelLibraryAndEvalScript( | 
 |     const Script& script, | 
 |     const class KernelProgramInfo& kernel_program_info, | 
 |     intptr_t index) const { | 
 |   Array& data_field = Array::Handle( | 
 |       Array::New(static_cast<intptr_t>(EvalFunctionData::kLength))); | 
 |   data_field.SetAt(static_cast<intptr_t>(EvalFunctionData::kScript), script); | 
 |   data_field.SetAt(static_cast<intptr_t>(EvalFunctionData::kKernelProgramInfo), | 
 |                    kernel_program_info); | 
 |   data_field.SetAt(static_cast<intptr_t>(EvalFunctionData::kKernelLibraryIndex), | 
 |                    Smi::Handle(Smi::New(index))); | 
 |   set_data(data_field); | 
 | } | 
 |  | 
 | ScriptPtr Function::script() const { | 
 |   // NOTE(turnidge): If you update this function, you probably want to | 
 |   // update Class::PatchFieldsAndFunctions() at the same time. | 
 |   if (IsDynamicInvocationForwarder()) { | 
 |     const Function& target = Function::Handle(ForwardingTarget()); | 
 |     return target.IsNull() ? Script::null() : target.script(); | 
 |   } | 
 |   if (IsImplicitGetterOrSetter()) { | 
 |     const auto& field = Field::Handle(accessor_field()); | 
 |     return field.IsNull() ? Script::null() : field.Script(); | 
 |   } | 
 |   if (is_eval_function()) { | 
 |     const auto& fdata = Array::Handle(Array::RawCast(data())); | 
 |     return Script::RawCast( | 
 |         fdata.At(static_cast<intptr_t>(EvalFunctionData::kScript))); | 
 |   } | 
 |   if (token_pos() == TokenPosition::kMinSource) { | 
 |     // Testing for position 0 is an optimization that relies on temporary | 
 |     // eval functions having token position 0. | 
 |     const Script& script = Script::Handle(eval_script()); | 
 |     if (!script.IsNull()) { | 
 |       return script.ptr(); | 
 |     } | 
 |   } | 
 |   const Object& obj = Object::Handle(untag()->owner()); | 
 |   if (obj.IsPatchClass()) { | 
 |     return PatchClass::Cast(obj).script(); | 
 |   } | 
 |   if (IsClosureFunction()) { | 
 |     const Function& function = Function::Handle(parent_function()); | 
 |     if (function.IsNull()) return Script::null(); | 
 |     return function.script(); | 
 |   } | 
 |   ASSERT(obj.IsClass()); | 
 |   return Class::Cast(obj).script(); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | KernelProgramInfoPtr Function::KernelProgramInfo() const { | 
 |   if (is_eval_function()) { | 
 |     const auto& fdata = Array::Handle(Array::RawCast(data())); | 
 |     return KernelProgramInfo::RawCast( | 
 |         fdata.At(static_cast<intptr_t>(EvalFunctionData::kKernelProgramInfo))); | 
 |   } | 
 |   if (IsClosureFunction()) { | 
 |     const auto& parent = Function::Handle(parent_function()); | 
 |     return parent.KernelProgramInfo(); | 
 |   } | 
 |   const auto& owner = Object::Handle(RawOwner()); | 
 |   if (owner.IsClass()) { | 
 |     return Class::Cast(owner).KernelProgramInfo(); | 
 |   } | 
 |   return PatchClass::Cast(owner).kernel_program_info(); | 
 | } | 
 |  | 
 | TypedDataViewPtr Function::KernelLibrary() const { | 
 |   const auto& info = KernelProgramInfo::Handle(KernelProgramInfo()); | 
 |   return info.KernelLibrary(KernelLibraryIndex()); | 
 | } | 
 |  | 
 | intptr_t Function::KernelLibraryOffset() const { | 
 |   const intptr_t kernel_library_index = KernelLibraryIndex(); | 
 |   if (kernel_library_index == -1) return 0; | 
 |   const auto& info = KernelProgramInfo::Handle(KernelProgramInfo()); | 
 |   return info.KernelLibraryStartOffset(kernel_library_index); | 
 | } | 
 |  | 
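// Returns the index of this function's kernel library, or -1 for synthetic
// functions (dispatchers and FFI callback trampolines) that have no kernel
// source. Eval functions and closures delegate to their eval data and
// parent function, respectively.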
 | intptr_t Function::KernelLibraryIndex() const { | 
 |   if (IsNoSuchMethodDispatcher() || IsInvokeFieldDispatcher() || | 
 |       IsFfiCallbackTrampoline()) { | 
 |     return -1; | 
 |   } | 
 |   if (is_eval_function()) { | 
 |     const auto& fdata = Array::Handle(Array::RawCast(data())); | 
 |     return Smi::Value(static_cast<SmiPtr>(fdata.At( | 
 |         static_cast<intptr_t>(EvalFunctionData::kKernelLibraryIndex)))); | 
 |   } | 
 |   if (IsClosureFunction()) { | 
 |     const auto& parent = Function::Handle(parent_function()); | 
 |     ASSERT(!parent.IsNull()); | 
 |     return parent.KernelLibraryIndex(); | 
 |   } | 
 |  | 
 |   const auto& obj = Object::Handle(untag()->owner()); | 
 |   if (obj.IsClass()) { | 
 |     const auto& lib = Library::Handle(Class::Cast(obj).library()); | 
 |     return lib.kernel_library_index(); | 
 |   } | 
 |   ASSERT(obj.IsPatchClass()); | 
 |   return PatchClass::Cast(obj).kernel_library_index(); | 
 | } | 
 | #endif | 
 |  | 
 | bool Function::HasOptimizedCode() const { | 
 |   return HasCode() && Code::Handle(CurrentCode()).is_optimized(); | 
 | } | 
 |  | 
 | const char* Function::NameCString(NameVisibility name_visibility) const { | 
 |   switch (name_visibility) { | 
 |     case kInternalName: | 
 |       return String::Handle(name()).ToCString(); | 
 |     case kScrubbedName: | 
 |     case kUserVisibleName: | 
 |       return UserVisibleNameCString(); | 
 |   } | 
 |   UNREACHABLE(); | 
 |   return nullptr; | 
 | } | 
 |  | 
 | const char* Function::UserVisibleNameCString() const { | 
 |   if (FLAG_show_internal_names) { | 
 |     return String::Handle(name()).ToCString(); | 
 |   } | 
 |   return String::ScrubName(String::Handle(name()), | 
 |                            is_extension_member() || is_extension_type_member()); | 
 | } | 
 |  | 
 | StringPtr Function::UserVisibleName() const { | 
 |   if (FLAG_show_internal_names) { | 
 |     return name(); | 
 |   } | 
 |   return Symbols::New( | 
 |       Thread::Current(), | 
 |       String::ScrubName(String::Handle(name()), | 
 |                         is_extension_member() || is_extension_type_member())); | 
 | } | 
 |  | 
 | StringPtr Function::QualifiedScrubbedName() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   PrintName(NameFormattingParams(kScrubbedName), &printer); | 
 |   return Symbols::New(thread, printer.buffer()); | 
 | } | 
 |  | 
 | const char* Function::QualifiedScrubbedNameCString() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   PrintName(NameFormattingParams(kScrubbedName), &printer); | 
 |   return printer.buffer(); | 
 | } | 
 |  | 
 | StringPtr Function::QualifiedUserVisibleName() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   PrintName(NameFormattingParams(kUserVisibleName), &printer); | 
 |   return Symbols::New(thread, printer.buffer()); | 
 | } | 
 |  | 
 | const char* Function::QualifiedUserVisibleNameCString() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   PrintName(NameFormattingParams(kUserVisibleName), &printer); | 
 |   return printer.buffer(); | 
 | } | 
 |  | 
 | static void FunctionPrintNameHelper(const Function& fun, | 
 |                                     const NameFormattingParams& params, | 
 |                                     BaseTextBuffer* printer) { | 
 |   if (fun.IsNonImplicitClosureFunction()) { | 
 |     if (params.include_parent_name) { | 
 |       const auto& parent = Function::Handle(fun.parent_function()); | 
 |       if (parent.IsNull()) { | 
 |         printer->AddString(Symbols::OptimizedOut().ToCString()); | 
 |       } else { | 
 |         parent.PrintName(params, printer); | 
 |       } | 
 |       // A function's scrubbed name and its user visible name are identical. | 
 |       printer->AddString("."); | 
 |     } | 
 |     if (params.disambiguate_names && | 
 |         fun.name() == Symbols::AnonymousClosure().ptr()) { | 
 |       if (fun.token_pos().IsReal()) { | 
 |         printer->Printf("<anonymous closure @%" Pd ">", fun.token_pos().Pos()); | 
 |       } else { | 
 |         printer->Printf("<anonymous closure @no position>"); | 
 |       } | 
 |     } else { | 
 |       printer->AddString(fun.NameCString(params.name_visibility)); | 
 |       if (params.disambiguate_names) { | 
 |         if (fun.token_pos().IsReal()) { | 
 |           printer->Printf("@<%" Pd ">", fun.token_pos().Pos()); | 
 |         } else { | 
 |           printer->Printf("@<no position>"); | 
 |         } | 
 |       } | 
 |     } | 
 |     return; | 
 |   } | 
 |   if (params.disambiguate_names) { | 
 |     if (fun.IsInvokeFieldDispatcher()) { | 
 |       printer->AddString("[invoke-field] "); | 
 |     } | 
 |     if (fun.IsNoSuchMethodDispatcher()) { | 
 |       printer->AddString("[no-such-method] "); | 
 |     } | 
 |     if (fun.IsImplicitClosureFunction()) { | 
 |       printer->AddString("[tear-off] "); | 
 |     } | 
 |     if (fun.IsMethodExtractor()) { | 
 |       printer->AddString("[tear-off-extractor] "); | 
 |     } | 
 |   } | 
 |  | 
 |   if (fun.kind() == UntaggedFunction::kConstructor) { | 
 |     printer->AddString("new "); | 
 |   } else if (params.include_class_name) { | 
 |     const Class& cls = Class::Handle(fun.Owner()); | 
 |     if (!cls.IsTopLevel()) { | 
 |       const Class& mixin = Class::Handle(cls.Mixin()); | 
 |       printer->AddString(params.name_visibility == Object::kUserVisibleName | 
 |                              ? mixin.UserVisibleNameCString() | 
 |                              : cls.NameCString(params.name_visibility)); | 
 |       printer->AddString("."); | 
 |     } | 
 |   } | 
 |  | 
 |   printer->AddString(fun.NameCString(params.name_visibility)); | 
 |  | 
 |   // Dispatchers that are created with an arguments descriptor need both the | 
 |   // name and the saved arguments descriptor to disambiguate. | 
 |   if (params.disambiguate_names && fun.HasSavedArgumentsDescriptor()) { | 
 |     const auto& args_desc_array = Array::Handle(fun.saved_args_desc()); | 
 |     const ArgumentsDescriptor args_desc(args_desc_array); | 
 |     args_desc.PrintTo(printer); | 
 |   } | 
 | } | 
 |  | 
 | void Function::PrintName(const NameFormattingParams& params, | 
 |                          BaseTextBuffer* printer) const { | 
 |   if (!IsLocalFunction()) { | 
 |     FunctionPrintNameHelper(*this, params, printer); | 
 |     return; | 
 |   } | 
 |   auto& fun = Function::Handle(ptr()); | 
 |   FunctionPrintNameHelper(fun, params, printer); | 
 | } | 
 |  | 
 | StringPtr Function::GetSource() const { | 
 |   if (IsImplicitConstructor() || is_synthetic()) { | 
 |     // We may need to handle more cases when the restrictions on mixins are | 
    // relaxed. In particular, we might start associating some source with the
 |     // forwarding constructors when it becomes possible to specify a particular | 
 |     // constructor from the mixin to use. | 
 |     return String::null(); | 
 |   } | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const Script& func_script = Script::Handle(zone, script()); | 
 |  | 
 |   intptr_t from_line, from_col; | 
 |   if (!func_script.GetTokenLocation(token_pos(), &from_line, &from_col)) { | 
 |     return String::null(); | 
 |   } | 
 |   intptr_t to_line, to_col; | 
 |   if (!func_script.GetTokenLocation(end_token_pos(), &to_line, &to_col)) { | 
 |     return String::null(); | 
 |   } | 
 |   intptr_t to_length = func_script.GetTokenLength(end_token_pos()); | 
 |   if (to_length < 0) { | 
 |     return String::null(); | 
 |   } | 
 |  | 
 |   if (to_length == 1) { | 
    // Handle special cases for end tokens of closures (where we exclude the
    // last token):
    // (1) "foo(() => null, bar);": End token is ',', but we don't print it.
    // (2) "foo(() => null);": End token is ')', but we don't print it.
    // (3) "var foo = () => null;": End token is ';', but in this case the
    //     semicolon belongs to the assignment, so we skip it.
 |     const String& src = String::Handle(func_script.Source()); | 
 |     if (src.IsNull() || src.Length() == 0) { | 
 |       return Symbols::OptimizedOut().ptr(); | 
 |     } | 
 |     uint16_t end_char = src.CharAt(end_token_pos().Pos()); | 
 |     if ((end_char == ',') ||  // Case 1. | 
 |         (end_char == ')') ||  // Case 2. | 
 |         (end_char == ';' && String::Handle(zone, name()) | 
 |                                 .Equals("<anonymous closure>"))) {  // Case 3. | 
 |       to_length = 0; | 
 |     } | 
 |   } | 
 |  | 
 |   return func_script.GetSnippet(from_line, from_col, to_line, | 
 |                                 to_col + to_length); | 
 | } | 
 |  | 
// Construct the fingerprint from the token stream. The token stream also
// contains the arguments.
 | int32_t Function::SourceFingerprint() const { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   return kernel::KernelSourceFingerprintHelper::CalculateFunctionFingerprint( | 
 |       *this); | 
 | #else | 
 |   return 0; | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | void Function::SaveICDataMap( | 
 |     const ZoneGrowableArray<const ICData*>& deopt_id_to_ic_data, | 
 |     const Array& edge_counters_array, | 
 |     const Array& coverage_array) const { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
  // Already installed; nothing to do.
 |   if (ic_data_array() != Array::null()) { | 
 |     ASSERT(coverage_array.ptr() == GetCoverageArray()); | 
 |     return; | 
 |   } | 
 |  | 
 |   // Compute number of ICData objects to save. | 
 |   intptr_t count = 0; | 
 |   for (intptr_t i = 0; i < deopt_id_to_ic_data.length(); i++) { | 
 |     if (deopt_id_to_ic_data[i] != nullptr) { | 
 |       count++; | 
 |     } | 
 |   } | 
 |  | 
 |   // Compress sparse deopt_id_to_ic_data mapping into a linear sequence of | 
 |   // ICData objects. | 
 |   const Array& array = Array::Handle( | 
 |       Array::New(ICDataArrayIndices::kFirstICData + count, Heap::kOld)); | 
 |   for (intptr_t i = 0, pos = ICDataArrayIndices::kFirstICData; | 
 |        i < deopt_id_to_ic_data.length(); i++) { | 
 |     if (deopt_id_to_ic_data[i] != nullptr) { | 
 |       ASSERT(i == deopt_id_to_ic_data[i]->deopt_id()); | 
 |       array.SetAt(pos++, *deopt_id_to_ic_data[i]); | 
 |     } | 
 |   } | 
 |   array.SetAt(ICDataArrayIndices::kEdgeCounters, edge_counters_array); | 
 |   // Preserve coverage_array which is stored early after graph construction. | 
 |   array.SetAt(ICDataArrayIndices::kCoverageData, coverage_array); | 
 |   set_ic_data_array(array); | 
 | #else   // DART_PRECOMPILED_RUNTIME | 
 |   UNREACHABLE(); | 
 | #endif  // DART_PRECOMPILED_RUNTIME | 
 | } | 
 |  | 
 | void Function::RestoreICDataMap( | 
 |     ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data, | 
 |     bool clone_ic_data) const { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (FLAG_force_clone_compiler_objects) { | 
 |     clone_ic_data = true; | 
 |   } | 
 |   ASSERT(deopt_id_to_ic_data->is_empty()); | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const Array& saved_ic_data = Array::Handle(zone, ic_data_array()); | 
 |   if (saved_ic_data.IsNull()) { | 
    // This can happen with not-yet-compiled unoptimized code or with
    // force-optimized functions.
 |     return; | 
 |   } | 
 |   const intptr_t saved_length = saved_ic_data.Length(); | 
 |   ASSERT(saved_length > 0); | 
 |   if (saved_length > ICDataArrayIndices::kFirstICData) { | 
 |     const intptr_t restored_length = | 
 |         ICData::Cast(Object::Handle(zone, saved_ic_data.At(saved_length - 1))) | 
 |             .deopt_id() + | 
 |         1; | 
 |     deopt_id_to_ic_data->SetLength(restored_length); | 
 |     for (intptr_t i = 0; i < restored_length; i++) { | 
 |       (*deopt_id_to_ic_data)[i] = nullptr; | 
 |     } | 
 |     for (intptr_t i = ICDataArrayIndices::kFirstICData; i < saved_length; i++) { | 
 |       ICData& ic_data = ICData::ZoneHandle(zone); | 
 |       ic_data ^= saved_ic_data.At(i); | 
 |       if (clone_ic_data) { | 
 |         const ICData& original_ic_data = ICData::Handle(zone, ic_data.ptr()); | 
 |         ic_data = ICData::Clone(ic_data); | 
 |         ic_data.SetOriginal(original_ic_data); | 
 |       } | 
 |       ASSERT(deopt_id_to_ic_data->At(ic_data.deopt_id()) == nullptr); | 
 |       (*deopt_id_to_ic_data)[ic_data.deopt_id()] = &ic_data; | 
 |     } | 
 |   } | 
 | #else   // DART_PRECOMPILED_RUNTIME | 
 |   UNREACHABLE(); | 
 | #endif  // DART_PRECOMPILED_RUNTIME | 
 | } | 
 |  | 
 | ArrayPtr Function::GetCoverageArray() const { | 
 |   const Array& arr = Array::Handle(ic_data_array()); | 
 |   if (arr.IsNull()) { | 
 |     return Array::null(); | 
 |   } | 
 |   return Array::RawCast(arr.At(ICDataArrayIndices::kCoverageData)); | 
 | } | 
 |  | 
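// The IC data array is published with release semantics and read with
// acquire semantics, so a thread that observes a non-null array also
// observes its fully initialized contents.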
 | void Function::set_ic_data_array(const Array& value) const { | 
 |   untag()->set_ic_data_array<std::memory_order_release>(value.ptr()); | 
 | } | 
 |  | 
 | ArrayPtr Function::ic_data_array() const { | 
 |   return untag()->ic_data_array<std::memory_order_acquire>(); | 
 | } | 
 |  | 
 | void Function::ClearICDataArray() const { | 
 |   set_ic_data_array(Array::null_array()); | 
 | } | 
 |  | 
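// Linear search of the saved ICData entries for the one with the given
// deopt id; returns null if there is no match.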
 | ICDataPtr Function::FindICData(intptr_t deopt_id) const { | 
 |   const Array& array = Array::Handle(ic_data_array()); | 
 |   ICData& ic_data = ICData::Handle(); | 
 |   for (intptr_t i = ICDataArrayIndices::kFirstICData; i < array.Length(); i++) { | 
 |     ic_data ^= array.At(i); | 
 |     if (ic_data.deopt_id() == deopt_id) { | 
 |       return ic_data.ptr(); | 
 |     } | 
 |   } | 
 |   return ICData::null(); | 
 | } | 
 |  | 
 | void Function::SetDeoptReasonForAll(intptr_t deopt_id, | 
 |                                     ICData::DeoptReasonId reason) { | 
 |   const Array& array = Array::Handle(ic_data_array()); | 
 |   ICData& ic_data = ICData::Handle(); | 
 |   for (intptr_t i = ICDataArrayIndices::kFirstICData; i < array.Length(); i++) { | 
 |     ic_data ^= array.At(i); | 
 |     if (ic_data.deopt_id() == deopt_id) { | 
 |       ic_data.AddDeoptReason(reason); | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | bool Function::CheckSourceFingerprint(int32_t fp, const char* kind) const { | 
 | #if !defined(DEBUG) | 
  return true;  // Only checked in debug builds.
 | #endif | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   // Check that the function is marked as recognized via the vm:recognized | 
 |   // pragma. This is so that optimizations that change the signature will know | 
 |   // not to touch it. | 
 |   if (kind != nullptr && !MethodRecognizer::IsMarkedAsRecognized(*this, kind)) { | 
 |     OS::PrintErr( | 
 |         "Recognized method %s should be marked with: " | 
 |         "@pragma(\"vm:recognized\", \"%s\")\n", | 
 |         ToQualifiedCString(), kind); | 
 |     return false; | 
 |   } | 
 | #endif | 
 |  | 
 |   if (IsolateGroup::Current()->obfuscate() || FLAG_precompiled_mode || | 
 |       (Dart::vm_snapshot_kind() != Snapshot::kNone)) { | 
 |     return true;  // The kernel structure has been altered, skip checking. | 
 |   } | 
 |  | 
 |   if (SourceFingerprint() != fp) { | 
 |     // This output can be copied into a file, then used with sed | 
 |     // to replace the old values. | 
 |     // sed -i.bak -f /tmp/newkeys \ | 
 |     //    runtime/vm/compiler/recognized_methods_list.h | 
 |     THR_Print("s/0x%08x/0x%08x/\n", fp, SourceFingerprint()); | 
 |     return false; | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
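// Compiles the function if no code is installed yet. Compilation failures
// are turned into thrown Dart errors and never returned to the caller.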
 | CodePtr Function::EnsureHasCode() const { | 
 |   if (HasCode()) return CurrentCode(); | 
 |   Thread* thread = Thread::Current(); | 
 |   ASSERT(thread->IsDartMutatorThread()); | 
 |   DEBUG_ASSERT(thread->TopErrorHandlerIsExitFrame()); | 
 |   Zone* zone = thread->zone(); | 
 |   const Object& result = | 
 |       Object::Handle(zone, Compiler::CompileFunction(thread, *this)); | 
 |   if (result.IsError()) { | 
 |     if (result.ptr() == Object::out_of_memory_error().ptr()) { | 
 |       Exceptions::ThrowOOM(); | 
 |       UNREACHABLE(); | 
 |     } | 
 |     if (result.IsLanguageError()) { | 
 |       Exceptions::ThrowCompileTimeError(LanguageError::Cast(result)); | 
 |       UNREACHABLE(); | 
 |     } | 
 |     Exceptions::PropagateError(Error::Cast(result)); | 
 |     UNREACHABLE(); | 
 |   } | 
 |   // Compiling in unoptimized mode should never fail if there are no errors. | 
 |   RELEASE_ASSERT(HasCode()); | 
 |   ASSERT(ForceOptimize() || unoptimized_code() == result.ptr()); | 
 |   return CurrentCode(); | 
 | } | 
 |  | 
 | bool Function::NeedsMonomorphicCheckedEntry(Zone* zone) const { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (!IsDynamicFunction()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   // For functions which need an args descriptor the switchable call sites will | 
 |   // transition directly to calling via a stub (and therefore never call the | 
 |   // monomorphic entry). | 
 |   // | 
 |   // See runtime_entry.cc:DEFINE_RUNTIME_ENTRY(UnlinkedCall) | 
 |   if (PrologueNeedsArgumentsDescriptor()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   // All dyn:* forwarders are called via SwitchableCalls and all except the ones | 
 |   // with `PrologueNeedsArgumentsDescriptor()` transition into monomorphic | 
 |   // state. | 
 |   if (Function::IsDynamicInvocationForwarderName(name())) { | 
 |     return true; | 
 |   } | 
 |  | 
 |   // AOT mode uses table dispatch. | 
 |   // In JIT mode all instance calls use switchable calls. | 
 |   if (!FLAG_precompiled_mode) { | 
 |     return true; | 
 |   } | 
 |  | 
  // The monomorphic checked entry is only needed if there are dynamic
  // callers and we didn't create a dyn:* forwarder for this function.
 |   return HasDynamicCallers(zone) && | 
 |          !kernel::NeedsDynamicInvocationForwarder(*this); | 
 | #else | 
 |   UNREACHABLE(); | 
 |   return true; | 
 | #endif | 
 | } | 
 |  | 
 | bool Function::HasDynamicCallers(Zone* zone) const { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   // Issue(dartbug.com/42719): | 
  // Right now the metadata of _Closure.call says there are no dynamic
  // callers, even though there can be. To be conservative, we return true.
 |   if ((name() == Symbols::GetCall().ptr() || name() == Symbols::call().ptr()) && | 
 |       Class::IsClosureClass(Owner())) { | 
 |     return true; | 
 |   } | 
 |  | 
 |   // Use the results of TFA to determine whether this function is ever | 
 |   // called dynamically, i.e. using switchable calls. | 
 |   kernel::ProcedureAttributesMetadata metadata; | 
 |   metadata = kernel::ProcedureAttributesOf(*this, zone); | 
 |   if (IsGetterFunction() || IsImplicitGetterFunction() || IsMethodExtractor()) { | 
 |     // Dynamic method call through field/getter involves dynamic call of | 
 |     // the field/getter. | 
 |     return metadata.getter_called_dynamically || | 
 |            metadata.method_or_setter_called_dynamically; | 
 |   } else { | 
 |     return metadata.method_or_setter_called_dynamically; | 
 |   } | 
 | #else | 
 |   UNREACHABLE(); | 
 |   return true; | 
 | #endif | 
 | } | 
 |  | 
 | bool Function::PrologueNeedsArgumentsDescriptor() const { | 
 |   // These functions have a saved compile-time arguments descriptor that is | 
 |   // used in lieu of the runtime arguments descriptor in generated IL. | 
 |   if (HasSavedArgumentsDescriptor()) { | 
 |     return false; | 
 |   } | 
  // The prologues of these functions need to examine the arguments
  // descriptor for various purposes.
 |   return IsGeneric() || HasOptionalParameters(); | 
 | } | 
 |  | 
 | bool Function::MayHaveUncheckedEntryPoint() const { | 
 |   return FLAG_enable_multiple_entrypoints && | 
 |          (NeedsTypeArgumentTypeChecks() || NeedsArgumentTypeChecks()); | 
 | } | 
 |  | 
 | intptr_t Function::SourceSize() const { | 
 |   const TokenPosition& start = token_pos(); | 
 |   const TokenPosition& end = end_token_pos(); | 
 |   if (!end.IsReal() || start.IsNoSource() || start.IsClassifying()) { | 
 |     // No source information, so just return 0. | 
 |     return 0; | 
 |   } | 
 |   if (start.IsSynthetic()) { | 
    // Try to approximate the source size using the parent's source size.
 |     const auto& parent = Function::Handle(parent_function()); | 
 |     ASSERT(!parent.IsNull()); | 
 |     const intptr_t parent_size = parent.SourceSize(); | 
 |     if (parent_size == 0) { | 
 |       return parent_size; | 
 |     } | 
 |     // Parent must have a real ending position. | 
 |     return parent_size - (parent.end_token_pos().Pos() - end.Pos()); | 
 |   } | 
 |   return end.Pos() - start.Pos(); | 
 | } | 
 |  | 
 | const char* Function::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "Function: null"; | 
 |   } | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   ZoneTextBuffer buffer(zone); | 
 |   buffer.Printf("Function '%s':", String::Handle(zone, name()).ToCString()); | 
 |   if (is_static()) { | 
 |     buffer.AddString(" static"); | 
 |   } | 
 |   if (is_abstract()) { | 
 |     buffer.AddString(" abstract"); | 
 |   } | 
 |   switch (kind()) { | 
 |     case UntaggedFunction::kRegularFunction: | 
 |     case UntaggedFunction::kClosureFunction: | 
 |     case UntaggedFunction::kImplicitClosureFunction: | 
 |     case UntaggedFunction::kGetterFunction: | 
 |     case UntaggedFunction::kSetterFunction: | 
 |       break; | 
 |     case UntaggedFunction::kConstructor: | 
 |       buffer.AddString(is_static() ? " factory" : " constructor"); | 
 |       break; | 
 |     case UntaggedFunction::kImplicitGetter: | 
 |       buffer.AddString(" getter"); | 
 |       break; | 
 |     case UntaggedFunction::kImplicitSetter: | 
 |       buffer.AddString(" setter"); | 
 |       break; | 
 |     case UntaggedFunction::kImplicitStaticGetter: | 
 |       buffer.AddString(" static-getter"); | 
 |       break; | 
 |     case UntaggedFunction::kFieldInitializer: | 
 |       buffer.AddString(" field-initializer"); | 
 |       break; | 
 |     case UntaggedFunction::kMethodExtractor: | 
 |       buffer.AddString(" method-extractor"); | 
 |       break; | 
 |     case UntaggedFunction::kNoSuchMethodDispatcher: | 
 |       buffer.AddString(" no-such-method-dispatcher"); | 
 |       break; | 
 |     case UntaggedFunction::kDynamicInvocationForwarder: | 
 |       buffer.AddString(" dynamic-invocation-forwarder"); | 
 |       break; | 
 |     case UntaggedFunction::kInvokeFieldDispatcher: | 
 |       buffer.AddString(" invoke-field-dispatcher"); | 
 |       break; | 
 |     case UntaggedFunction::kIrregexpFunction: | 
 |       buffer.AddString(" irregexp-function"); | 
 |       break; | 
 |     case UntaggedFunction::kFfiTrampoline: | 
 |       buffer.AddString(" ffi-trampoline-function"); | 
 |       break; | 
 |     case UntaggedFunction::kRecordFieldGetter: | 
 |       buffer.AddString(" record-field-getter"); | 
 |       break; | 
 |     default: | 
 |       UNREACHABLE(); | 
 |   } | 
 |   if (HasSavedArgumentsDescriptor()) { | 
 |     const auto& args_desc_array = Array::Handle(zone, saved_args_desc()); | 
 |     const ArgumentsDescriptor args_desc(args_desc_array); | 
 |     buffer.AddChar('['); | 
 |     args_desc.PrintTo(&buffer); | 
 |     buffer.AddChar(']'); | 
 |   } | 
 |   if (is_const()) { | 
 |     buffer.AddString(" const"); | 
 |   } | 
 |   buffer.AddChar('.'); | 
 |   return buffer.buffer(); | 
 | } | 
 |  | 
 | void FunctionType::set_packed_parameter_counts( | 
 |     uint32_t packed_parameter_counts) const { | 
 |   untag()->packed_parameter_counts_ = packed_parameter_counts; | 
 | } | 
 |  | 
 | void FunctionType::set_packed_type_parameter_counts( | 
 |     uint16_t packed_type_parameter_counts) const { | 
 |   untag()->packed_type_parameter_counts_ = packed_type_parameter_counts; | 
 | } | 
 |  | 
 | void FunctionType::set_num_implicit_parameters(intptr_t value) const { | 
 |   ASSERT(value >= 0); | 
 |   untag()->packed_parameter_counts_.Update<PackedNumImplicitParameters>(value); | 
 | } | 
 |  | 
 | void ClosureData::set_default_type_arguments_instantiation_mode( | 
 |     InstantiationMode value) const { | 
 |   untag()->packed_fields_.Update<PackedInstantiationMode>(value); | 
 | } | 
 |  | 
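// The awaiter link is stored as a (depth, index) pair packed into bit
// fields of the ClosureData's packed_fields_.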
 | Function::AwaiterLink ClosureData::awaiter_link() const { | 
 |   const uint8_t depth = | 
 |       untag() | 
 |           ->packed_fields_.Read<UntaggedClosureData::PackedAwaiterLinkDepth>(); | 
 |   const uint8_t index = | 
 |       untag() | 
 |           ->packed_fields_.Read<UntaggedClosureData::PackedAwaiterLinkIndex>(); | 
 |   return {depth, index}; | 
 | } | 
 |  | 
 | void ClosureData::set_awaiter_link(Function::AwaiterLink link) const { | 
 |   untag()->packed_fields_.Update<UntaggedClosureData::PackedAwaiterLinkDepth>( | 
 |       link.depth); | 
 |   untag()->packed_fields_.Update<UntaggedClosureData::PackedAwaiterLinkIndex>( | 
 |       link.index); | 
 | } | 
 |  | 
 | ClosureDataPtr ClosureData::New() { | 
 |   ASSERT(Object::closure_data_class() != Class::null()); | 
 |   return Object::Allocate<ClosureData>(Heap::kOld); | 
 | } | 
 |  | 
 | const char* ClosureData::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "ClosureData: null"; | 
 |   } | 
 |   auto const zone = Thread::Current()->zone(); | 
 |   ZoneTextBuffer buffer(zone); | 
 |   buffer.Printf("ClosureData: context_scope: 0x%" Px "", | 
 |                 static_cast<uword>(context_scope())); | 
 |   buffer.AddString(" parent_function: "); | 
 |   if (parent_function() == Object::null()) { | 
 |     buffer.AddString("null"); | 
 |   } else { | 
 |     buffer.AddString(Object::Handle(parent_function()).ToCString()); | 
 |   } | 
 |   buffer.Printf(" implicit_static_closure: 0x%" Px "", | 
 |                 static_cast<uword>(implicit_static_closure())); | 
 |   return buffer.buffer(); | 
 | } | 
 |  | 
 | void FunctionType::set_num_fixed_parameters(intptr_t value) const { | 
 |   ASSERT(value >= 0); | 
 |   untag()->packed_parameter_counts_.Update<PackedNumFixedParameters>(value); | 
 | } | 
 |  | 
 | void FfiTrampolineData::set_callback_target(const Function& value) const { | 
 |   untag()->set_callback_target(value.ptr()); | 
 | } | 
 |  | 
 | void FunctionType::SetNumOptionalParameters( | 
 |     intptr_t value, | 
 |     bool are_optional_positional) const { | 
 |   // HasOptionalNamedParameters only checks this bit, so only set it if there | 
 |   // are actual named parameters. | 
 |   untag()->packed_parameter_counts_.Update<PackedHasNamedOptionalParameters>( | 
 |       (value > 0) && !are_optional_positional); | 
 |   untag()->packed_parameter_counts_.Update<PackedNumOptionalParameters>(value); | 
 | } | 
 |  | 
 | FunctionTypePtr FunctionType::New(Heap::Space space) { | 
 |   return Object::Allocate<FunctionType>(space); | 
 | } | 
 |  | 
 | FunctionTypePtr FunctionType::New(intptr_t num_parent_type_arguments, | 
 |                                   Nullability nullability, | 
 |                                   Heap::Space space) { | 
 |   Zone* Z = Thread::Current()->zone(); | 
 |   const FunctionType& result = | 
 |       FunctionType::Handle(Z, FunctionType::New(space)); | 
 |   result.set_packed_parameter_counts(0); | 
 |   result.set_packed_type_parameter_counts(0); | 
 |   result.set_named_parameter_names(Object::empty_array()); | 
 |   result.SetNumParentTypeArguments(num_parent_type_arguments); | 
 |   result.SetHash(0); | 
 |   result.set_flags(0); | 
 |   result.set_nullability(nullability); | 
 |   result.set_type_state(UntaggedAbstractType::kAllocated); | 
 |   result.InitializeTypeTestingStubNonAtomic( | 
 |       Code::Handle(Z, TypeTestingStubGenerator::DefaultCodeForType(result))); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | FunctionTypePtr FunctionType::Clone(const FunctionType& orig, | 
 |                                     Heap::Space space) { | 
 |   if (orig.IsGeneric()) { | 
 |     // Need a deep clone in order to update owners of type parameters. | 
 |     return FunctionType::RawCast( | 
 |         orig.UpdateFunctionTypes(0, kAllFree, space, nullptr)); | 
 |   } else { | 
 |     return FunctionType::RawCast(Object::Clone(orig, space)); | 
 |   } | 
 | } | 
 |  | 
 | const char* FunctionType::ToUserVisibleCString() const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   ZoneTextBuffer printer(zone); | 
 |   Print(kUserVisibleName, &printer); | 
 |   return printer.buffer(); | 
 | } | 
 |  | 
 | StringPtr FunctionType::ToUserVisibleString() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   Print(kUserVisibleName, &printer); | 
 |   return Symbols::New(thread, printer.buffer()); | 
 | } | 
 |  | 
 | const char* FunctionType::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "FunctionType: null"; | 
 |   } | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   ZoneTextBuffer printer(zone); | 
 |   const char* suffix = NullabilitySuffix(kInternalName); | 
 |   if (suffix[0] != '\0') { | 
 |     printer.AddString("("); | 
 |   } | 
 |   Print(kInternalName, &printer); | 
 |   if (suffix[0] != '\0') { | 
 |     printer.AddString(")"); | 
 |     printer.AddString(suffix); | 
 |   } | 
 |   return printer.buffer(); | 
 | } | 
 |  | 
 | void ClosureData::set_context_scope(const ContextScope& value) const { | 
 |   untag()->set_context_scope(value.ptr()); | 
 | } | 
 |  | 
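// The implicit static closure is initialized at most once and may be read
// concurrently without a lock, so it is published with a release store.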
 | void ClosureData::set_implicit_static_closure(const Closure& closure) const { | 
 |   ASSERT(!closure.IsNull()); | 
 |   ASSERT(untag()->closure() == Closure::null()); | 
 |   untag()->set_closure<std::memory_order_release>(closure.ptr()); | 
 | } | 
 |  | 
 | void FfiTrampolineData::set_c_signature(const FunctionType& value) const { | 
 |   untag()->set_c_signature(value.ptr()); | 
 | } | 
 |  | 
 | void FfiTrampolineData::set_callback_id(int32_t callback_id) const { | 
 |   StoreNonPointer(&untag()->callback_id_, callback_id); | 
 | } | 
 |  | 
 | void FfiTrampolineData::set_callback_exceptional_return( | 
 |     const Instance& value) const { | 
 |   untag()->set_callback_exceptional_return(value.ptr()); | 
 | } | 
 |  | 
 | void FfiTrampolineData::set_ffi_function_kind(FfiCallbackKind kind) const { | 
 |   StoreNonPointer(&untag()->ffi_function_kind_, static_cast<uint8_t>(kind)); | 
 | } | 
 |  | 
 | FfiTrampolineDataPtr FfiTrampolineData::New() { | 
 |   ASSERT(Object::ffi_trampoline_data_class() != Class::null()); | 
 |   const auto& data = FfiTrampolineData::Handle( | 
 |       Object::Allocate<FfiTrampolineData>(Heap::kOld)); | 
 |   data.set_callback_id(-1); | 
 |   return data.ptr(); | 
 | } | 
 |  | 
 | const char* FfiTrampolineData::ToCString() const { | 
 |   const FunctionType& c_sig = FunctionType::Handle(c_signature()); | 
 |   return OS::SCreate(Thread::Current()->zone(), | 
 |                      "TrampolineData: c_signature=%s", | 
 |                      c_sig.ToUserVisibleCString()); | 
 | } | 
 |  | 
 | FieldPtr Field::CloneFromOriginal() const { | 
 |   return this->Clone(*this); | 
 | } | 
 |  | 
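// Returns the original field if this field is a clone created for the
// background compiler (in which case its owner slot points back at the
// original); otherwise returns the field itself.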
 | FieldPtr Field::Original() const { | 
 |   if (IsNull()) { | 
 |     return Field::null(); | 
 |   } | 
 |   if (untag()->owner()->IsField()) { | 
 |     return static_cast<FieldPtr>(untag()->owner()); | 
 |   } | 
 |   return this->ptr(); | 
 | } | 
 |  | 
 | intptr_t Field::guarded_cid() const { | 
 | #if defined(DEBUG) | 
  // This assertion ensures that the cid seen by the background compiler is
  // consistent. Hence the assertion passes if the field is a clone. It also
  // passes if the field is static, because we don't use field guards on
  // static fields. It also passes if we're compiling unoptimized code (in
  // which case the caller might get different answers if it obtains the
  // guarded cid multiple times).
 |   Thread* thread = Thread::Current(); | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   ASSERT(!thread->IsInsideCompiler() || is_static()); | 
 | #else | 
 |   ASSERT(!thread->IsInsideCompiler() || | 
 |          ((CompilerState::Current().should_clone_fields() == !IsOriginal())) || | 
 |          is_static()); | 
 | #endif | 
 | #endif | 
 |   return LoadNonPointer<ClassIdTagType, std::memory_order_relaxed>( | 
 |       &untag()->guarded_cid_); | 
 | } | 
 |  | 
 | bool Field::is_nullable() const { | 
 | #if defined(DEBUG) | 
 |   // Same assert as guarded_cid(), because is_nullable() also needs to be | 
 |   // consistent for the background compiler. | 
 |   Thread* thread = Thread::Current(); | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   ASSERT(!thread->IsInsideCompiler() || is_static()); | 
 | #else | 
 |   ASSERT(!thread->IsInsideCompiler() || | 
 |          ((CompilerState::Current().should_clone_fields() == !IsOriginal())) || | 
 |          is_static()); | 
 | #endif | 
 | #endif | 
 |   return is_nullable_unsafe(); | 
 | } | 
 |  | 
 | void Field::SetOriginal(const Field& value) const { | 
 |   ASSERT(value.IsOriginal()); | 
 |   ASSERT(!value.IsNull()); | 
 |   untag()->set_owner(static_cast<ObjectPtr>(value.ptr())); | 
 | } | 
 |  | 
 | StringPtr Field::GetterName(const String& field_name) { | 
 |   return String::Concat(Symbols::GetterPrefix(), field_name); | 
 | } | 
 |  | 
 | StringPtr Field::GetterSymbol(const String& field_name) { | 
 |   return Symbols::FromGet(Thread::Current(), field_name); | 
 | } | 
 |  | 
 | StringPtr Field::LookupGetterSymbol(const String& field_name) { | 
 |   return Symbols::LookupFromGet(Thread::Current(), field_name); | 
 | } | 
 |  | 
 | StringPtr Field::SetterName(const String& field_name) { | 
 |   return String::Concat(Symbols::SetterPrefix(), field_name); | 
 | } | 
 |  | 
 | StringPtr Field::SetterSymbol(const String& field_name) { | 
 |   return Symbols::FromSet(Thread::Current(), field_name); | 
 | } | 
 |  | 
 | StringPtr Field::LookupSetterSymbol(const String& field_name) { | 
 |   return Symbols::LookupFromSet(Thread::Current(), field_name); | 
 | } | 
 |  | 
 | StringPtr Field::NameFromGetter(const String& getter_name) { | 
 |   return Symbols::New(Thread::Current(), getter_name, kGetterPrefixLength, | 
 |                       getter_name.Length() - kGetterPrefixLength); | 
 | } | 
 |  | 
 | StringPtr Field::NameFromSetter(const String& setter_name) { | 
 |   return Symbols::New(Thread::Current(), setter_name, kSetterPrefixLength, | 
 |                       setter_name.Length() - kSetterPrefixLength); | 
 | } | 
 |  | 
 | StringPtr Field::NameFromInit(const String& init_name) { | 
 |   return Symbols::New(Thread::Current(), init_name, kInitPrefixLength, | 
 |                       init_name.Length() - kInitPrefixLength); | 
 | } | 
 |  | 
 | bool Field::IsGetterName(const String& function_name) { | 
 |   return function_name.StartsWith(Symbols::GetterPrefix()); | 
 | } | 
 |  | 
 | bool Field::IsSetterName(const String& function_name) { | 
 |   return function_name.StartsWith(Symbols::SetterPrefix()); | 
 | } | 
 |  | 
 | bool Field::IsInitName(const String& function_name) { | 
 |   return function_name.StartsWith(Symbols::InitPrefix()); | 
 | } | 
 |  | 
 | void Field::set_name(const String& value) const { | 
 |   ASSERT(value.IsSymbol()); | 
 |   ASSERT(IsOriginal()); | 
 |   untag()->set_name(value.ptr()); | 
 | } | 
 |  | 
 | ObjectPtr Field::RawOwner() const { | 
 |   if (IsOriginal()) { | 
 |     return untag()->owner(); | 
 |   } else { | 
 |     const Field& field = Field::Handle(Original()); | 
 |     ASSERT(field.IsOriginal()); | 
 |     ASSERT(!Object::Handle(field.untag()->owner()).IsField()); | 
 |     return field.untag()->owner(); | 
 |   } | 
 | } | 
 |  | 
 | ClassPtr Field::Owner() const { | 
 |   const Field& field = Field::Handle(Original()); | 
 |   ASSERT(field.IsOriginal()); | 
 |   const Object& obj = Object::Handle(field.untag()->owner()); | 
 |   if (obj.IsClass()) { | 
 |     return Class::Cast(obj).ptr(); | 
 |   } | 
 |   ASSERT(obj.IsPatchClass()); | 
 |   return PatchClass::Cast(obj).wrapped_class(); | 
 | } | 
 |  | 
 | ScriptPtr Field::Script() const { | 
 |   // NOTE(turnidge): If you update this function, you probably want to | 
 |   // update Class::PatchFieldsAndFunctions() at the same time. | 
 |   const Field& field = Field::Handle(Original()); | 
 |   ASSERT(field.IsOriginal()); | 
 |   const Object& obj = Object::Handle(field.untag()->owner()); | 
 |   if (obj.IsClass()) { | 
 |     return Class::Cast(obj).script(); | 
 |   } | 
 |   ASSERT(obj.IsPatchClass()); | 
 |   return PatchClass::Cast(obj).script(); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | KernelProgramInfoPtr Field::KernelProgramInfo() const { | 
 |   const auto& owner = Object::Handle(RawOwner()); | 
 |   if (owner.IsClass()) { | 
 |     return Class::Cast(owner).KernelProgramInfo(); | 
 |   } | 
 |   return PatchClass::Cast(owner).kernel_program_info(); | 
 | } | 
 | #endif | 
 |  | 
 | uint32_t Field::Hash() const { | 
 |   return String::HashRawSymbol(name()); | 
 | } | 
 |  | 
 | void Field::InheritKernelOffsetFrom(const Field& src) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   StoreNonPointer(&untag()->kernel_offset_, src.untag()->kernel_offset_); | 
 | #endif | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | TypedDataViewPtr Field::KernelLibrary() const { | 
 |   const auto& info = KernelProgramInfo::Handle(KernelProgramInfo()); | 
 |   return info.KernelLibrary(KernelLibraryIndex()); | 
 | } | 
 |  | 
 | intptr_t Field::KernelLibraryOffset() const { | 
 |   const intptr_t kernel_library_index = KernelLibraryIndex(); | 
 |   if (kernel_library_index == -1) return 0; | 
 |   const auto& info = KernelProgramInfo::Handle(KernelProgramInfo()); | 
 |   return info.KernelLibraryStartOffset(kernel_library_index); | 
 | } | 
 |  | 
 | intptr_t Field::KernelLibraryIndex() const { | 
 |   const Object& obj = Object::Handle(untag()->owner()); | 
  // During background JIT compilation field objects are copied and the
  // copy points back to the original field through its owner slot.
 |   if (obj.IsField()) { | 
 |     return Field::Cast(obj).KernelLibraryIndex(); | 
 |   } else if (obj.IsClass()) { | 
 |     const auto& lib = Library::Handle(Class::Cast(obj).library()); | 
 |     return lib.kernel_library_index(); | 
 |   } | 
 |   ASSERT(obj.IsPatchClass()); | 
 |   return PatchClass::Cast(obj).kernel_library_index(); | 
 | } | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | void Field::SetFieldTypeSafe(const AbstractType& value) const { | 
 |   ASSERT(IsOriginal()); | 
 |   ASSERT(!value.IsNull()); | 
 |   if (value.ptr() != type()) { | 
 |     untag()->set_type(value.ptr()); | 
 |   } | 
 | } | 
 |  | 
// Called at finalization time.
 | void Field::SetFieldType(const AbstractType& value) const { | 
 |   DEBUG_ASSERT( | 
 |       IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   SetFieldTypeSafe(value); | 
 | } | 
 |  | 
 | FieldPtr Field::New() { | 
 |   ASSERT(Object::field_class() != Class::null()); | 
 |   return Object::Allocate<Field>(Heap::kOld); | 
 | } | 
 |  | 
 | void Field::InitializeNew(const Field& result, | 
 |                           const String& name, | 
 |                           bool is_static, | 
 |                           bool is_final, | 
 |                           bool is_const, | 
 |                           bool is_reflectable, | 
 |                           bool is_late, | 
 |                           const Object& owner, | 
 |                           TokenPosition token_pos, | 
 |                           TokenPosition end_token_pos) { | 
 |   result.set_kind_bits(0); | 
 |   result.set_name(name); | 
 |   result.set_is_static(is_static); | 
 |   if (is_static) { | 
 |     result.set_field_id_unsafe(-1); | 
 |   } else { | 
 |     result.SetOffset(0, 0); | 
 |   } | 
 |   result.set_is_final(is_final); | 
 |   result.set_is_const(is_const); | 
 |   result.set_is_reflectable(is_reflectable); | 
 |   result.set_is_late(is_late); | 
 |   result.set_owner(owner); | 
 |   result.set_token_pos(token_pos); | 
 |   result.set_end_token_pos(end_token_pos); | 
 |   result.set_has_nontrivial_initializer_unsafe(false); | 
 |   result.set_has_initializer_unsafe(false); | 
  // We will make the unboxing decision once we read the static type or
  // in KernelLoader::ReadInferredType.
 |   result.set_is_unboxed_unsafe(false); | 
 |   result.set_initializer_changed_after_initialization(false); | 
 |   NOT_IN_PRECOMPILED(result.set_kernel_offset(0)); | 
 |   result.set_has_pragma(false); | 
 |   result.set_static_type_exactness_state_unsafe( | 
 |       StaticTypeExactnessState::NotTracking()); | 
 |   auto isolate_group = IsolateGroup::Current(); | 
 |  | 
 | // Use field guards if they are enabled and the isolate has never reloaded. | 
 | // TODO(johnmccutchan): The reload case assumes the worst case (everything is | 
 | // dynamic and possibly null). Attempt to relax this later. | 
 | // | 
 | // Do not use field guards for late fields as late field initialization | 
 | // doesn't update guarded cid and length. | 
 | #if defined(PRODUCT) | 
 |   const bool use_guarded_cid = | 
 |       FLAG_precompiled_mode || (isolate_group->use_field_guards() && !is_late); | 
 | #else | 
 |   const bool use_guarded_cid = | 
 |       FLAG_precompiled_mode || | 
 |       (isolate_group->use_field_guards() && | 
 |        !isolate_group->HasAttemptedReload() && !is_late); | 
#endif  // defined(PRODUCT)
 |   result.set_guarded_cid_unsafe(use_guarded_cid ? kIllegalCid : kDynamicCid); | 
  result.set_is_nullable_unsafe(!use_guarded_cid);
 |   result.set_guarded_list_length_in_object_offset_unsafe( | 
 |       Field::kUnknownLengthOffset); | 
 |   // Presently, we only attempt to remember the list length for final fields. | 
 |   if (is_final && use_guarded_cid) { | 
 |     result.set_guarded_list_length_unsafe(Field::kUnknownFixedLength); | 
 |   } else { | 
 |     result.set_guarded_list_length_unsafe(Field::kNoFixedLength); | 
 |   } | 
 | } | 
 |  | 
 | FieldPtr Field::New(const String& name, | 
 |                     bool is_static, | 
 |                     bool is_final, | 
 |                     bool is_const, | 
 |                     bool is_reflectable, | 
 |                     bool is_late, | 
 |                     const Object& owner, | 
 |                     const AbstractType& type, | 
 |                     TokenPosition token_pos, | 
 |                     TokenPosition end_token_pos) { | 
 |   ASSERT(!owner.IsNull()); | 
 |   const Field& result = Field::Handle(Field::New()); | 
 |   InitializeNew(result, name, is_static, is_final, is_const, is_reflectable, | 
 |                 is_late, owner, token_pos, end_token_pos); | 
 |   result.SetFieldTypeSafe(type); | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   compiler::target::UnboxFieldIfSupported(result, type); | 
 | #endif | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | FieldPtr Field::NewTopLevel(const String& name, | 
 |                             bool is_final, | 
 |                             bool is_const, | 
 |                             bool is_late, | 
 |                             const Object& owner, | 
 |                             TokenPosition token_pos, | 
 |                             TokenPosition end_token_pos) { | 
 |   ASSERT(!owner.IsNull()); | 
 |   const Field& result = Field::Handle(Field::New()); | 
  InitializeNew(result, name, /*is_static=*/true, is_final, is_const,
                /*is_reflectable=*/true, is_late, owner, token_pos,
                end_token_pos);
 |   return result.ptr(); | 
 | } | 
 |  | 
 | FieldPtr Field::Clone(const Field& original) const { | 
 |   if (original.IsNull()) { | 
 |     return Field::null(); | 
 |   } | 
 |   ASSERT(original.IsOriginal()); | 
 |   Field& clone = Field::Handle(); | 
  // Using relaxed loading is fine because concurrent field changes are all
  // guarded and will be reconciled during optimized code installation.
 |   clone ^= Object::Clone(*this, Heap::kOld, /*load_with_relaxed_atomics=*/true); | 
 |   clone.SetOriginal(original); | 
 |   clone.InheritKernelOffsetFrom(original); | 
 |   return clone.ptr(); | 
 | } | 
 |  | 
 | int32_t Field::SourceFingerprint() const { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   return kernel::KernelSourceFingerprintHelper::CalculateFieldFingerprint( | 
 |       *this); | 
 | #else | 
 |   return 0; | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | StringPtr Field::InitializingExpression() const { | 
 |   UNREACHABLE(); | 
 |   return String::null(); | 
 | } | 
 |  | 
 | const char* Field::UserVisibleNameCString() const { | 
 |   NoSafepointScope no_safepoint; | 
 |   if (FLAG_show_internal_names) { | 
 |     return String::Handle(name()).ToCString(); | 
 |   } | 
 |   return String::ScrubName(String::Handle(name()), | 
 |                            is_extension_member() || is_extension_type_member()); | 
 | } | 
 |  | 
 | StringPtr Field::UserVisibleName() const { | 
 |   if (FLAG_show_internal_names) { | 
 |     return name(); | 
 |   } | 
 |   return Symbols::New( | 
 |       Thread::Current(), | 
 |       String::ScrubName(String::Handle(name()), | 
 |                         is_extension_member() || is_extension_type_member())); | 
 | } | 
 |  | 
 | intptr_t Field::guarded_list_length() const { | 
 |   return Smi::Value(untag()->guarded_list_length()); | 
 | } | 
 |  | 
 | void Field::set_guarded_list_length_unsafe(intptr_t list_length) const { | 
 |   ASSERT(IsOriginal()); | 
 |   untag()->set_guarded_list_length(Smi::New(list_length)); | 
 | } | 
 |  | 
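// The offset is stored in an int8_t biased by kHeapObjectTag, presumably so
// that generated code can apply it directly to a tagged pointer.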
 | intptr_t Field::guarded_list_length_in_object_offset() const { | 
 |   return untag()->guarded_list_length_in_object_offset_ + kHeapObjectTag; | 
 | } | 
 |  | 
 | void Field::set_guarded_list_length_in_object_offset_unsafe( | 
 |     intptr_t list_length_offset) const { | 
 |   ASSERT(IsOriginal()); | 
 |   StoreNonPointer<int8_t, int8_t, std::memory_order_relaxed>( | 
 |       &untag()->guarded_list_length_in_object_offset_, | 
 |       static_cast<int8_t>(list_length_offset - kHeapObjectTag)); | 
 |   ASSERT(guarded_list_length_in_object_offset() == list_length_offset); | 
 | } | 
 |  | 
 | bool Field::NeedsSetter() const { | 
  // According to the Dart language specification, final fields don't have
  // a setter, except late final fields without an initializer.
 |   if (is_final()) { | 
    // Late final fields without an initializer always need a setter to check
    // whether they have already been initialized.
 |     if (is_late() && !has_initializer()) { | 
 |       return true; | 
 |     } | 
 |     return false; | 
 |   } | 
 |  | 
 |   // Instance non-final fields always need a setter. | 
 |   if (!is_static()) { | 
 |     return true; | 
 |   } | 
 |  | 
 |   // Otherwise, setters for static fields can be omitted | 
 |   // and fields can be accessed directly. | 
 |   return false; | 
 | } | 
 |  | 
 | bool Field::NeedsGetter() const { | 
 |   // All instance fields need a getter. | 
 |   if (!is_static()) return true; | 
 |  | 
  // Static fields also need a getter if they have a non-trivial initializer,
  // because they need to be initialized lazily.
 |   if (has_nontrivial_initializer()) return true; | 
 |  | 
  // Static late fields with no initializer also need a getter, to check
  // whether they have been initialized.
 |   return is_late() && !has_initializer(); | 
 | } | 
 |  | 
 | const char* Field::ToCString() const { | 
 |   NoSafepointScope no_safepoint; | 
 |   if (IsNull()) { | 
 |     return "Field: null"; | 
 |   } | 
 |   const char* kF0 = is_static() ? " static" : ""; | 
 |   const char* kF1 = is_late() ? " late" : ""; | 
 |   const char* kF2 = is_final() ? " final" : ""; | 
 |   const char* kF3 = is_const() ? " const" : ""; | 
 |   const char* field_name = String::Handle(name()).ToCString(); | 
 |   const Class& cls = Class::Handle(Owner()); | 
 |   const char* cls_name = String::Handle(cls.Name()).ToCString(); | 
 |   return OS::SCreate(Thread::Current()->zone(), "Field <%s.%s>:%s%s%s%s", | 
 |                      cls_name, field_name, kF0, kF1, kF2, kF3); | 
 | } | 
 |  | 
// Build a closure object that gets (or sets) the contents of a static
// field f and cache the closure in a newly created static field
// named #f (or ##f in the case of a setter).
 | InstancePtr Field::AccessorClosure(bool make_setter) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   ASSERT(is_static()); | 
 |   const Class& field_owner = Class::Handle(zone, Owner()); | 
 |  | 
 |   String& closure_name = String::Handle(zone, this->name()); | 
 |   closure_name = Symbols::FromConcat(thread, Symbols::HashMark(), closure_name); | 
 |   if (make_setter) { | 
 |     closure_name = | 
 |         Symbols::FromConcat(thread, Symbols::HashMark(), closure_name); | 
 |   } | 
 |  | 
 |   Field& closure_field = Field::Handle(zone); | 
 |   closure_field = field_owner.LookupStaticField(closure_name); | 
 |   if (!closure_field.IsNull()) { | 
 |     ASSERT(closure_field.is_static()); | 
 |     const Instance& closure = | 
 |         Instance::Handle(zone, Instance::RawCast(closure_field.StaticValue())); | 
 |     ASSERT(!closure.IsNull()); | 
 |     ASSERT(closure.IsClosure()); | 
 |     return closure.ptr(); | 
 |   } | 
 |  | 
 |   UNREACHABLE(); | 
 |   return Instance::null(); | 
 | } | 
 |  | 
 | InstancePtr Field::GetterClosure() const { | 
 |   return AccessorClosure(false); | 
 | } | 
 |  | 
 | InstancePtr Field::SetterClosure() const { | 
 |   return AccessorClosure(true); | 
 | } | 
 |  | 
 | WeakArrayPtr Field::dependent_code() const { | 
 |   DEBUG_ASSERT( | 
 |       IsolateGroup::Current()->program_lock()->IsCurrentThreadReader()); | 
 |   return untag()->dependent_code(); | 
 | } | 
 |  | 
 | void Field::set_dependent_code(const WeakArray& array) const { | 
 |   ASSERT(IsOriginal()); | 
 |   DEBUG_ASSERT( | 
 |       IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   untag()->set_dependent_code(array.ptr()); | 
 | } | 
 |  | 
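// Tracks the optimized code that depends on the guarded state of a field,
// so that it can be deoptimized or switched back to unoptimized code when
// a guard is violated.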
 | class FieldDependentArray : public WeakCodeReferences { | 
 |  public: | 
 |   explicit FieldDependentArray(const Field& field) | 
 |       : WeakCodeReferences(WeakArray::Handle(field.dependent_code())), | 
 |         field_(field) {} | 
 |  | 
 |   virtual void UpdateArrayTo(const WeakArray& value) { | 
 |     field_.set_dependent_code(value); | 
 |   } | 
 |  | 
 |   virtual void ReportDeoptimization(const Code& code) { | 
 |     if (FLAG_trace_deoptimization || FLAG_trace_deoptimization_verbose) { | 
 |       Function& function = Function::Handle(code.function()); | 
 |       THR_Print("Deoptimizing %s because guard on field %s failed.\n", | 
 |                 function.ToFullyQualifiedCString(), field_.ToCString()); | 
 |     } | 
 |   } | 
 |  | 
 |   virtual void ReportSwitchingCode(const Code& code) { | 
 |     if (FLAG_trace_deoptimization || FLAG_trace_deoptimization_verbose) { | 
 |       Function& function = Function::Handle(code.function()); | 
 |       THR_Print( | 
 |           "Switching '%s' to unoptimized code because guard" | 
 |           " on field '%s' was violated.\n", | 
 |           function.ToFullyQualifiedCString(), field_.ToCString()); | 
 |     } | 
 |   } | 
 |  | 
 |  private: | 
 |   const Field& field_; | 
 |   DISALLOW_COPY_AND_ASSIGN(FieldDependentArray); | 
 | }; | 
 |  | 
 | void Field::RegisterDependentCode(const Code& code) const { | 
 |   ASSERT(IsOriginal()); | 
 |   DEBUG_ASSERT(IsMutatorOrAtDeoptSafepoint()); | 
 |   ASSERT(code.is_optimized()); | 
 |   FieldDependentArray a(*this); | 
 |   a.Register(code); | 
 | } | 
 |  | 
 | void Field::DeoptimizeDependentCode(bool are_mutators_stopped) const { | 
 |   DEBUG_ASSERT( | 
 |       IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   ASSERT(IsOriginal()); | 
 |   FieldDependentArray a(*this); | 
 |   if (FLAG_trace_deoptimization && a.HasCodes()) { | 
 |     THR_Print("Deopt for field guard (field %s)\n", ToCString()); | 
 |   } | 
 |   a.DisableCode(are_mutators_stopped); | 
 | } | 
 |  | 
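// Whether this field's guarded state (guarded cid, nullability, list length,
// unboxing and exactness) matches [other]'s, e.g. when checking that a field
// clone is still in sync with its original during optimized code
// installation.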
 | bool Field::IsConsistentWith(const Field& other) const { | 
 |   return (untag()->guarded_cid_ == other.untag()->guarded_cid_) && | 
 |          (untag()->is_nullable_ == other.untag()->is_nullable_) && | 
 |          (untag()->guarded_list_length() == | 
 |           other.untag()->guarded_list_length()) && | 
 |          (is_unboxed() == other.is_unboxed()) && | 
 |          (static_type_exactness_state().Encode() == | 
 |           other.static_type_exactness_state().Encode()); | 
 | } | 
 |  | 
 | bool Field::IsUninitialized() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   const FieldTable* field_table = thread->isolate()->field_table(); | 
 |   const ObjectPtr raw_value = field_table->At(field_id()); | 
 |   ASSERT(raw_value != Object::transition_sentinel().ptr()); | 
 |   return raw_value == Object::sentinel().ptr(); | 
 | } | 
 |  | 
 | FunctionPtr Field::EnsureInitializerFunction() const { | 
 |   ASSERT(has_nontrivial_initializer()); | 
 |   ASSERT(IsOriginal()); | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   Function& initializer = Function::Handle(zone, InitializerFunction()); | 
 |   if (initializer.IsNull()) { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |     UNREACHABLE(); | 
 | #else | 
 |     SafepointMutexLocker ml( | 
 |         thread->isolate_group()->initializer_functions_mutex()); | 
 |     // Double check after grabbing the lock. | 
 |     initializer = InitializerFunction(); | 
 |     if (initializer.IsNull()) { | 
 |       initializer = kernel::CreateFieldInitializerFunction(thread, zone, *this); | 
 |     } | 
 | #endif | 
 |   } | 
 |   return initializer.ptr(); | 
 | } | 
 |  | 
 | void Field::SetInitializerFunction(const Function& initializer) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   ASSERT(IsOriginal()); | 
 |   ASSERT(IsolateGroup::Current() | 
 |              ->initializer_functions_mutex() | 
 |              ->IsOwnedByCurrentThread()); | 
 |   // We have to ensure that all stores into the initializer function object | 
 |   // happen before releasing the pointer to the initializer as it may be | 
 |   // accessed without grabbing the lock. | 
 |   untag()->set_initializer_function<std::memory_order_release>( | 
 |       initializer.ptr()); | 
 | #endif | 
 | } | 
 |  | 
 | bool Field::HasInitializerFunction() const { | 
 |   return untag()->initializer_function() != Function::null(); | 
 | } | 
 |  | 
 | ErrorPtr Field::InitializeInstance(const Instance& instance) const { | 
 |   ASSERT(IsOriginal()); | 
 |   ASSERT(is_instance()); | 
 |   ASSERT(instance.GetField(*this) == Object::sentinel().ptr()); | 
 |   Object& value = Object::Handle(); | 
 |  | 
 |   if (has_nontrivial_initializer()) { | 
 |     const Function& initializer = Function::Handle(EnsureInitializerFunction()); | 
 |     const Array& args = Array::Handle(Array::New(1)); | 
 |     args.SetAt(0, instance); | 
 |     value = DartEntry::InvokeFunction(initializer, args); | 
 |     if (!value.IsNull() && value.IsError()) { | 
 |       return Error::Cast(value).ptr(); | 
 |     } | 
 |   } else { | 
 |     if (is_late() && !has_initializer()) { | 
 |       Exceptions::ThrowLateFieldNotInitialized(String::Handle(name())); | 
 |       UNREACHABLE(); | 
 |     } | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |     UNREACHABLE(); | 
 | #else | 
 |     // Our trivial initializer is `null`. Any non-`null` initializer is | 
 |     // non-trivial (see `KernelLoader::CheckForInitializer()`). | 
 |     value = Object::null(); | 
 | #endif | 
 |   } | 
 |   ASSERT(value.IsNull() || value.IsInstance()); | 
 |   if (is_late() && is_final() && | 
 |       (instance.GetField(*this) != Object::sentinel().ptr())) { | 
 |     Exceptions::ThrowLateFieldAssignedDuringInitialization( | 
 |         String::Handle(name())); | 
 |     UNREACHABLE(); | 
 |   } | 
 |   instance.SetField(*this, value); | 
 |   return Error::null(); | 
 | } | 
 |  | 
 | ErrorPtr Field::InitializeStatic() const { | 
 |   ASSERT(IsOriginal()); | 
 |   ASSERT(is_static()); | 
 |   if (StaticValue() == Object::sentinel().ptr()) { | 
 |     auto& value = Object::Handle(); | 
 |     if (is_late()) { | 
 |       if (!has_initializer()) { | 
 |         Exceptions::ThrowLateFieldNotInitialized(String::Handle(name())); | 
 |         UNREACHABLE(); | 
 |       } | 
 |       value = EvaluateInitializer(); | 
 |       if (value.IsError()) { | 
 |         return Error::Cast(value).ptr(); | 
 |       } | 
 |       if (is_final() && (StaticValue() != Object::sentinel().ptr())) { | 
 |         Exceptions::ThrowLateFieldAssignedDuringInitialization( | 
 |             String::Handle(name())); | 
 |         UNREACHABLE(); | 
 |       } | 
 |     } else { | 
 |       SetStaticValue(Object::transition_sentinel()); | 
 |       value = EvaluateInitializer(); | 
 |       if (value.IsError()) { | 
 |         SetStaticValue(Object::null_instance()); | 
 |         return Error::Cast(value).ptr(); | 
 |       } | 
 |     } | 
 |     ASSERT(value.IsNull() || value.IsInstance()); | 
 |     SetStaticValue(value.IsNull() ? Instance::null_instance() | 
 |                                   : Instance::Cast(value)); | 
 |     return Error::null(); | 
 |   } else if (StaticValue() == Object::transition_sentinel().ptr()) { | 
 |     ASSERT(!is_late()); | 
 |     const Array& ctor_args = Array::Handle(Array::New(1)); | 
 |     const String& field_name = String::Handle(name()); | 
 |     ctor_args.SetAt(0, field_name); | 
 |     Exceptions::ThrowByType(Exceptions::kCyclicInitializationError, ctor_args); | 
 |     UNREACHABLE(); | 
 |   } | 
 |   return Error::null(); | 
 | } | 
 |  | 
 | ObjectPtr Field::StaticConstFieldValue() const { | 
 |   ASSERT(is_static() && | 
 |          (is_const() || (is_final() && has_trivial_initializer()))); | 
 |  | 
 |   auto thread = Thread::Current(); | 
 |   auto zone = thread->zone(); | 
 |   auto initial_field_table = thread->isolate_group()->initial_field_table(); | 
 |  | 
 |   // We can safely cache the value of the static const field in the initial | 
 |   // field table. | 
 |   auto& value = Object::Handle( | 
 |       zone, initial_field_table->At(field_id(), /*concurrent_use=*/true)); | 
 |   if (value.ptr() == Object::sentinel().ptr()) { | 
 |     // Fields with trivial initializers get their initial value | 
 |     // eagerly when they are registered. | 
 |     ASSERT(is_const()); | 
 |     ASSERT(has_initializer()); | 
 |     ASSERT(has_nontrivial_initializer()); | 
 |     value = EvaluateInitializer(); | 
 |     if (!value.IsError()) { | 
 |       ASSERT(value.IsNull() || value.IsInstance()); | 
 |       SetStaticConstFieldValue(value.IsNull() ? Instance::null_instance() | 
 |                                               : Instance::Cast(value)); | 
 |     } | 
 |   } | 
 |   return value.ptr(); | 
 | } | 
 |  | 
 | void Field::SetStaticConstFieldValue(const Instance& value, | 
 |                                      bool assert_initializing_store) const { | 
 |   ASSERT(is_static()); | 
 |   auto thread = Thread::Current(); | 
 |   auto initial_field_table = thread->isolate_group()->initial_field_table(); | 
 |  | 
 |   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |   ASSERT(initial_field_table->At(field_id()) == Object::sentinel().ptr() || | 
 |          initial_field_table->At(field_id()) == value.ptr() || | 
 |          !assert_initializing_store); | 
 |   initial_field_table->SetAt(field_id(), | 
 |                              value.IsNull() ? Instance::null_instance().ptr() | 
 |                                             : Instance::Cast(value).ptr(), | 
 |                              /*concurrent_use=*/true); | 
 | } | 
 |  | 
 | ObjectPtr Field::EvaluateInitializer() const { | 
 |   ASSERT(Thread::Current()->IsDartMutatorThread()); | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (is_static() && is_const()) { | 
 |     return kernel::EvaluateStaticConstFieldInitializer(*this); | 
 |   } | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 |   const Function& initializer = Function::Handle(EnsureInitializerFunction()); | 
 |   return DartEntry::InvokeFunction(initializer, Object::empty_array()); | 
 | } | 
 |  | 
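// Returns the length of [value] if it is a list with a fixed length, or
// Field::kNoFixedLength otherwise.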
 | static intptr_t GetListLength(const Object& value) { | 
 |   if (value.IsTypedDataBase()) { | 
 |     return TypedDataBase::Cast(value).Length(); | 
 |   } else if (value.IsArray()) { | 
 |     return Array::Cast(value).Length(); | 
 |   } else if (value.IsGrowableObjectArray()) { | 
 |     // List length is variable. | 
 |     return Field::kNoFixedLength; | 
 |   } | 
 |   return Field::kNoFixedLength; | 
 | } | 
 |  | 
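// Returns the in-object offset of the length field for list classes whose
// instances have a fixed length, or Field::kUnknownLengthOffset otherwise.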
 | static intptr_t GetListLengthOffset(intptr_t cid) { | 
 |   if (IsTypedDataClassId(cid) || IsTypedDataViewClassId(cid) || | 
 |       IsUnmodifiableTypedDataViewClassId(cid) || | 
 |       IsExternalTypedDataClassId(cid)) { | 
 |     return TypedData::length_offset(); | 
 |   } else if (cid == kArrayCid || cid == kImmutableArrayCid) { | 
 |     return Array::length_offset(); | 
 |   } else if (cid == kGrowableObjectArrayCid) { | 
 |     // List length is variable. | 
 |     return Field::kUnknownLengthOffset; | 
 |   } | 
 |   return Field::kUnknownLengthOffset; | 
 | } | 
 |  | 
 | const char* Field::GuardedPropertiesAsCString() const { | 
 |   if (guarded_cid() == kIllegalCid) { | 
 |     return "<?>"; | 
 |   } else if (guarded_cid() == kDynamicCid) { | 
 |     ASSERT(!static_type_exactness_state().IsExactOrUninitialized()); | 
 |     return "<*>"; | 
 |   } | 
 |  | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |  | 
 |   const char* exactness = ""; | 
 |   if (static_type_exactness_state().IsTracking()) { | 
 |     exactness = | 
 |         zone->PrintToString(" {%s}", static_type_exactness_state().ToCString()); | 
 |   } | 
 |  | 
 |   const Class& cls = | 
 |       Class::Handle(IsolateGroup::Current()->class_table()->At(guarded_cid())); | 
 |   const char* class_name = String::Handle(cls.Name()).ToCString(); | 
 |  | 
 |   if (IsBuiltinListClassId(guarded_cid()) && !is_nullable() && is_final()) { | 
 |     ASSERT(guarded_list_length() != kUnknownFixedLength); | 
 |     if (guarded_list_length() == kNoFixedLength) { | 
 |       return zone->PrintToString("<%s [*]%s>", class_name, exactness); | 
 |     } else { | 
 |       return zone->PrintToString( | 
 |           "<%s [%" Pd " @%" Pd "]%s>", class_name, guarded_list_length(), | 
 |           guarded_list_length_in_object_offset(), exactness); | 
 |     } | 
 |   } | 
 |  | 
 |   return zone->PrintToString("<%s %s%s>", | 
 |                              is_nullable() ? "nullable" : "not-nullable", | 
 |                              class_name, exactness); | 
 | } | 
 |  | 
 | void Field::InitializeGuardedListLengthInObjectOffset(bool unsafe) const { | 
 |   auto setter = unsafe ? &Field::set_guarded_list_length_in_object_offset_unsafe | 
 |                        : &Field::set_guarded_list_length_in_object_offset; | 
 |   ASSERT(IsOriginal()); | 
 |   if (needs_length_check() && | 
 |       (guarded_list_length() != Field::kUnknownFixedLength)) { | 
 |     const intptr_t offset = GetListLengthOffset(guarded_cid()); | 
 |     (this->*setter)(offset); | 
 |     ASSERT(offset != Field::kUnknownLengthOffset); | 
 |   } else { | 
 |     (this->*setter)(Field::kUnknownLengthOffset); | 
 |   } | 
 | } | 
 |  | 
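// Computes the updates to a field's guarded state (guarded cid, nullability,
// guarded list length and offset, and static type exactness) implied by
// storing [value] into the field. The field itself is only mutated when
// DoUpdate() is called.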
 | class FieldGuardUpdater { | 
 |  public: | 
 |   FieldGuardUpdater(const Field* field, const Object& value); | 
 |  | 
 |   bool IsUpdateNeeded() { | 
 |     return does_guarded_cid_need_update_ || does_is_nullable_need_update_ || | 
 |            does_list_length_and_offset_need_update_ || | 
 |            does_static_type_exactness_state_need_update_; | 
 |   } | 
 |   void DoUpdate(); | 
 |  | 
 |  private: | 
 |   void ReviewExactnessState(); | 
 |   void ReviewGuards(); | 
 |  | 
 |   intptr_t guarded_cid() { return guarded_cid_; } | 
 |   void set_guarded_cid(intptr_t guarded_cid) { | 
 |     guarded_cid_ = guarded_cid; | 
 |     does_guarded_cid_need_update_ = true; | 
 |   } | 
 |  | 
 |   bool is_nullable() { return is_nullable_; } | 
 |   void set_is_nullable(bool is_nullable) { | 
 |     is_nullable_ = is_nullable; | 
 |     does_is_nullable_need_update_ = true; | 
 |   } | 
 |  | 
 |   intptr_t guarded_list_length() { return list_length_; } | 
 |   void set_guarded_list_length_and_offset( | 
 |       intptr_t list_length, | 
 |       intptr_t list_length_in_object_offset) { | 
 |     list_length_ = list_length; | 
 |     list_length_in_object_offset_ = list_length_in_object_offset; | 
 |     does_list_length_and_offset_need_update_ = true; | 
 |   } | 
 |  | 
 |   StaticTypeExactnessState static_type_exactness_state() { | 
 |     return static_type_exactness_state_; | 
 |   } | 
 |   void set_static_type_exactness_state(StaticTypeExactnessState state) { | 
 |     static_type_exactness_state_ = state; | 
 |     does_static_type_exactness_state_need_update_ = true; | 
 |   } | 
 |  | 
 |   const Field* field_; | 
 |   const Object& value_; | 
 |  | 
 |   intptr_t guarded_cid_; | 
 |   bool is_nullable_; | 
 |   intptr_t list_length_; | 
 |   intptr_t list_length_in_object_offset_; | 
 |   StaticTypeExactnessState static_type_exactness_state_; | 
 |  | 
 |   bool does_guarded_cid_need_update_ = false; | 
 |   bool does_is_nullable_need_update_ = false; | 
 |   bool does_list_length_and_offset_need_update_ = false; | 
 |   bool does_static_type_exactness_state_need_update_ = false; | 
 | }; | 
 |  | 
 | void FieldGuardUpdater::ReviewGuards() { | 
 |   ASSERT(field_->IsOriginal()); | 
 |   const intptr_t cid = value_.GetClassId(); | 
 |  | 
 |   if (guarded_cid() == kIllegalCid) { | 
 |     set_guarded_cid(cid); | 
 |     set_is_nullable(cid == kNullCid); | 
 |  | 
 |     // Start tracking length if needed. | 
 |     ASSERT((guarded_list_length() == Field::kUnknownFixedLength) || | 
 |            (guarded_list_length() == Field::kNoFixedLength)); | 
 |     if (field_->needs_length_check()) { | 
 |       ASSERT(guarded_list_length() == Field::kUnknownFixedLength); | 
 |       set_guarded_list_length_and_offset(GetListLength(value_), | 
 |                                          GetListLengthOffset(cid)); | 
 |     } | 
 |  | 
 |     if (FLAG_trace_field_guards) { | 
 |       THR_Print("    => %s\n", field_->GuardedPropertiesAsCString()); | 
 |     } | 
 |     return; | 
 |   } | 
 |  | 
 |   if ((cid == guarded_cid()) || ((cid == kNullCid) && is_nullable())) { | 
    // The class id of the assigned value matches the expected class id and
    // nullability.
 |  | 
    // If we are tracking the list length, check whether it still matches.
 |     if (field_->needs_length_check() && | 
 |         (guarded_list_length() != GetListLength(value_))) { | 
 |       ASSERT(guarded_list_length() != Field::kUnknownFixedLength); | 
 |       set_guarded_list_length_and_offset(Field::kNoFixedLength, | 
 |                                          Field::kUnknownLengthOffset); | 
 |       return; | 
 |     } | 
 |  | 
 |     // Everything matches. | 
 |     return; | 
 |   } | 
 |  | 
 |   if ((cid == kNullCid) && !is_nullable()) { | 
 |     // Assigning null value to a non-nullable field makes it nullable. | 
 |     set_is_nullable(true); | 
 |   } else if ((cid != kNullCid) && (guarded_cid() == kNullCid)) { | 
 |     // Assigning non-null value to a field that previously contained only null | 
 |     // turns it into a nullable field with the given class id. | 
 |     ASSERT(is_nullable()); | 
 |     set_guarded_cid(cid); | 
 |   } else { | 
 |     // Give up on tracking class id of values contained in this field. | 
 |     ASSERT(guarded_cid() != cid); | 
 |     set_guarded_cid(kDynamicCid); | 
 |     set_is_nullable(true); | 
 |   } | 
 |  | 
  // If we were tracking the list length, drop the collected feedback.
 |   if (field_->needs_length_check()) { | 
 |     ASSERT(guarded_list_length() != Field::kUnknownFixedLength); | 
 |     set_guarded_list_length_and_offset(Field::kNoFixedLength, | 
 |                                        Field::kUnknownLengthOffset); | 
 |   } | 
 | } | 
 |  | 
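// Searches the superclass chain (and, unless [consider_only_super_classes]
// is true, the implemented interfaces) for an instantiation of [cls]. If it
// is found and [path] is non-null, [path] is filled with the chain of
// supertypes leading from this class to [cls].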
 | bool Class::FindInstantiationOf(Zone* zone, | 
 |                                 const Class& cls, | 
 |                                 GrowableArray<const Type*>* path, | 
 |                                 bool consider_only_super_classes) const { | 
 |   ASSERT(cls.is_type_finalized()); | 
 |   if (cls.ptr() == ptr()) { | 
 |     return true;  // Found instantiation. | 
 |   } | 
 |  | 
 |   Class& cls2 = Class::Handle(zone); | 
 |   Type& super = Type::Handle(zone, super_type()); | 
 |   if (!super.IsNull() && !super.IsObjectType()) { | 
 |     cls2 = super.type_class(); | 
 |     if (path != nullptr) { | 
 |       path->Add(&super); | 
 |     } | 
 |     if (cls2.FindInstantiationOf(zone, cls, path, | 
 |                                  consider_only_super_classes)) { | 
 |       return true;  // Found instantiation. | 
 |     } | 
 |     if (path != nullptr) { | 
 |       path->RemoveLast(); | 
 |     } | 
 |   } | 
 |  | 
 |   if (!consider_only_super_classes) { | 
 |     Array& super_interfaces = Array::Handle(zone, interfaces()); | 
 |     for (intptr_t i = 0; i < super_interfaces.Length(); i++) { | 
 |       super ^= super_interfaces.At(i); | 
 |       cls2 = super.type_class(); | 
 |       if (path != nullptr) { | 
 |         path->Add(&super); | 
 |       } | 
 |       if (cls2.FindInstantiationOf(zone, cls, path)) { | 
 |         return true;  // Found instantiation. | 
 |       } | 
 |       if (path != nullptr) { | 
 |         path->RemoveLast(); | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
 |   return false;  // Not found. | 
 | } | 
 |  | 
 | bool Class::FindInstantiationOf(Zone* zone, | 
 |                                 const Type& type, | 
 |                                 GrowableArray<const Type*>* path, | 
 |                                 bool consider_only_super_classes) const { | 
 |   return FindInstantiationOf(zone, Class::Handle(zone, type.type_class()), path, | 
 |                              consider_only_super_classes); | 
 | } | 
 |  | 
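// Returns the instantiation of [cls] that this class implements, i.e. the
// type that instances of this class have when viewed as instances of [cls],
// or Type::null() if [cls] is not a supertype of this class.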
 | TypePtr Class::GetInstantiationOf(Zone* zone, const Class& cls) const { | 
 |   if (ptr() == cls.ptr()) { | 
 |     return DeclarationType(); | 
 |   } | 
 |   if (FindInstantiationOf(zone, cls, /*consider_only_super_classes=*/true)) { | 
 |     // Since [cls] is a superclass of [this], use [cls]'s declaration type. | 
 |     return cls.DeclarationType(); | 
 |   } | 
 |   const auto& decl_type = Type::Handle(zone, DeclarationType()); | 
 |   GrowableArray<const Type*> path(zone, 0); | 
 |   if (!FindInstantiationOf(zone, cls, &path)) { | 
 |     return Type::null(); | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   ASSERT(!path.is_empty()); | 
 |   auto& calculated_type = Type::Handle(zone, decl_type.ptr()); | 
 |   auto& calculated_type_class = | 
 |       Class::Handle(zone, calculated_type.type_class()); | 
 |   auto& calculated_type_args = | 
 |       TypeArguments::Handle(zone, calculated_type.arguments()); | 
 |   calculated_type_args = calculated_type_args.ToInstantiatorTypeArguments( | 
 |       thread, calculated_type_class); | 
 |   for (auto* const type : path) { | 
 |     calculated_type ^= type->ptr(); | 
 |     if (!calculated_type.IsInstantiated()) { | 
 |       calculated_type ^= calculated_type.InstantiateFrom( | 
 |           calculated_type_args, Object::null_type_arguments(), kAllFree, | 
 |           Heap::kNew); | 
 |     } | 
 |     calculated_type_class = calculated_type.type_class(); | 
 |     calculated_type_args = calculated_type.arguments(); | 
 |     calculated_type_args = calculated_type_args.ToInstantiatorTypeArguments( | 
 |         thread, calculated_type_class); | 
 |   } | 
 |   ASSERT_EQUAL(calculated_type.type_class_id(), cls.id()); | 
 |   return calculated_type.ptr(); | 
 | } | 
 |  | 
 | TypePtr Class::GetInstantiationOf(Zone* zone, const Type& type) const { | 
 |   return GetInstantiationOf(zone, Class::Handle(zone, type.type_class())); | 
 | } | 
 |  | 
 | void Field::SetStaticValue(const Object& value) const { | 
 |   auto thread = Thread::Current(); | 
 |   ASSERT(thread->IsDartMutatorThread()); | 
 |   ASSERT(value.IsNull() || value.IsSentinel() || value.IsInstance()); | 
 |  | 
 |   ASSERT(is_static());  // Valid only for static dart fields. | 
 |   const intptr_t id = field_id(); | 
 |   ASSERT(id >= 0); | 
 |  | 
 |   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |   thread->isolate()->field_table()->SetAt(id, value.ptr()); | 
 | } | 
 |  | 
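// Returns a trivially-exact state recording the type arguments offset of
// [cls] if that offset fits the encoding, and NotExact otherwise.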
 | static StaticTypeExactnessState TrivialTypeExactnessFor(const Class& cls) { | 
 |   const intptr_t type_arguments_offset = cls.host_type_arguments_field_offset(); | 
 |   ASSERT(type_arguments_offset != Class::kNoTypeArguments); | 
 |   if (StaticTypeExactnessState::CanRepresentAsTriviallyExact( | 
 |           type_arguments_offset / kCompressedWordSize)) { | 
 |     return StaticTypeExactnessState::TriviallyExact(type_arguments_offset / | 
 |                                                     kCompressedWordSize); | 
 |   } else { | 
 |     return StaticTypeExactnessState::NotExact(); | 
 |   } | 
 | } | 
 |  | 
 | static const char* SafeTypeArgumentsToCString(const TypeArguments& args) { | 
 |   return (args.ptr() == TypeArguments::null()) ? "<null>" : args.ToCString(); | 
 | } | 
 |  | 
 | StaticTypeExactnessState StaticTypeExactnessState::Compute( | 
 |     const Type& static_type, | 
 |     const Instance& value, | 
 |     bool print_trace /* = false */) { | 
 |   ASSERT(!value.IsNull());  // Should be handled by the caller. | 
 |   ASSERT(value.ptr() != Object::sentinel().ptr()); | 
 |   ASSERT(value.ptr() != Object::transition_sentinel().ptr()); | 
 |  | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* const zone = thread->zone(); | 
 |   const TypeArguments& static_type_args = | 
 |       TypeArguments::Handle(zone, static_type.GetInstanceTypeArguments(thread)); | 
 |  | 
 |   TypeArguments& args = TypeArguments::Handle(zone); | 
 |  | 
 |   ASSERT(static_type.IsFinalized()); | 
 |   const Class& cls = Class::Handle(zone, value.clazz()); | 
 |   GrowableArray<const Type*> path(10); | 
 |  | 
 |   bool is_super_class = true; | 
 |   if (!cls.FindInstantiationOf(zone, static_type, &path, | 
 |                                /*consider_only_super_classes=*/true)) { | 
 |     is_super_class = false; | 
 |     bool found_super_interface = | 
 |         cls.FindInstantiationOf(zone, static_type, &path); | 
 |     ASSERT(found_super_interface); | 
 |   } | 
 |  | 
 |   // Trivial case: field has type G<T0, ..., Tn> and value has type | 
 |   // G<U0, ..., Un>. Check if type arguments match. | 
 |   if (path.is_empty()) { | 
 |     ASSERT(cls.ptr() == static_type.type_class()); | 
 |     args = value.GetTypeArguments(); | 
    // TODO(dartbug.com/34170) Evaluate if comparing relevant subvectors
    // (which disregard the superclass's own arguments) improves the
    // precision of the tracking.
 |     if (args.ptr() == static_type_args.ptr()) { | 
 |       return TrivialTypeExactnessFor(cls); | 
 |     } | 
 |  | 
 |     if (print_trace) { | 
 |       THR_Print("  expected %s got %s type arguments\n", | 
 |                 SafeTypeArgumentsToCString(static_type_args), | 
 |                 SafeTypeArgumentsToCString(args)); | 
 |     } | 
 |     return StaticTypeExactnessState::NotExact(); | 
 |   } | 
 |  | 
 |   // Value has type C<U0, ..., Un> and field has type G<T0, ..., Tn> and G != C. | 
 |   // Compute C<X0, ..., Xn> at G (Xi are free type arguments). | 
 |   // Path array contains a chain of immediate supertypes S0 <: S1 <: ... Sn, | 
 |   // such that S0 is an immediate supertype of C and Sn is G<...>. | 
 |   // Each Si might depend on type parameters of the previous supertype S{i-1}. | 
 |   // To compute C<X0, ..., Xn> at G we walk the chain backwards and | 
 |   // instantiate Si using type parameters of S{i-1} which gives us a type | 
 |   // depending on type parameters of S{i-2}. | 
 |   Type& type = Type::Handle(zone, path.Last()->ptr()); | 
 |   for (intptr_t i = path.length() - 2; (i >= 0) && !type.IsInstantiated(); | 
 |        i--) { | 
 |     args = path[i]->GetInstanceTypeArguments(thread, /*canonicalize=*/false); | 
 |     type ^= type.InstantiateFrom(args, TypeArguments::null_type_arguments(), | 
 |                                  kAllFree, Heap::kNew); | 
 |   } | 
 |  | 
 |   if (type.IsInstantiated()) { | 
 |     // C<X0, ..., Xn> at G is fully instantiated and does not depend on | 
 |     // Xi. In this case just check if type arguments match. | 
 |     args = type.GetInstanceTypeArguments(thread, /*canonicalize=*/false); | 
 |     if (args.Equals(static_type_args)) { | 
 |       return is_super_class ? StaticTypeExactnessState::HasExactSuperClass() | 
 |                             : StaticTypeExactnessState::HasExactSuperType(); | 
 |     } | 
 |  | 
 |     if (print_trace) { | 
 |       THR_Print("  expected %s got %s type arguments\n", | 
 |                 SafeTypeArgumentsToCString(static_type_args), | 
 |                 SafeTypeArgumentsToCString(args)); | 
 |     } | 
 |  | 
 |     return StaticTypeExactnessState::NotExact(); | 
 |   } | 
 |  | 
  // The most complicated case: C<X0, ..., Xn> at G depends on
  // Xi values. To compare type arguments we would need to instantiate
  // it fully from the value's type arguments and compare with <T0, ..., Tn>.
  // However this would complicate the fast path in the native code. To avoid
  // this complication we optimize for the trivial case: we check if
  // C<X0, ..., Xn> at G is exactly G<X0, ..., Xn>, which means we can simply
  // compare the value's type arguments (<U0, ..., Un>) to the field's type
  // arguments (<T0, ..., Tn>) to establish whether the field type is exact.
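  // E.g. (illustrative): class C<A, B> extends G<A, B> is trivially exact,
  // whereas class C<A, B> extends G<B, A> or class C<A> extends G<List<A>>
  // is not, because the i-th type argument is not the i-th type parameter.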
 |   ASSERT(cls.IsGeneric()); | 
 |   const intptr_t num_type_params = cls.NumTypeParameters(); | 
 |   bool trivial_case = | 
 |       (num_type_params == | 
 |        Class::Handle(zone, static_type.type_class()).NumTypeParameters()) && | 
 |       (value.GetTypeArguments() == static_type_args.ptr()); | 
 |   if (!trivial_case && FLAG_trace_field_guards) { | 
 |     THR_Print("Not a simple case: %" Pd " vs %" Pd | 
 |               " type parameters, %s vs %s type arguments\n", | 
 |               num_type_params, | 
 |               Class::Handle(zone, static_type.type_class()).NumTypeParameters(), | 
 |               SafeTypeArgumentsToCString( | 
 |                   TypeArguments::Handle(zone, value.GetTypeArguments())), | 
 |               SafeTypeArgumentsToCString(static_type_args)); | 
 |   } | 
 |  | 
 |   AbstractType& type_arg = AbstractType::Handle(zone); | 
 |   args = type.GetInstanceTypeArguments(thread, /*canonicalize=*/false); | 
 |   for (intptr_t i = 0; (i < num_type_params) && trivial_case; i++) { | 
 |     type_arg = args.TypeAt(i); | 
 |     if (!type_arg.IsTypeParameter() || | 
 |         (TypeParameter::Cast(type_arg).index() != i)) { | 
 |       if (FLAG_trace_field_guards) { | 
 |         THR_Print("  => encountered %s at index % " Pd "\n", | 
 |                   type_arg.ToCString(), i); | 
 |       } | 
 |       trivial_case = false; | 
 |     } | 
 |   } | 
 |  | 
 |   return trivial_case ? TrivialTypeExactnessFor(cls) | 
 |                       : StaticTypeExactnessState::NotExact(); | 
 | } | 
 |  | 
 | const char* StaticTypeExactnessState::ToCString() const { | 
 |   if (!IsTracking()) { | 
 |     return "not-tracking"; | 
 |   } else if (!IsExactOrUninitialized()) { | 
 |     return "not-exact"; | 
 |   } else if (IsTriviallyExact()) { | 
 |     return Thread::Current()->zone()->PrintToString( | 
 |         "trivially-exact(%hhu)", GetTypeArgumentsOffsetInWords()); | 
 |   } else if (IsHasExactSuperType()) { | 
 |     return "has-exact-super-type"; | 
 |   } else if (IsHasExactSuperClass()) { | 
 |     return "has-exact-super-class"; | 
 |   } else { | 
 |     ASSERT(IsUninitialized()); | 
 |     return "uninitialized-exactness"; | 
 |   } | 
 | } | 
 |  | 
 | void FieldGuardUpdater::ReviewExactnessState() { | 
 |   if (!static_type_exactness_state().IsExactOrUninitialized()) { | 
 |     // Nothing to update. | 
 |     return; | 
 |   } | 
 |  | 
 |   if (guarded_cid() == kDynamicCid) { | 
 |     if (FLAG_trace_field_guards) { | 
 |       THR_Print( | 
 |           "  => switching off exactness tracking because guarded cid is " | 
 |           "dynamic\n"); | 
 |     } | 
 |     set_static_type_exactness_state(StaticTypeExactnessState::NotExact()); | 
 |     return; | 
 |   } | 
 |  | 
 |   // If we are storing null into a field or we have an exact super type | 
 |   // then there is nothing to do. | 
 |   if (value_.IsNull() || static_type_exactness_state().IsHasExactSuperType() || | 
 |       static_type_exactness_state().IsHasExactSuperClass()) { | 
 |     return; | 
 |   } | 
 |  | 
 |   // If we are storing a non-null value into a field that is considered | 
 |   // to be trivially exact then we need to check if value has an appropriate | 
 |   // type. | 
 |   ASSERT(guarded_cid() != kNullCid); | 
 |  | 
 |   const Type& field_type = Type::Cast(AbstractType::Handle(field_->type())); | 
 |   const Instance& instance = Instance::Cast(value_); | 
 |  | 
 |   if (static_type_exactness_state().IsTriviallyExact()) { | 
 |     const TypeArguments& args = | 
 |         TypeArguments::Handle(instance.GetTypeArguments()); | 
 |     const TypeArguments& field_type_args = TypeArguments::Handle( | 
 |         field_type.GetInstanceTypeArguments(Thread::Current())); | 
 |     if (args.ptr() == field_type_args.ptr()) { | 
 |       return; | 
 |     } | 
 |  | 
 |     if (FLAG_trace_field_guards) { | 
 |       THR_Print("  expected %s got %s type arguments\n", | 
 |                 field_type_args.ToCString(), args.ToCString()); | 
 |     } | 
 |  | 
 |     set_static_type_exactness_state(StaticTypeExactnessState::NotExact()); | 
 |     return; | 
 |   } | 
 |  | 
 |   ASSERT(static_type_exactness_state().IsUninitialized()); | 
 |   set_static_type_exactness_state(StaticTypeExactnessState::Compute( | 
 |       field_type, instance, FLAG_trace_field_guards)); | 
 |   return; | 
 | } | 
 |  | 
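// Snapshots the field's current guard state, then computes the prospective
// new state implied by |value|; the field itself is only mutated later, when
// DoUpdate() is called.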
 | FieldGuardUpdater::FieldGuardUpdater(const Field* field, const Object& value) | 
 |     : field_(field), | 
 |       value_(value), | 
 |       guarded_cid_(field->guarded_cid()), | 
 |       is_nullable_(field->is_nullable()), | 
 |       list_length_(field->guarded_list_length()), | 
 |       list_length_in_object_offset_( | 
 |           field->guarded_list_length_in_object_offset()), | 
 |       static_type_exactness_state_(field->static_type_exactness_state()) { | 
 |   ReviewGuards(); | 
 |   ReviewExactnessState(); | 
 | } | 
 |  | 
 | void FieldGuardUpdater::DoUpdate() { | 
 |   if (does_guarded_cid_need_update_) { | 
 |     field_->set_guarded_cid(guarded_cid_); | 
 |   } | 
 |   if (does_is_nullable_need_update_) { | 
 |     field_->set_is_nullable(is_nullable_); | 
 |   } | 
 |   if (does_list_length_and_offset_need_update_) { | 
 |     field_->set_guarded_list_length(list_length_); | 
 |     field_->set_guarded_list_length_in_object_offset( | 
 |         list_length_in_object_offset_); | 
 |   } | 
 |   if (does_static_type_exactness_state_need_update_) { | 
 |     field_->set_static_type_exactness_state(static_type_exactness_state_); | 
 |   } | 
 | } | 
 |  | 
 | void Field::RecordStore(const Object& value) const { | 
 |   ASSERT(IsOriginal()); | 
 |   Thread* const thread = Thread::Current(); | 
 |   if (!thread->isolate_group()->use_field_guards()) { | 
 |     return; | 
 |   } | 
 |  | 
 |   // We should never try to record a sentinel. | 
 |   ASSERT(value.ptr() != Object::sentinel().ptr()); | 
 |  | 
 |   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |   if ((guarded_cid() == kDynamicCid) || | 
 |       (is_nullable() && value.ptr() == Object::null())) { | 
 |     // Nothing to do: the field is not guarded or we are storing null into | 
 |     // a nullable field. | 
 |     return; | 
 |   } | 
 |  | 
 |   if (FLAG_trace_field_guards) { | 
 |     THR_Print("Store %s %s <- %s\n", ToCString(), GuardedPropertiesAsCString(), | 
 |               value.ToCString()); | 
 |   } | 
 |  | 
 |   FieldGuardUpdater updater(this, value); | 
 |   if (updater.IsUpdateNeeded()) { | 
 |     if (FLAG_trace_field_guards) { | 
 |       THR_Print("    => %s\n", GuardedPropertiesAsCString()); | 
 |     } | 
    // Nobody else could have updated the guard state since we are holding the
    // program write lock. However, we still need to stop mutators while
    // updating the guard state: optimized code compiled against the old
    // guards must not keep running once the fields are updated.
 |     auto isolate_group = IsolateGroup::Current(); | 
 |     isolate_group->RunWithStoppedMutators([&]() { | 
 |       updater.DoUpdate(); | 
 |       DeoptimizeDependentCode(/*are_mutators_stopped=*/true); | 
 |     }); | 
 |   } | 
 | } | 
 |  | 
 | void Field::ForceDynamicGuardedCidAndLength() const { | 
 |   if (!is_unboxed()) { | 
 |     set_guarded_cid(kDynamicCid); | 
 |     set_is_nullable(true); | 
 |   } | 
 |   set_guarded_list_length(Field::kNoFixedLength); | 
 |   set_guarded_list_length_in_object_offset(Field::kUnknownLengthOffset); | 
 |   if (static_type_exactness_state().IsTracking()) { | 
 |     set_static_type_exactness_state(StaticTypeExactnessState::NotExact()); | 
 |   } | 
 |   // Drop any code that relied on the above assumptions. | 
 |   DeoptimizeDependentCode(); | 
 | } | 
 |  | 
 | StringPtr Script::resolved_url() const { | 
 | #if defined(DART_PRECOMPILER) | 
 |   return String::RawCast( | 
 |       WeakSerializationReference::Unwrap(untag()->resolved_url())); | 
 | #else | 
 |   return untag()->resolved_url(); | 
 | #endif | 
 | } | 
 |  | 
 | bool Script::HasSource() const { | 
 |   return untag()->source() != String::null(); | 
 | } | 
 |  | 
 | StringPtr Script::Source() const { | 
 |   return untag()->source(); | 
 | } | 
 |  | 
 | bool Script::IsPartOfDartColonLibrary() const { | 
 |   const String& script_url = String::Handle(url()); | 
 |   return (script_url.StartsWith(Symbols::DartScheme()) || | 
 |           script_url.StartsWith(Symbols::DartSchemePrivate())); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | void Script::LoadSourceFromKernel(const uint8_t* kernel_buffer, | 
 |                                   intptr_t kernel_buffer_len) const { | 
 |   String& uri = String::Handle(resolved_url()); | 
 |   String& source = String::Handle(kernel::KernelLoader::FindSourceForScript( | 
 |       kernel_buffer, kernel_buffer_len, uri)); | 
 |   set_source(source); | 
 | } | 
 |  | 
 | void Script::InitializeFromKernel( | 
 |     const KernelProgramInfo& info, | 
 |     intptr_t script_index, | 
 |     const TypedData& line_starts, | 
 |     const TypedDataView& constant_coverage) const { | 
 |   StoreNonPointer(&untag()->kernel_script_index_, script_index); | 
 |   untag()->set_kernel_program_info(info.ptr()); | 
 |   untag()->set_line_starts(line_starts.ptr()); | 
 |   untag()->set_debug_positions(Array::null_array().ptr()); | 
 |   NOT_IN_PRODUCT(untag()->set_constant_coverage(constant_coverage.ptr())); | 
 | } | 
 | #endif | 
 |  | 
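// Builds a flattened table of the form [null, line, (tokenPos, column)*]*:
// each run starts with a null separator followed by a 1-based line number,
// then pairs of token position and 1-based column for the debug positions on
// that line.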
 | GrowableObjectArrayPtr Script::GenerateLineNumberArray() const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const GrowableObjectArray& info = | 
 |       GrowableObjectArray::Handle(zone, GrowableObjectArray::New()); | 
 |   const Object& line_separator = Object::Handle(zone); | 
 |   if (line_starts() == TypedData::null()) { | 
 |     // Scripts in the AOT snapshot do not have a line starts array. | 
 |     // A well-formed line number array has a leading null. | 
 |     info.Add(line_separator);  // New line. | 
 |     return info.ptr(); | 
 |   } | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   Smi& value = Smi::Handle(zone); | 
 |   const TypedData& line_starts_data = TypedData::Handle(zone, line_starts()); | 
 |   intptr_t line_count = line_starts_data.Length(); | 
 |   const Array& debug_positions_array = Array::Handle(debug_positions()); | 
 |   intptr_t token_count = debug_positions_array.Length(); | 
 |   int token_index = 0; | 
 |  | 
 |   kernel::KernelLineStartsReader line_starts_reader(line_starts_data, zone); | 
 |   for (int line_index = 0; line_index < line_count; ++line_index) { | 
 |     intptr_t start = line_starts_reader.At(line_index); | 
 |     // Output the rest of the tokens if we have no next line. | 
 |     intptr_t end = TokenPosition::kMaxSourcePos; | 
 |     if (line_index + 1 < line_count) { | 
 |       end = line_starts_reader.At(line_index + 1); | 
 |     } | 
 |     bool first = true; | 
 |     while (token_index < token_count) { | 
 |       value ^= debug_positions_array.At(token_index); | 
 |       intptr_t debug_position = value.Value(); | 
 |       if (debug_position >= end) break; | 
 |  | 
 |       if (first) { | 
 |         info.Add(line_separator);          // New line. | 
 |         value = Smi::New(line_index + 1);  // Line number. | 
 |         info.Add(value); | 
 |         first = false; | 
 |       } | 
 |  | 
 |       value ^= debug_positions_array.At(token_index); | 
 |       info.Add(value);                               // Token position. | 
 |       value = Smi::New(debug_position - start + 1);  // Column. | 
 |       info.Add(value); | 
 |       ++token_index; | 
 |     } | 
 |   } | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |   return info.ptr(); | 
 | } | 
 |  | 
 | TokenPosition Script::MaxPosition() const { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (HasCachedMaxPosition()) { | 
 |     return TokenPosition::Deserialize( | 
 |         UntaggedScript::CachedMaxPositionBitField::decode( | 
 |             untag()->flags_and_max_position_)); | 
 |   } | 
 |   auto const zone = Thread::Current()->zone(); | 
  if (line_starts() != TypedData::null()) {
 |     const auto& starts = TypedData::Handle(zone, line_starts()); | 
 |     kernel::KernelLineStartsReader reader(starts, zone); | 
 |     const intptr_t max_position = reader.MaxPosition(); | 
 |     SetCachedMaxPosition(max_position); | 
 |     SetHasCachedMaxPosition(true); | 
 |     return TokenPosition::Deserialize(max_position); | 
 |   } | 
 | #endif | 
 |   return TokenPosition::kNoSource; | 
 | } | 
 |  | 
 | void Script::set_url(const String& value) const { | 
 |   untag()->set_url(value.ptr()); | 
 | } | 
 |  | 
 | void Script::set_resolved_url(const String& value) const { | 
 |   untag()->set_resolved_url(value.ptr()); | 
 | } | 
 |  | 
 | void Script::set_source(const String& value) const { | 
 |   untag()->set_source(value.ptr()); | 
 | } | 
 |  | 
 | #if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME) | 
 | TypedDataViewPtr Script::constant_coverage() const { | 
 |   return untag()->constant_coverage(); | 
 | } | 
 | #endif  // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | void Script::set_debug_positions(const Array& value) const { | 
 |   untag()->set_debug_positions(value.ptr()); | 
 | } | 
 |  | 
 | TypedDataPtr Script::line_starts() const { | 
 |   return untag()->line_starts(); | 
 | } | 
 |  | 
 | ArrayPtr Script::debug_positions() const { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   Array& debug_positions_array = Array::Handle(untag()->debug_positions()); | 
 |   if (debug_positions_array.IsNull()) { | 
 |     // This is created lazily. Now we need it. | 
 |     CollectTokenPositionsFor(); | 
 |   } | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |   return untag()->debug_positions(); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | bool Script::HasCachedMaxPosition() const { | 
 |   return UntaggedScript::HasCachedMaxPositionBit::decode( | 
 |       untag()->flags_and_max_position_); | 
 | } | 
 |  | 
 | void Script::SetHasCachedMaxPosition(bool value) const { | 
 |   StoreNonPointer(&untag()->flags_and_max_position_, | 
 |                   UntaggedScript::HasCachedMaxPositionBit::update( | 
 |                       value, untag()->flags_and_max_position_)); | 
 | } | 
 |  | 
 | void Script::SetCachedMaxPosition(intptr_t value) const { | 
 |   StoreNonPointer(&untag()->flags_and_max_position_, | 
 |                   UntaggedScript::CachedMaxPositionBitField::update( | 
 |                       value, untag()->flags_and_max_position_)); | 
 | } | 
 | #endif | 
 |  | 
 | void Script::set_load_timestamp(int64_t value) const { | 
 |   StoreNonPointer(&untag()->load_timestamp_, value); | 
 | } | 
 |  | 
 | bool Script::IsValidTokenPosition(TokenPosition token_pos) const { | 
 |   const TokenPosition& max_position = MaxPosition(); | 
 |   // We may end up with scripts that have the empty string as a source file | 
 |   // in testing and the like, so allow any token position when the max position | 
 |   // is 0 as well as when it is kNoSource. | 
 |   return !max_position.IsReal() || !token_pos.IsReal() || | 
 |          max_position.Pos() == 0 || token_pos <= max_position; | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | static bool IsLetter(int32_t c) { | 
 |   return (('A' <= c) && (c <= 'Z')) || (('a' <= c) && (c <= 'z')); | 
 | } | 
 |  | 
 | static bool IsDecimalDigit(int32_t c) { | 
 |   return '0' <= c && c <= '9'; | 
 | } | 
 |  | 
 | static bool IsIdentStartChar(int32_t c) { | 
 |   return IsLetter(c) || (c == '_') || (c == '$'); | 
 | } | 
 |  | 
 | static bool IsIdentChar(int32_t c) { | 
 |   return IsLetter(c) || IsDecimalDigit(c) || (c == '_') || (c == '$'); | 
 | } | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | bool Script::GetTokenLocation(const TokenPosition& token_pos, | 
 |                               intptr_t* line, | 
 |                               intptr_t* column) const { | 
 |   ASSERT(line != nullptr); | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   // Scripts in the AOT snapshot do not have a line starts array. | 
 |   return false; | 
 | #else | 
 |   if (!token_pos.IsReal()) return false; | 
 |  | 
 |   auto const zone = Thread::Current()->zone(); | 
 |   const TypedData& line_starts_data = TypedData::Handle(zone, line_starts()); | 
 |   if (line_starts_data.IsNull()) return false; | 
 |   kernel::KernelLineStartsReader line_starts_reader(line_starts_data, zone); | 
 |   return line_starts_reader.LocationForPosition(token_pos.Pos(), line, column); | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | intptr_t Script::GetTokenLength(const TokenPosition& token_pos) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   // Scripts in the AOT snapshot do not have their source. | 
 |   return -1; | 
 | #else | 
 |   if (!HasSource() || !token_pos.IsReal()) return -1; | 
 |   auto const zone = Thread::Current()->zone(); | 
 |   // We don't explicitly save this data: Load the source and find it from there. | 
 |   const String& source = String::Handle(zone, Source()); | 
 |   const intptr_t start = token_pos.Pos(); | 
 |   if (start >= source.Length()) return -1;  // Can't determine token_len. | 
 |   intptr_t end = start; | 
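  // If the token starts an identifier, consume the whole identifier;
  // otherwise the token is just the single character at |start|.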
 |   if (IsIdentStartChar(source.CharAt(end++))) { | 
 |     for (; end < source.Length(); ++end) { | 
 |       if (!IsIdentChar(source.CharAt(end))) break; | 
 |     } | 
 |   } | 
 |   return end - start; | 
 | #endif | 
 | } | 
 |  | 
 | bool Script::TokenRangeAtLine(intptr_t line_number, | 
 |                               TokenPosition* first_token_index, | 
 |                               TokenPosition* last_token_index) const { | 
 |   ASSERT(first_token_index != nullptr && last_token_index != nullptr); | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   // Scripts in the AOT snapshot do not have a line starts array. | 
 |   return false; | 
 | #else | 
 |   // Line numbers are 1-indexed. | 
 |   if (line_number <= 0) return false; | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const TypedData& line_starts_data = TypedData::Handle(zone, line_starts()); | 
 |   kernel::KernelLineStartsReader line_starts_reader(line_starts_data, zone); | 
 |   if (!line_starts_reader.TokenRangeAtLine(line_number, first_token_index, | 
 |                                            last_token_index)) { | 
 |     return false; | 
 |   } | 
 | #if defined(DEBUG) | 
 |   intptr_t source_length; | 
 |   if (!HasSource()) { | 
 |     Smi& value = Smi::Handle(zone); | 
 |     const Array& debug_positions_array = Array::Handle(zone, debug_positions()); | 
 |     value ^= debug_positions_array.At(debug_positions_array.Length() - 1); | 
 |     source_length = value.Value(); | 
 |   } else { | 
 |     const String& source = String::Handle(zone, Source()); | 
 |     source_length = source.Length(); | 
 |   } | 
 |   ASSERT(last_token_index->Serialize() <= source_length); | 
 | #endif | 
 |   return true; | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | // Returns the index in the given source string for the given (1-based) absolute | 
 | // line and column numbers. The line and column offsets are used to calculate | 
 | // the absolute line and column number for the starting index in the source. | 
 | // | 
// If the given line number is outside the range of lines represented by the
// source, the given column number is invalid for the given line, or a
// negative starting index is given, a negative value is returned to indicate
// failure.
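// E.g. (illustrative): GetRelativeSourceIndex(src, 2) returns the index of
// the first character of the second line of |src|, or -1 if |src| has fewer
// than two lines.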
 | static intptr_t GetRelativeSourceIndex(const String& src, | 
 |                                        intptr_t line, | 
 |                                        intptr_t line_offset = 0, | 
 |                                        intptr_t column = 1, | 
 |                                        intptr_t column_offset = 0, | 
 |                                        intptr_t starting_index = 0) { | 
 |   if (starting_index < 0 || line < 1 || column < 1 || line <= line_offset || | 
 |       (line == line_offset + 1 && column <= column_offset)) { | 
 |     return -1; | 
 |   } | 
 |   intptr_t len = src.Length(); | 
 |   intptr_t current_line = line_offset + 1; | 
 |   intptr_t current_index = starting_index; | 
 |   for (; current_index < len; current_index++) { | 
 |     if (current_line == line) { | 
 |       break; | 
 |     } | 
 |     const uint16_t c = src.CharAt(current_index); | 
 |     if (c == '\n' || c == '\r') { | 
 |       current_line++; | 
 |     } | 
 |     if (c == '\r' && current_index + 1 < len && | 
 |         src.CharAt(current_index + 1) == '\n') { | 
 |       // \r\n is treated as a single line terminator. | 
 |       current_index++; | 
 |     } | 
 |   } | 
 |   if (current_line != line) { | 
 |     return -1; | 
 |   } | 
 |   // Only adjust with column offset when still on the first line. | 
 |   intptr_t current_column = 1 + (line == line_offset + 1 ? column_offset : 0); | 
 |   for (; current_index < len; current_index++, current_column++) { | 
 |     if (current_column == column) { | 
 |       return current_index; | 
 |     } | 
 |     const uint16_t c = src.CharAt(current_index); | 
 |     if (c == '\n' || c == '\r') { | 
 |       break; | 
 |     } | 
 |   } | 
 |   // Check for a column value representing the source's end. | 
 |   if (current_column == column) { | 
 |     return current_index; | 
 |   } | 
 |   return -1; | 
 | } | 
 |  | 
 | StringPtr Script::GetLine(intptr_t line_number, Heap::Space space) const { | 
 |   if (!HasSource()) { | 
 |     return Symbols::OptimizedOut().ptr(); | 
 |   } | 
 |   const String& src = String::Handle(Source()); | 
 |   const intptr_t start = | 
 |       GetRelativeSourceIndex(src, line_number, line_offset()); | 
 |   if (start < 0) { | 
 |     return Symbols::Empty().ptr(); | 
 |   } | 
 |   intptr_t end = start; | 
 |   for (; end < src.Length(); end++) { | 
 |     const uint16_t c = src.CharAt(end); | 
 |     if (c == '\n' || c == '\r') { | 
 |       break; | 
 |     } | 
 |   } | 
 |   return String::SubString(src, start, end - start, space); | 
 | } | 
 |  | 
 | StringPtr Script::GetSnippet(intptr_t from_line, | 
 |                              intptr_t from_column, | 
 |                              intptr_t to_line, | 
 |                              intptr_t to_column) const { | 
 |   if (!HasSource()) { | 
 |     return Symbols::OptimizedOut().ptr(); | 
 |   } | 
 |   const String& src = String::Handle(Source()); | 
 |   const intptr_t start = GetRelativeSourceIndex(src, from_line, line_offset(), | 
 |                                                 from_column, col_offset()); | 
  // Lines and columns are 1-based, so we need to subtract one to get offsets.
 |   const intptr_t end = GetRelativeSourceIndex( | 
 |       src, to_line, from_line - 1, to_column, from_column - 1, start); | 
 |   // Only need to check end, because a negative start results in a negative end. | 
 |   if (end < 0) { | 
 |     return String::null(); | 
 |   } | 
 |   return String::SubString(src, start, end - start); | 
 | } | 
 |  | 
 | ScriptPtr Script::New(const String& url, const String& source) { | 
 |   return Script::New(url, url, source); | 
 | } | 
 |  | 
 | ScriptPtr Script::New(const String& url, | 
 |                       const String& resolved_url, | 
 |                       const String& source) { | 
 |   ASSERT(Object::script_class() != Class::null()); | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const Script& result = | 
 |       Script::Handle(zone, Object::Allocate<Script>(Heap::kOld)); | 
 |   result.set_url(String::Handle(zone, Symbols::New(thread, url))); | 
 |   result.set_resolved_url( | 
 |       String::Handle(zone, Symbols::New(thread, resolved_url))); | 
 |   result.set_source(source); | 
 |   NOT_IN_PRECOMPILED(ASSERT_EQUAL(result.HasCachedMaxPosition(), false)); | 
 |   ASSERT_EQUAL(result.kernel_script_index(), 0); | 
 |   if (FLAG_remove_script_timestamps_for_test) { | 
 |     ASSERT_EQUAL(result.load_timestamp(), 0); | 
 |   } else { | 
 |     result.set_load_timestamp(OS::GetCurrentTimeMillis()); | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* Script::ToCString() const { | 
 |   const String& name = String::Handle(url()); | 
 |   return OS::SCreate(Thread::Current()->zone(), "Script(%s)", name.ToCString()); | 
 | } | 
 |  | 
 | LibraryPtr Script::FindLibrary() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   const GrowableObjectArray& libs = GrowableObjectArray::Handle( | 
 |       zone, isolate_group->object_store()->libraries()); | 
 |   Library& lib = Library::Handle(zone); | 
 |   Array& scripts = Array::Handle(zone); | 
 |   for (intptr_t i = 0; i < libs.Length(); i++) { | 
 |     lib ^= libs.At(i); | 
 |     scripts = lib.LoadedScripts(); | 
 |     for (intptr_t j = 0; j < scripts.Length(); j++) { | 
 |       if (scripts.At(j) == ptr()) { | 
 |         return lib.ptr(); | 
 |       } | 
 |     } | 
 |   } | 
 |   return Library::null(); | 
 | } | 
 |  | 
 | DictionaryIterator::DictionaryIterator(const Library& library) | 
 |     : array_(Array::Handle(library.dictionary())), | 
 |       // Last element in array is a Smi indicating the number of entries used. | 
 |       size_(Array::Handle(library.dictionary()).Length() - 1), | 
 |       next_ix_(0) { | 
 |   MoveToNextObject(); | 
 | } | 
 |  | 
 | ObjectPtr DictionaryIterator::GetNext() { | 
 |   ASSERT(HasNext()); | 
 |   int ix = next_ix_++; | 
 |   MoveToNextObject(); | 
 |   ASSERT(array_.At(ix) != Object::null()); | 
 |   return array_.At(ix); | 
 | } | 
 |  | 
 | void DictionaryIterator::MoveToNextObject() { | 
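  // Advance next_ix_ past unused (null) dictionary slots.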
 |   Object& obj = Object::Handle(array_.At(next_ix_)); | 
 |   while (obj.IsNull() && HasNext()) { | 
 |     next_ix_++; | 
 |     obj = array_.At(next_ix_); | 
 |   } | 
 | } | 
 |  | 
 | ClassDictionaryIterator::ClassDictionaryIterator(const Library& library, | 
 |                                                  IterationKind kind) | 
 |     : DictionaryIterator(library), | 
 |       toplevel_class_(Class::Handle((kind == kIteratePrivate) | 
 |                                         ? library.toplevel_class() | 
 |                                         : Class::null())) { | 
 |   MoveToNextClass(); | 
 | } | 
 |  | 
 | ClassPtr ClassDictionaryIterator::GetNextClass() { | 
 |   ASSERT(HasNext()); | 
 |   Class& cls = Class::Handle(); | 
 |   if (next_ix_ < size_) { | 
 |     int ix = next_ix_++; | 
 |     cls ^= array_.At(ix); | 
 |     MoveToNextClass(); | 
 |     return cls.ptr(); | 
 |   } | 
 |   ASSERT(!toplevel_class_.IsNull()); | 
 |   cls = toplevel_class_.ptr(); | 
 |   toplevel_class_ = Class::null(); | 
 |   return cls.ptr(); | 
 | } | 
 |  | 
 | void ClassDictionaryIterator::MoveToNextClass() { | 
 |   Object& obj = Object::Handle(); | 
 |   while (next_ix_ < size_) { | 
 |     obj = array_.At(next_ix_); | 
 |     if (obj.IsClass()) { | 
 |       return; | 
 |     } | 
 |     next_ix_++; | 
 |   } | 
 | } | 
 |  | 
 | static void ReportTooManyImports(const Library& lib) { | 
 |   const String& url = String::Handle(lib.url()); | 
 |   Report::MessageF(Report::kError, Script::Handle(lib.LookupScript(url)), | 
 |                    TokenPosition::kNoSource, Report::AtLocation, | 
 |                    "too many imports in library '%s'", url.ToCString()); | 
 |   UNREACHABLE(); | 
 | } | 
 |  | 
 | bool Library::IsAnyCoreLibrary() const { | 
 |   String& url_str = Thread::Current()->StringHandle(); | 
 |   url_str = url(); | 
 |   return url_str.StartsWith(Symbols::DartScheme()) || | 
 |          url_str.StartsWith(Symbols::DartSchemePrivate()); | 
 | } | 
 |  | 
 | void Library::set_num_imports(intptr_t value) const { | 
 |   if (!Utils::IsUint(16, value)) { | 
 |     ReportTooManyImports(*this); | 
 |   } | 
 |   StoreNonPointer(&untag()->num_imports_, value); | 
 | } | 
 |  | 
 | void Library::set_name(const String& name) const { | 
 |   ASSERT(name.IsSymbol()); | 
 |   untag()->set_name(name.ptr()); | 
 | } | 
 |  | 
 | void Library::set_url(const String& url) const { | 
 |   untag()->set_url(url.ptr()); | 
 | } | 
 |  | 
 | void Library::set_private_key(const String& key) const { | 
 |   untag()->set_private_key(key.ptr()); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | void Library::set_kernel_program_info(const KernelProgramInfo& info) const { | 
 |   untag()->set_kernel_program_info(info.ptr()); | 
 | } | 
 |  | 
 | TypedDataViewPtr Library::KernelLibrary() const { | 
 |   const auto& info = KernelProgramInfo::Handle(kernel_program_info()); | 
 |   return info.KernelLibrary(kernel_library_index()); | 
 | } | 
 |  | 
 | intptr_t Library::KernelLibraryOffset() const { | 
 |   const auto& info = KernelProgramInfo::Handle(kernel_program_info()); | 
 |   return info.KernelLibraryStartOffset(kernel_library_index()); | 
 | } | 
 | #endif | 
 |  | 
 | void Library::set_loading_unit(const LoadingUnit& value) const { | 
 |   untag()->set_loading_unit(value.ptr()); | 
 | } | 
 |  | 
 | void Library::SetName(const String& name) const { | 
 |   // Only set name once. | 
 |   ASSERT(!Loaded()); | 
 |   set_name(name); | 
 | } | 
 |  | 
 | void Library::SetLoadInProgress() const { | 
 |   // Must not already be in the process of being loaded. | 
 |   ASSERT(untag()->load_state_ <= UntaggedLibrary::kLoadRequested); | 
 |   StoreNonPointer(&untag()->load_state_, UntaggedLibrary::kLoadInProgress); | 
 | } | 
 |  | 
 | void Library::SetLoadRequested() const { | 
 |   // Must not be already loaded. | 
 |   ASSERT(untag()->load_state_ == UntaggedLibrary::kAllocated); | 
 |   StoreNonPointer(&untag()->load_state_, UntaggedLibrary::kLoadRequested); | 
 | } | 
 |  | 
 | void Library::SetLoaded() const { | 
  // Must currently be load-requested or load-in-progress, i.e. neither
  // already loaded nor merely allocated.
 |   ASSERT(LoadInProgress() || LoadRequested()); | 
 |   StoreNonPointer(&untag()->load_state_, UntaggedLibrary::kLoaded); | 
 | } | 
 |  | 
 | void Library::AddMetadata(const Object& declaration, | 
 |                           intptr_t kernel_offset) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   Thread* thread = Thread::Current(); | 
 |   ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); | 
 |  | 
 |   MetadataMap map(metadata()); | 
 |   map.UpdateOrInsert(declaration, Smi::Handle(Smi::New(kernel_offset))); | 
 |   set_metadata(map.Release()); | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | ObjectPtr Library::GetMetadata(const Object& declaration) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   return Object::empty_array().ptr(); | 
 | #else | 
 |   RELEASE_ASSERT(declaration.IsClass() || declaration.IsField() || | 
 |                  declaration.IsFunction() || declaration.IsLibrary() || | 
 |                  declaration.IsTypeParameter() || declaration.IsNamespace()); | 
 |  | 
 |   auto thread = Thread::Current(); | 
 |   auto zone = thread->zone(); | 
 |  | 
 |   if (declaration.IsLibrary()) { | 
    // Ensure the top-level class is loaded, as it may contain the library's
    // annotations.
 |     const auto& cls = Class::Handle(zone, toplevel_class()); | 
 |     if (!cls.IsNull()) { | 
 |       cls.EnsureDeclarationLoaded(); | 
 |     } | 
 |   } | 
 |   Object& value = Object::Handle(zone); | 
 |   { | 
 |     SafepointReadRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |     MetadataMap map(metadata()); | 
 |     value = map.GetOrNull(declaration); | 
 |     set_metadata(map.Release()); | 
 |   } | 
 |   if (value.IsNull()) { | 
 |     // There is no metadata for this object. | 
 |     return Object::empty_array().ptr(); | 
 |   } | 
 |   if (!value.IsSmi()) { | 
 |     // Metadata is already evaluated. | 
 |     ASSERT(value.IsArray()); | 
 |     return value.ptr(); | 
 |   } | 
 |   const auto& smi_value = Smi::Cast(value); | 
 |   intptr_t kernel_offset = smi_value.Value(); | 
 |   ASSERT(kernel_offset > 0); | 
 |   const auto& evaluated_value = Object::Handle( | 
 |       zone, kernel::EvaluateMetadata( | 
 |                 *this, kernel_offset, | 
 |                 /* is_annotations_offset = */ declaration.IsLibrary() || | 
 |                     declaration.IsNamespace())); | 
 |   if (evaluated_value.IsArray() || evaluated_value.IsNull()) { | 
 |     ASSERT(evaluated_value.ptr() != Object::empty_array().ptr()); | 
 |     SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |     MetadataMap map(metadata()); | 
 |     if (map.GetOrNull(declaration) == smi_value.ptr()) { | 
 |       map.UpdateOrInsert(declaration, evaluated_value); | 
 |     } else { | 
 |       ASSERT(map.GetOrNull(declaration) == evaluated_value.ptr()); | 
 |     } | 
 |     set_metadata(map.Release()); | 
 |   } | 
 |   return evaluated_value.ptr(); | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | static bool HasPragma(const Object& declaration) { | 
 |   return (declaration.IsClass() && Class::Cast(declaration).has_pragma()) || | 
 |          (declaration.IsFunction() && | 
 |           Function::Cast(declaration).has_pragma()) || | 
 |          (declaration.IsField() && Field::Cast(declaration).has_pragma()); | 
 | } | 
 |  | 
 | void Library::EvaluatePragmas() { | 
 |   Object& declaration = Object::Handle(); | 
 |   const GrowableObjectArray& declarations = | 
 |       GrowableObjectArray::Handle(GrowableObjectArray::New()); | 
 |   { | 
 |     auto thread = Thread::Current(); | 
 |     SafepointReadRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |     MetadataMap map(metadata()); | 
 |     MetadataMap::Iterator it(&map); | 
 |     while (it.MoveNext()) { | 
 |       const intptr_t entry = it.Current(); | 
 |       ASSERT(entry != -1); | 
 |       declaration = map.GetKey(entry); | 
 |       if (HasPragma(declaration)) { | 
 |         declarations.Add(declaration); | 
 |       } | 
 |     } | 
 |     set_metadata(map.Release()); | 
 |   } | 
 |   for (intptr_t i = 0; i < declarations.Length(); ++i) { | 
 |     declaration = declarations.At(i); | 
 |     GetMetadata(declaration); | 
 |   } | 
 | } | 
 |  | 
 | void Library::CopyPragmas(const Library& old_lib) { | 
 |   auto thread = Thread::Current(); | 
 |   SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock()); | 
 |   MetadataMap new_map(metadata()); | 
 |   MetadataMap old_map(old_lib.metadata()); | 
 |   Object& declaration = Object::Handle(); | 
 |   Object& value = Object::Handle(); | 
 |   MetadataMap::Iterator it(&old_map); | 
 |   while (it.MoveNext()) { | 
 |     const intptr_t entry = it.Current(); | 
 |     ASSERT(entry != -1); | 
 |     declaration = old_map.GetKey(entry); | 
 |     if (HasPragma(declaration)) { | 
 |       value = old_map.GetPayload(entry, 0); | 
 |       ASSERT(!value.IsNull()); | 
 |       // Pragmas should be evaluated during hot reload phase 1 | 
 |       // (when checkpointing libraries). | 
 |       ASSERT(!value.IsSmi()); | 
 |       new_map.UpdateOrInsert(declaration, value); | 
 |     } | 
 |   } | 
 |   old_lib.set_metadata(old_map.Release()); | 
 |   set_metadata(new_map.Release()); | 
 | } | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
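// Returns true for private names ('_foo') and for private getter/setter
// names in their mangled form ('get:_foo' / 'set:_foo').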
 | static bool ShouldBePrivate(const String& name) { | 
 |   return (name.Length() >= 1 && name.CharAt(0) == '_') || | 
 |          (name.Length() >= 5 && | 
 |           (name.CharAt(4) == '_' && | 
 |            (name.CharAt(0) == 'g' || name.CharAt(0) == 's') && | 
 |            name.CharAt(1) == 'e' && name.CharAt(2) == 't' && | 
 |            name.CharAt(3) == ':')); | 
 | } | 
 |  | 
 | void Library::RehashDictionary(const Array& old_dict, | 
 |                                intptr_t new_dict_size) const { | 
 |   intptr_t old_dict_size = old_dict.Length() - 1; | 
 |   const Array& new_dict = | 
 |       Array::Handle(Array::New(new_dict_size + 1, Heap::kOld)); | 
 |   // Rehash all elements from the original dictionary | 
 |   // to the newly allocated array. | 
 |   Object& entry = Class::Handle(); | 
 |   String& entry_name = String::Handle(); | 
 |   Object& new_entry = Object::Handle(); | 
 |   intptr_t used = 0; | 
 |   for (intptr_t i = 0; i < old_dict_size; i++) { | 
 |     entry = old_dict.At(i); | 
 |     if (!entry.IsNull()) { | 
 |       entry_name = entry.DictionaryName(); | 
 |       ASSERT(!entry_name.IsNull()); | 
 |       const intptr_t hash = entry_name.Hash(); | 
 |       intptr_t index = hash % new_dict_size; | 
 |       new_entry = new_dict.At(index); | 
 |       while (!new_entry.IsNull()) { | 
 |         index = (index + 1) % new_dict_size;  // Move to next element. | 
 |         new_entry = new_dict.At(index); | 
 |       } | 
 |       new_dict.SetAt(index, entry); | 
 |       used++; | 
 |     } | 
 |   } | 
 |   // Set used count. | 
 |   ASSERT(used < new_dict_size);  // Need at least one empty slot. | 
 |   new_entry = Smi::New(used); | 
 |   new_dict.SetAt(new_dict_size, new_entry); | 
 |   // Remember the new dictionary now. | 
 |   untag()->set_dictionary(new_dict.ptr()); | 
 | } | 
 |  | 
 | void Library::AddObject(const Object& obj, const String& name) const { | 
 |   ASSERT(Thread::Current()->IsDartMutatorThread()); | 
 |   ASSERT(obj.IsClass() || obj.IsFunction() || obj.IsField() || | 
 |          obj.IsLibraryPrefix()); | 
 |   ASSERT(name.Equals(String::Handle(obj.DictionaryName()))); | 
 |   ASSERT(LookupLocalObject(name) == Object::null()); | 
 |   const Array& dict = Array::Handle(dictionary()); | 
 |   intptr_t dict_size = dict.Length() - 1; | 
 |   intptr_t index = name.Hash() % dict_size; | 
 |  | 
 |   Object& entry = Object::Handle(); | 
 |   entry = dict.At(index); | 
 |   // An empty spot will be found because we keep the hash set at most 75% full. | 
 |   while (!entry.IsNull()) { | 
 |     index = (index + 1) % dict_size; | 
 |     entry = dict.At(index); | 
 |   } | 
 |  | 
 |   // Insert the object at the empty slot. | 
 |   dict.SetAt(index, obj); | 
 |   // One more element added. | 
 |   intptr_t used_elements = Smi::Value(Smi::RawCast(dict.At(dict_size))) + 1; | 
 |   const Smi& used = Smi::Handle(Smi::New(used_elements)); | 
 |   dict.SetAt(dict_size, used);  // Update used count. | 
 |  | 
  // Rehash if the dictionary is more than 75% full.
 |   if (used_elements > ((dict_size / 4) * 3)) { | 
 |     // TODO(iposva): Avoid exponential growth. | 
 |     RehashDictionary(dict, 2 * dict_size); | 
 |   } | 
 |  | 
 |   // Invalidate the cache of loaded scripts. | 
 |   if (loaded_scripts() != Array::null()) { | 
 |     untag()->set_loaded_scripts(Array::null()); | 
 |   } | 
 | } | 
 |  | 
 | // Lookup a name in the library's re-export namespace. | 
 | // This lookup can occur from two different threads: background compiler and | 
 | // mutator thread. | 
 | ObjectPtr Library::LookupReExport(const String& name, | 
 |                                   ZoneGrowableArray<intptr_t>* trail) const { | 
 |   if (!HasExports()) { | 
 |     return Object::null(); | 
 |   } | 
 |  | 
 |   if (trail == nullptr) { | 
 |     trail = new ZoneGrowableArray<intptr_t>(); | 
 |   } | 
 |   Object& obj = Object::Handle(); | 
 |  | 
 |   const intptr_t lib_id = this->index(); | 
 |   ASSERT(lib_id >= 0);  // We use -1 to indicate that a cycle was found. | 
 |   trail->Add(lib_id); | 
 |   const Array& exports = Array::Handle(this->exports()); | 
 |   Namespace& ns = Namespace::Handle(); | 
 |   for (int i = 0; i < exports.Length(); i++) { | 
 |     ns ^= exports.At(i); | 
 |     obj = ns.Lookup(name, trail); | 
 |     if (!obj.IsNull()) { | 
 |       // The Lookup call above may return a setter x= when we are looking | 
 |       // for the name x. Make sure we only return when a matching name | 
 |       // is found. | 
 |       String& obj_name = String::Handle(obj.DictionaryName()); | 
 |       if (Field::IsSetterName(obj_name) == Field::IsSetterName(name)) { | 
 |         break; | 
 |       } | 
 |     } | 
 |   } | 
 |   trail->RemoveLast(); | 
 |   return obj.ptr(); | 
 | } | 
 |  | 
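// Looks up |name| in the library dictionary using open addressing with
// linear probing. Returns null on a miss, leaving *index at the first empty
// slot probed.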
 | ObjectPtr Library::LookupEntry(const String& name, intptr_t* index) const { | 
 |   ASSERT(!IsNull()); | 
 |   Thread* thread = Thread::Current(); | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_OBJECT_HANDLESCOPE(thread); | 
 |   REUSABLE_STRING_HANDLESCOPE(thread); | 
 |   Array& dict = thread->ArrayHandle(); | 
 |   dict = dictionary(); | 
 |   intptr_t dict_size = dict.Length() - 1; | 
 |   *index = name.Hash() % dict_size; | 
 |   Object& entry = thread->ObjectHandle(); | 
 |   String& entry_name = thread->StringHandle(); | 
 |   entry = dict.At(*index); | 
 |   // Search the entry in the hash set. | 
 |   while (!entry.IsNull()) { | 
 |     entry_name = entry.DictionaryName(); | 
 |     ASSERT(!entry_name.IsNull()); | 
 |     if (entry_name.Equals(name)) { | 
 |       return entry.ptr(); | 
 |     } | 
 |     *index = (*index + 1) % dict_size; | 
 |     entry = dict.At(*index); | 
 |   } | 
 |   return Object::null(); | 
 | } | 
 |  | 
 | void Library::AddClass(const Class& cls) const { | 
 |   ASSERT(!Compiler::IsBackgroundCompilation()); | 
 |   const String& class_name = String::Handle(cls.Name()); | 
 |   AddObject(cls, class_name); | 
 |   // Link class to this library. | 
 |   cls.set_library(*this); | 
 | } | 
 |  | 
 | static void AddScriptIfUnique(const GrowableObjectArray& scripts, | 
 |                               const Script& candidate) { | 
 |   if (candidate.IsNull()) { | 
 |     return; | 
 |   } | 
 |   Script& script_obj = Script::Handle(); | 
 |  | 
 |   for (int i = 0; i < scripts.Length(); i++) { | 
 |     script_obj ^= scripts.At(i); | 
 |     if (script_obj.ptr() == candidate.ptr()) { | 
 |       // We already have a reference to this script. | 
 |       return; | 
 |     } | 
 |   } | 
 |   // Add script to the list of scripts. | 
 |   scripts.Add(candidate); | 
 | } | 
 |  | 
 | ArrayPtr Library::LoadedScripts() const { | 
 |   // We compute the list of loaded scripts lazily. The result is | 
 |   // cached in loaded_scripts_. | 
 |   if (loaded_scripts() == Array::null()) { | 
 |     // TODO(jensj): This can be cleaned up. | 
 |     // It really should just return the content of `used_scripts`, and there | 
 |     // should be no need to do the O(n) call to `AddScriptIfUnique` per script. | 
 |  | 
 |     // Iterate over the library dictionary and collect all scripts. | 
 |     const GrowableObjectArray& scripts = | 
 |         GrowableObjectArray::Handle(GrowableObjectArray::New(8)); | 
 |     Object& entry = Object::Handle(); | 
 |     Class& cls = Class::Handle(); | 
 |     Script& owner_script = Script::Handle(); | 
 |     DictionaryIterator it(*this); | 
 |     while (it.HasNext()) { | 
 |       entry = it.GetNext(); | 
 |       if (entry.IsClass()) { | 
 |         owner_script = Class::Cast(entry).script(); | 
 |       } else if (entry.IsFunction()) { | 
 |         owner_script = Function::Cast(entry).script(); | 
 |       } else if (entry.IsField()) { | 
 |         owner_script = Field::Cast(entry).Script(); | 
 |       } else { | 
 |         continue; | 
 |       } | 
 |       AddScriptIfUnique(scripts, owner_script); | 
 |     } | 
 |  | 
 |     // Add all scripts from patch classes. | 
 |     GrowableObjectArray& patches = GrowableObjectArray::Handle(used_scripts()); | 
 |     for (intptr_t i = 0; i < patches.Length(); i++) { | 
 |       entry = patches.At(i); | 
 |       if (entry.IsClass()) { | 
 |         owner_script = Class::Cast(entry).script(); | 
 |       } else { | 
 |         ASSERT(entry.IsScript()); | 
 |         owner_script = Script::Cast(entry).ptr(); | 
 |       } | 
 |       AddScriptIfUnique(scripts, owner_script); | 
 |     } | 
 |  | 
 |     cls = toplevel_class(); | 
 |     if (!cls.IsNull()) { | 
 |       owner_script = cls.script(); | 
 |       AddScriptIfUnique(scripts, owner_script); | 
 |       // Special case: Scripts that only contain external top-level functions | 
 |       // are not included above, but can be referenced through a library's | 
 |       // anonymous classes. Example: dart-core:identical.dart. | 
 |       Function& func = Function::Handle(); | 
 |       Array& functions = Array::Handle(cls.current_functions()); | 
 |       for (intptr_t j = 0; j < functions.Length(); j++) { | 
 |         func ^= functions.At(j); | 
 |         if (func.is_external()) { | 
 |           owner_script = func.script(); | 
 |           AddScriptIfUnique(scripts, owner_script); | 
 |         } | 
 |       } | 
 |     } | 
 |  | 
 |     // Create the array of scripts and cache it in loaded_scripts_. | 
 |     const Array& scripts_array = Array::Handle(Array::MakeFixedLength(scripts)); | 
 |     untag()->set_loaded_scripts(scripts_array.ptr()); | 
 |   } | 
 |   return loaded_scripts(); | 
 | } | 
 |  | 
 | // TODO(hausner): we might want to add a script dictionary to the | 
 | // library class to make this lookup faster. | 
 | ScriptPtr Library::LookupScript(const String& url, | 
 |                                 bool useResolvedUri /* = false */) const { | 
 |   const intptr_t url_length = url.Length(); | 
 |   if (url_length == 0) { | 
 |     return Script::null(); | 
 |   } | 
 |   const Array& scripts = Array::Handle(LoadedScripts()); | 
 |   Script& script = Script::Handle(); | 
 |   String& script_url = String::Handle(); | 
 |   const intptr_t num_scripts = scripts.Length(); | 
 |   for (int i = 0; i < num_scripts; i++) { | 
 |     script ^= scripts.At(i); | 
 |     if (useResolvedUri) { | 
 |       // Use for urls with 'org-dartlang-sdk:' or 'file:' schemes | 
 |       script_url = script.resolved_url(); | 
 |     } else { | 
 |       // Use for urls with 'dart:', 'package:', or 'file:' schemes | 
 |       script_url = script.url(); | 
 |     } | 
 |     const intptr_t start_idx = script_url.Length() - url_length; | 
 |     if ((start_idx == 0) && url.Equals(script_url)) { | 
 |       return script.ptr(); | 
 |     } else if (start_idx > 0) { | 
 |       // If we do a suffix match, only match if the partial path | 
 |       // starts at or immediately after the path separator. | 
 |       if (((url.CharAt(0) == '/') || | 
 |            (script_url.CharAt(start_idx - 1) == '/')) && | 
 |           url.Equals(script_url, start_idx, url_length)) { | 
 |         return script.ptr(); | 
 |       } | 
 |     } | 
 |   } | 
 |   return Script::null(); | 
 | } | 
 |  | 
 | void Library::EnsureTopLevelClassIsFinalized() const { | 
 |   if (toplevel_class() == Object::null()) { | 
 |     return; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   const Class& cls = Class::Handle(thread->zone(), toplevel_class()); | 
 |   if (cls.is_finalized()) { | 
 |     return; | 
 |   } | 
 |   const Error& error = | 
 |       Error::Handle(thread->zone(), cls.EnsureIsFinalized(thread)); | 
 |   if (!error.IsNull()) { | 
 |     Exceptions::PropagateError(error); | 
 |   } | 
 | } | 
 |  | 
 | ObjectPtr Library::LookupLocalObject(const String& name) const { | 
 |   intptr_t index; | 
 |   return LookupEntry(name, &index); | 
 | } | 
 |  | 
 | ObjectPtr Library::LookupLocalOrReExportObject(const String& name) const { | 
 |   intptr_t index; | 
 |   EnsureTopLevelClassIsFinalized(); | 
 |   const Object& result = Object::Handle(LookupEntry(name, &index)); | 
 |   if (!result.IsNull() && !result.IsLibraryPrefix()) { | 
 |     return result.ptr(); | 
 |   } | 
 |   return LookupReExport(name); | 
 | } | 
 |  | 
 | FieldPtr Library::LookupFieldAllowPrivate(const String& name) const { | 
 |   EnsureTopLevelClassIsFinalized(); | 
 |   Object& obj = Object::Handle(LookupLocalObjectAllowPrivate(name)); | 
 |   if (obj.IsField()) { | 
 |     return Field::Cast(obj).ptr(); | 
 |   } | 
 |   return Field::null(); | 
 | } | 
 |  | 
 | FunctionPtr Library::LookupFunctionAllowPrivate(const String& name) const { | 
 |   EnsureTopLevelClassIsFinalized(); | 
 |   Object& obj = Object::Handle(LookupLocalObjectAllowPrivate(name)); | 
 |   if (obj.IsFunction()) { | 
 |     return Function::Cast(obj).ptr(); | 
 |   } | 
 |   return Function::null(); | 
 | } | 
 |  | 
 | ObjectPtr Library::LookupLocalObjectAllowPrivate(const String& name) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   Object& obj = Object::Handle(zone, Object::null()); | 
 |   obj = LookupLocalObject(name); | 
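  // If the name was not found as-is and looks private, retry with this
  // library's mangled private name (which incorporates the library-specific
  // private key).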
 |   if (obj.IsNull() && ShouldBePrivate(name)) { | 
 |     String& private_name = String::Handle(zone, PrivateName(name)); | 
 |     obj = LookupLocalObject(private_name); | 
 |   } | 
 |   return obj.ptr(); | 
 | } | 
 |  | 
 | ClassPtr Library::LookupClass(const String& name) const { | 
 |   Object& obj = Object::Handle(LookupLocalObject(name)); | 
 |   if (obj.IsClass()) { | 
 |     return Class::Cast(obj).ptr(); | 
 |   } | 
 |   return Class::null(); | 
 | } | 
 |  | 
 | ClassPtr Library::LookupClassAllowPrivate(const String& name) const { | 
 |   Object& obj = Object::Handle(LookupLocalObjectAllowPrivate(name)); | 
 |   if (obj.IsClass()) { | 
 |     return Class::Cast(obj).ptr(); | 
 |   } | 
 |   return Class::null(); | 
 | } | 
 |  | 
 | LibraryPrefixPtr Library::LookupLocalLibraryPrefix(const String& name) const { | 
 |   const Object& obj = Object::Handle(LookupLocalObject(name)); | 
 |   if (obj.IsLibraryPrefix()) { | 
 |     return LibraryPrefix::Cast(obj).ptr(); | 
 |   } | 
 |   return LibraryPrefix::null(); | 
 | } | 
 |  | 
 | void Library::set_toplevel_class(const Class& value) const { | 
 |   ASSERT(untag()->toplevel_class() == Class::null()); | 
 |   untag()->set_toplevel_class(value.ptr()); | 
 | } | 
 |  | 
 | void Library::set_dependencies(const Array& deps) const { | 
 |   untag()->set_dependencies(deps.ptr()); | 
 | } | 
 |  | 
 | void Library::set_metadata(const Array& value) const { | 
 |   if (untag()->metadata() != value.ptr()) { | 
 |     DEBUG_ASSERT( | 
 |         IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |     untag()->set_metadata(value.ptr()); | 
 |   } | 
 | } | 
 |  | 
 | LibraryPtr Library::ImportLibraryAt(intptr_t index) const { | 
 |   Namespace& import = Namespace::Handle(ImportAt(index)); | 
 |   if (import.IsNull()) { | 
 |     return Library::null(); | 
 |   } | 
 |   return import.target(); | 
 | } | 
 |  | 
 | NamespacePtr Library::ImportAt(intptr_t index) const { | 
 |   if ((index < 0) || index >= num_imports()) { | 
 |     return Namespace::null(); | 
 |   } | 
 |   const Array& import_list = Array::Handle(imports()); | 
 |   return Namespace::RawCast(import_list.At(index)); | 
 | } | 
 |  | 
 | void Library::DropDependenciesAndCaches() const { | 
 |   untag()->set_imports(Object::empty_array().ptr()); | 
 |   untag()->set_exports(Object::empty_array().ptr()); | 
 |   StoreNonPointer(&untag()->num_imports_, 0); | 
 |   untag()->set_loaded_scripts(Array::null()); | 
 |   untag()->set_dependencies(Array::null()); | 
 | #if defined(PRODUCT) | 
 |   // used_scripts is only used by vm-service. | 
 |   untag()->set_used_scripts(GrowableObjectArray::null()); | 
 | #endif | 
 | } | 
 |  | 
 | void Library::AddImport(const Namespace& ns) const { | 
 |   Array& imports = Array::Handle(this->imports()); | 
 |   intptr_t capacity = imports.Length(); | 
 |   if (num_imports() == capacity) { | 
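    // Grow by a fixed increment plus 25% of the current capacity.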
 |     capacity = capacity + kImportsCapacityIncrement + (capacity >> 2); | 
 |     imports = Array::Grow(imports, capacity); | 
 |     untag()->set_imports(imports.ptr()); | 
 |   } | 
 |   intptr_t index = num_imports(); | 
 |   imports.SetAt(index, ns); | 
 |   set_num_imports(index + 1); | 
 | } | 
 |  | 
 | // Convenience function to determine whether the export list is | 
 | // non-empty. | 
 | bool Library::HasExports() const { | 
 |   return exports() != Object::empty_array().ptr(); | 
 | } | 
 |  | 
 | // We add one namespace at a time to the exports array and don't | 
 | // pre-allocate any unused capacity. The assumption is that | 
 | // re-exports are quite rare. | 
 | void Library::AddExport(const Namespace& ns) const { | 
 |   Array& exports = Array::Handle(this->exports()); | 
 |   intptr_t num_exports = exports.Length(); | 
 |   exports = Array::Grow(exports, num_exports + 1); | 
 |   untag()->set_exports(exports.ptr()); | 
 |   exports.SetAt(num_exports, ns); | 
 | } | 
 |  | 
 | static ArrayPtr NewDictionary(intptr_t initial_size) { | 
 |   const Array& dict = Array::Handle(Array::New(initial_size + 1, Heap::kOld)); | 
  // The last element of the dictionary specifies the number of in-use slots.
 |   dict.SetAt(initial_size, Object::smi_zero()); | 
 |   return dict.ptr(); | 
 | } | 
 |  | 
 | void Library::InitClassDictionary() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   ASSERT(thread->IsDartMutatorThread()); | 
 |   REUSABLE_FUNCTION_HANDLESCOPE(thread); | 
 |   Array& dictionary = thread->ArrayHandle(); | 
 |   // TODO(iposva): Find reasonable initial size. | 
 |   const int kInitialElementCount = 16; | 
 |   dictionary = NewDictionary(kInitialElementCount); | 
 |   untag()->set_dictionary(dictionary.ptr()); | 
 | } | 
 |  | 
 | void Library::InitImportList() const { | 
 |   const Array& imports = | 
 |       Array::Handle(Array::New(kInitialImportsCapacity, Heap::kOld)); | 
 |   untag()->set_imports(imports.ptr()); | 
 |   StoreNonPointer(&untag()->num_imports_, 0); | 
 | } | 
 |  | 
 | LibraryPtr Library::New() { | 
 |   ASSERT(Object::library_class() != Class::null()); | 
 |   return Object::Allocate<Library>(Heap::kOld); | 
 | } | 
 |  | 
 | LibraryPtr Library::NewLibraryHelper(const String& url, bool import_core_lib) { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   ASSERT(thread->IsDartMutatorThread()); | 
 |   // Force the url to have a hash code. | 
 |   url.Hash(); | 
 |   const bool dart_scheme = url.StartsWith(Symbols::DartScheme()); | 
 |   const Library& result = Library::Handle(zone, Library::New()); | 
 |   result.untag()->set_name(Symbols::Empty().ptr()); | 
 |   result.untag()->set_url(url.ptr()); | 
 |   result.untag()->set_dictionary(Object::empty_array().ptr()); | 
 |   Array& array = Array::Handle(zone); | 
 |   array = HashTables::New<MetadataMap>(4, Heap::kOld); | 
 |   result.untag()->set_metadata(array.ptr()); | 
 |   result.untag()->set_toplevel_class(Class::null()); | 
 |   GrowableObjectArray& list = GrowableObjectArray::Handle(zone); | 
 |   list = GrowableObjectArray::New(Object::empty_array(), Heap::kOld); | 
 |   result.untag()->set_used_scripts(list.ptr()); | 
 |   result.untag()->set_imports(Object::empty_array().ptr()); | 
 |   result.untag()->set_exports(Object::empty_array().ptr()); | 
 |   NOT_IN_PRECOMPILED_RUNTIME( | 
 |       result.untag()->set_kernel_program_info(KernelProgramInfo::null())); | 
 |   result.untag()->set_loaded_scripts(Array::null()); | 
 |   result.set_native_entry_resolver(nullptr); | 
 |   result.set_native_entry_symbol_resolver(nullptr); | 
 |   result.set_ffi_native_resolver(nullptr); | 
 |   result.set_flags(0); | 
 |   result.set_is_in_fullsnapshot(false); | 
 |   // This logic is also in the DAP debug adapter in DDS to avoid needing | 
 |   // to call setLibraryDebuggable for every library for every isolate. | 
 |   // If these defaults change, the same should be done there in | 
 |   // dap/IsolateManager._getIsLibraryDebuggableByDefault. | 
 |   if (dart_scheme) { | 
 |     // Only debug dart: libraries if we have been requested to show invisible | 
 |     // frames. | 
 |     result.set_debuggable(FLAG_show_invisible_frames); | 
 |   } else { | 
 |     // Default to debuggable for all other libraries. | 
 |     result.set_debuggable(true); | 
 |   } | 
 |   result.set_is_dart_scheme(dart_scheme); | 
 |   NOT_IN_PRECOMPILED( | 
 |       result.StoreNonPointer(&result.untag()->kernel_library_index_, -1)); | 
 |   result.StoreNonPointer(&result.untag()->load_state_, | 
 |                          UntaggedLibrary::kAllocated); | 
 |   result.StoreNonPointer(&result.untag()->index_, -1); | 
 |   result.InitClassDictionary(); | 
 |   result.InitImportList(); | 
 |   result.AllocatePrivateKey(); | 
 |   if (import_core_lib) { | 
 |     const Library& core_lib = Library::Handle(zone, Library::CoreLibrary()); | 
 |     ASSERT(!core_lib.IsNull()); | 
 |     const Namespace& ns = | 
 |         Namespace::Handle(zone, Namespace::New(core_lib, Object::null_array(), | 
 |                                                Object::null_array(), result)); | 
 |     result.AddImport(ns); | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | LibraryPtr Library::New(const String& url) { | 
 |   return NewLibraryHelper(url, false); | 
 | } | 
 |  | 
 | void Library::set_flags(uint8_t flags) const { | 
 |   StoreNonPointer(&untag()->flags_, flags); | 
 | } | 
 |  | 
 | void Library::InitCoreLibrary(IsolateGroup* isolate_group) { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const String& core_lib_url = Symbols::DartCore(); | 
 |   const Library& core_lib = | 
 |       Library::Handle(zone, Library::NewLibraryHelper(core_lib_url, false)); | 
 |   core_lib.SetLoadRequested(); | 
 |   core_lib.Register(thread); | 
 |   isolate_group->object_store()->set_bootstrap_library(ObjectStore::kCore, | 
 |                                                        core_lib); | 
 |   isolate_group->object_store()->set_root_library(Library::Handle()); | 
 | } | 
 |  | 
// Invoke the function; fall back to noSuchMethod if it is null, if the
// arguments are invalid, or if respect_reflectable is set and the function
// is not reflectable.
 | static ObjectPtr InvokeInstanceFunction( | 
 |     Thread* thread, | 
 |     const Instance& receiver, | 
 |     const Function& function, | 
 |     const String& target_name, | 
 |     const Array& args, | 
 |     const Array& args_descriptor_array, | 
 |     bool respect_reflectable, | 
 |     const TypeArguments& instantiator_type_args) { | 
 |   // Note "args" is already the internal arguments with the receiver as the | 
 |   // first element. | 
 |   ArgumentsDescriptor args_descriptor(args_descriptor_array); | 
 |   if (function.IsNull() || | 
 |       !function.AreValidArguments(args_descriptor, nullptr) || | 
 |       (respect_reflectable && !function.is_reflectable())) { | 
 |     return DartEntry::InvokeNoSuchMethod(thread, receiver, target_name, args, | 
 |                                          args_descriptor_array); | 
 |   } | 
 |   ObjectPtr type_error = function.DoArgumentTypesMatch(args, args_descriptor, | 
 |                                                        instantiator_type_args); | 
 |   if (type_error != Error::null()) { | 
 |     return type_error; | 
 |   } | 
 |   return DartEntry::InvokeFunction(function, args, args_descriptor_array); | 
 | } | 
 |  | 
 | ObjectPtr Library::InvokeGetter(const String& getter_name, | 
 |                                 bool throw_nsm_if_absent, | 
 |                                 bool respect_reflectable, | 
 |                                 bool check_is_entrypoint) const { | 
 |   Object& obj = Object::Handle(LookupLocalOrReExportObject(getter_name)); | 
 |   Function& getter = Function::Handle(); | 
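  // Resolution order below: a static field's value, then a getter (on the
  // field's owner class or at the library's top level), then closurizing a
  // top-level method of the same name.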
 |   if (obj.IsField()) { | 
 |     const Field& field = Field::Cast(obj); | 
 |     if (check_is_entrypoint) { | 
 |       CHECK_ERROR(field.VerifyEntryPoint(EntryPointPragma::kGetterOnly)); | 
 |     } | 
 |     if (!field.IsUninitialized()) { | 
 |       return field.StaticValue(); | 
 |     } | 
 |     // An uninitialized field was found.  Check for a getter in the field's | 
 |     // owner class. | 
 |     const Class& klass = Class::Handle(field.Owner()); | 
 |     const String& internal_getter_name = | 
 |         String::Handle(Field::GetterName(getter_name)); | 
 |     getter = klass.LookupStaticFunction(internal_getter_name); | 
 |   } else { | 
 |     // No field found. Check for a getter in the lib. | 
 |     const String& internal_getter_name = | 
 |         String::Handle(Field::GetterName(getter_name)); | 
 |     obj = LookupLocalOrReExportObject(internal_getter_name); | 
 |     if (obj.IsFunction()) { | 
 |       getter = Function::Cast(obj).ptr(); | 
 |       if (check_is_entrypoint) { | 
 |         CHECK_ERROR(getter.VerifyCallEntryPoint()); | 
 |       } | 
 |     } else { | 
 |       obj = LookupLocalOrReExportObject(getter_name); | 
 |       // Normally static top-level methods cannot be closurized through the | 
 |       // native API even if they are marked as entry-points, with the one | 
 |       // exception of "main". | 
 |       if (obj.IsFunction() && check_is_entrypoint) { | 
 |         if (!getter_name.Equals(String::Handle(String::New("main"))) || | 
 |             ptr() != IsolateGroup::Current()->object_store()->root_library()) { | 
 |           CHECK_ERROR(Function::Cast(obj).VerifyClosurizedEntryPoint()); | 
 |         } | 
 |       } | 
 |       if (obj.IsFunction() && Function::Cast(obj).SafeToClosurize()) { | 
 |         // Looking for a getter but found a regular method: closurize it. | 
 |         const Function& closure_function = | 
 |             Function::Handle(Function::Cast(obj).ImplicitClosureFunction()); | 
 |         return closure_function.ImplicitStaticClosure(); | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
 |   if (getter.IsNull() || (respect_reflectable && !getter.is_reflectable())) { | 
 |     if (throw_nsm_if_absent) { | 
 |       return ThrowNoSuchMethod(Object::null_string(), getter_name, | 
 |                                Object::null_array(), Object::null_array(), | 
 |                                InvocationMirror::kTopLevel, | 
 |                                InvocationMirror::kGetter); | 
 |     } | 
 |  | 
 |     // Fall through case: Indicate that we didn't find any function or field | 
 |     // using a special null instance. This is different from a field being null. | 
 |     // Callers make sure that this null does not leak into Dartland. | 
 |     return Object::sentinel().ptr(); | 
 |   } | 
 |  | 
 |   // Invoke the getter and return the result. | 
 |   return DartEntry::InvokeFunction(getter, Object::empty_array()); | 
 | } | 
 |  | 
 | ObjectPtr Library::InvokeSetter(const String& setter_name, | 
 |                                 const Instance& value, | 
 |                                 bool respect_reflectable, | 
 |                                 bool check_is_entrypoint) const { | 
 |   Object& obj = Object::Handle(LookupLocalOrReExportObject(setter_name)); | 
 |   const String& internal_setter_name = | 
 |       String::Handle(Field::SetterName(setter_name)); | 
 |   AbstractType& setter_type = AbstractType::Handle(); | 
 |   AbstractType& argument_type = AbstractType::Handle(value.GetType(Heap::kOld)); | 
 |   if (obj.IsField()) { | 
 |     const Field& field = Field::Cast(obj); | 
 |     if (check_is_entrypoint) { | 
 |       CHECK_ERROR(field.VerifyEntryPoint(EntryPointPragma::kSetterOnly)); | 
 |     } | 
 |     setter_type = field.type(); | 
 |     if (!argument_type.IsNullType() && !setter_type.IsDynamicType() && | 
 |         !value.IsInstanceOf(setter_type, Object::null_type_arguments(), | 
 |                             Object::null_type_arguments())) { | 
 |       return ThrowTypeError(field.token_pos(), value, setter_type, setter_name); | 
 |     } | 
 |     if (field.is_final() || (respect_reflectable && !field.is_reflectable())) { | 
 |       const int kNumArgs = 1; | 
 |       const Array& args = Array::Handle(Array::New(kNumArgs)); | 
 |       args.SetAt(0, value); | 
 |  | 
 |       return ThrowNoSuchMethod(Object::null_string(), internal_setter_name, | 
 |                                args, Object::null_array(), | 
 |                                InvocationMirror::kTopLevel, | 
 |                                InvocationMirror::kSetter); | 
 |     } | 
 |     field.SetStaticValue(value); | 
 |     return value.ptr(); | 
 |   } | 
 |  | 
 |   Function& setter = Function::Handle(); | 
 |   obj = LookupLocalOrReExportObject(internal_setter_name); | 
 |   if (obj.IsFunction()) { | 
 |     setter ^= obj.ptr(); | 
 |   } | 
 |  | 
 |   if (!setter.IsNull() && check_is_entrypoint) { | 
 |     CHECK_ERROR(setter.VerifyCallEntryPoint()); | 
 |   } | 
 |  | 
 |   const int kNumArgs = 1; | 
 |   const Array& args = Array::Handle(Array::New(kNumArgs)); | 
 |   args.SetAt(0, value); | 
 |   if (setter.IsNull() || (respect_reflectable && !setter.is_reflectable())) { | 
 |     return ThrowNoSuchMethod(Object::null_string(), internal_setter_name, args, | 
 |                              Object::null_array(), InvocationMirror::kTopLevel, | 
 |                              InvocationMirror::kSetter); | 
 |   } | 
 |  | 
 |   setter_type = setter.ParameterTypeAt(0); | 
 |   if (!argument_type.IsNullType() && !setter_type.IsDynamicType() && | 
 |       !value.IsInstanceOf(setter_type, Object::null_type_arguments(), | 
 |                           Object::null_type_arguments())) { | 
 |     return ThrowTypeError(setter.token_pos(), value, setter_type, setter_name); | 
 |   } | 
 |  | 
 |   return DartEntry::InvokeFunction(setter, args); | 
 | } | 
 |  | 
 | ObjectPtr Library::Invoke(const String& function_name, | 
 |                           const Array& args, | 
 |                           const Array& arg_names, | 
 |                           bool respect_reflectable, | 
 |                           bool check_is_entrypoint) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |  | 
 |   // We don't pass any explicit type arguments, which will be understood as | 
 |   // using dynamic for any function type arguments by lower layers. | 
 |   const int kTypeArgsLen = 0; | 
 |   const Array& args_descriptor_array = Array::Handle( | 
 |       zone, ArgumentsDescriptor::NewBoxed(kTypeArgsLen, args.Length(), | 
 |                                           arg_names, Heap::kNew)); | 
 |   ArgumentsDescriptor args_descriptor(args_descriptor_array); | 
 |  | 
 |   auto& function = Function::Handle(zone); | 
 |   auto& result = | 
 |       Object::Handle(zone, LookupLocalOrReExportObject(function_name)); | 
 |   if (result.IsFunction()) { | 
 |     function ^= result.ptr(); | 
 |   } | 
 |  | 
 |   if (!function.IsNull() && check_is_entrypoint) { | 
 |     CHECK_ERROR(function.VerifyCallEntryPoint()); | 
 |   } | 
 |  | 
 |   if (function.IsNull()) { | 
 |     // Didn't find a method: try to find a getter and invoke call on its result. | 
 |     const Object& getter_result = Object::Handle( | 
 |         zone, InvokeGetter(function_name, false, respect_reflectable, | 
 |                            check_is_entrypoint)); | 
 |     if (getter_result.ptr() != Object::sentinel().ptr()) { | 
 |       if (check_is_entrypoint) { | 
 |         CHECK_ERROR(EntryPointFieldInvocationError(function_name)); | 
 |       } | 
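      // The callable object becomes the first argument of the call, hence
      // the new descriptor is built for Count() + 1 arguments.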
 |       const auto& call_args_descriptor_array = Array::Handle( | 
 |           zone, ArgumentsDescriptor::NewBoxed(args_descriptor.TypeArgsLen(), | 
 |                                               args_descriptor.Count() + 1, | 
 |                                               arg_names, Heap::kNew)); | 
 |       const auto& call_args = Array::Handle( | 
 |           zone, | 
 |           CreateCallableArgumentsFromStatic(zone, Instance::Cast(getter_result), | 
 |                                             args, arg_names, args_descriptor)); | 
 |       return DartEntry::InvokeClosure(thread, call_args, | 
 |                                       call_args_descriptor_array); | 
 |     } | 
 |   } | 
 |  | 
 |   if (function.IsNull() || | 
 |       (respect_reflectable && !function.is_reflectable())) { | 
 |     return ThrowNoSuchMethod(Object::null_string(), function_name, args, | 
 |                              arg_names, InvocationMirror::kTopLevel, | 
 |                              InvocationMirror::kMethod); | 
 |   } | 
 |   if (!function.AreValidArguments(args_descriptor, nullptr)) { | 
 |     return ThrowNoSuchMethod( | 
 |         String::Handle(function.UserVisibleSignature()), function_name, args, | 
 |         arg_names, InvocationMirror::kTopLevel, InvocationMirror::kMethod); | 
 |   } | 
 |   // This is a static function, so we pass an empty instantiator tav. | 
 |   ASSERT(function.is_static()); | 
 |   ObjectPtr type_error = function.DoArgumentTypesMatch( | 
 |       args, args_descriptor, Object::empty_type_arguments()); | 
 |   if (type_error != Error::null()) { | 
 |     return type_error; | 
 |   } | 
 |   return DartEntry::InvokeFunction(function, args, args_descriptor_array); | 
 | } | 
 |  | 
 | void Library::InitNativeWrappersLibrary(IsolateGroup* isolate_group, | 
 |                                         bool is_kernel) { | 
 |   const int kNumNativeWrappersClasses = 4; | 
 |   COMPILE_ASSERT((kNumNativeWrappersClasses > 0) && | 
 |                  (kNumNativeWrappersClasses < 10)); | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const String& native_flds_lib_url = Symbols::DartNativeWrappers(); | 
 |   const Library& native_flds_lib = Library::Handle( | 
 |       zone, Library::NewLibraryHelper(native_flds_lib_url, false)); | 
 |   const String& native_flds_lib_name = Symbols::DartNativeWrappersLibName(); | 
 |   native_flds_lib.SetName(native_flds_lib_name); | 
 |   native_flds_lib.SetLoadRequested(); | 
 |   native_flds_lib.Register(thread); | 
 |   native_flds_lib.SetLoadInProgress(); | 
 |   isolate_group->object_store()->set_native_wrappers_library(native_flds_lib); | 
 |   const char* const kNativeWrappersClass = "NativeFieldWrapperClass"; | 
 |   const int kNameLength = 25; | 
 |   ASSERT(kNameLength == (strlen(kNativeWrappersClass) + 1 + 1)); | 
 |   char name_buffer[kNameLength]; | 
 |   String& cls_name = String::Handle(zone); | 
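  // Create NativeFieldWrapperClass1 .. NativeFieldWrapperClass4, where class
  // N reserves space for N native fields.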
 |   for (int fld_cnt = 1; fld_cnt <= kNumNativeWrappersClasses; fld_cnt++) { | 
 |     Utils::SNPrint(name_buffer, kNameLength, "%s%d", kNativeWrappersClass, | 
 |                    fld_cnt); | 
 |     cls_name = Symbols::New(thread, name_buffer); | 
 |     Class::NewNativeWrapper(native_flds_lib, cls_name, fld_cnt); | 
 |   } | 
 |   // NOTE: If we bootstrap from a Kernel IR file we want to generate the | 
 |   // synthetic constructors for the native wrapper classes.  We leave this up to | 
 |   // the [KernelLoader] who will take care of it later. | 
 |   if (!is_kernel) { | 
 |     native_flds_lib.SetLoaded(); | 
 |   } | 
 | } | 
 |  | 
// LibraryLookupMap maps library URL strings to libraries.
 | class LibraryLookupTraits { | 
 |  public: | 
 |   static const char* Name() { return "LibraryLookupTraits"; } | 
 |   static bool ReportStats() { return false; } | 
 |  | 
 |   static bool IsMatch(const Object& a, const Object& b) { | 
 |     const String& a_str = String::Cast(a); | 
 |     const String& b_str = String::Cast(b); | 
 |  | 
 |     ASSERT(a_str.HasHash() && b_str.HasHash()); | 
 |     return a_str.Equals(b_str); | 
 |   } | 
 |  | 
 |   static uword Hash(const Object& key) { return String::Cast(key).Hash(); } | 
 |  | 
 |   static ObjectPtr NewKey(const String& str) { return str.ptr(); } | 
 | }; | 
 | typedef UnorderedHashMap<LibraryLookupTraits> LibraryLookupMap; | 
 |  | 
// Returns the library with the given url in the current isolate group, or
// Library::null() if no such library is registered.
 | LibraryPtr Library::LookupLibrary(Thread* thread, const String& url) { | 
 |   Zone* zone = thread->zone(); | 
 |   ObjectStore* object_store = thread->isolate_group()->object_store(); | 
 |  | 
 |   // Make sure the URL string has an associated hash code | 
 |   // to speed up the repeated equality checks. | 
 |   url.Hash(); | 
 |  | 
 |   // Use the libraries map to lookup the library by URL. | 
 |   Library& lib = Library::Handle(zone); | 
 |   if (object_store->libraries_map() == Array::null()) { | 
 |     return Library::null(); | 
 |   } else { | 
 |     LibraryLookupMap map(object_store->libraries_map()); | 
 |     lib ^= map.GetOrNull(url); | 
 |     ASSERT(map.Release().ptr() == object_store->libraries_map()); | 
 |   } | 
 |   return lib.ptr(); | 
 | } | 
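
// Illustrative use (a sketch, assuming a mutator thread with a zone):
//
//   const String& url = String::Handle(String::New("dart:core"));
//   const Library& lib =
//       Library::Handle(Library::LookupLibrary(Thread::Current(), url));
//   if (lib.IsNull()) { /* No library with this url is registered. */ }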
 |  | 
 | bool Library::IsPrivate(const String& name) { | 
 |   if (ShouldBePrivate(name)) return true; | 
 |   // Factory names: List._fromLiteral. | 
 |   for (intptr_t i = 1; i < name.Length() - 1; i++) { | 
 |     if (name.CharAt(i) == '.') { | 
 |       if (name.CharAt(i + 1) == '_') { | 
 |         return true; | 
 |       } | 
 |     } | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
// Create a private key for this library. It is based on the hash of the
// library URI and the sequence number of the library, which together
// guarantee unique private keys without having to check for collisions.
 | void Library::AllocatePrivateKey() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   auto isolate_group = thread->isolate_group(); | 
 |  | 
 | #if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (isolate_group->IsReloading()) { | 
 |     // When reloading, we need to make sure we use the original private key | 
 |     // if this library previously existed. | 
 |     ProgramReloadContext* program_reload_context = | 
 |         isolate_group->program_reload_context(); | 
 |     const String& original_key = | 
 |         String::Handle(program_reload_context->FindLibraryPrivateKey(*this)); | 
 |     if (!original_key.IsNull()) { | 
 |       untag()->set_private_key(original_key.ptr()); | 
 |       return; | 
 |     } | 
 |   } | 
 | #endif  // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
  // Format of the private key is: "@<sequence number><6 digits of hash>".
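  // For example, the 12th registered library whose masked URL hash is 6789
  // receives the key "@12006789" (illustrative values).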
 |   const intptr_t hash_mask = 0x7FFFF; | 
 |  | 
 |   const String& url = String::Handle(zone, this->url()); | 
 |   intptr_t hash_value = url.Hash() & hash_mask; | 
 |  | 
 |   const GrowableObjectArray& libs = GrowableObjectArray::Handle( | 
 |       zone, isolate_group->object_store()->libraries()); | 
 |   intptr_t sequence_value = libs.Length(); | 
 |  | 
 |   char private_key[32]; | 
 |   Utils::SNPrint(private_key, sizeof(private_key), "%c%" Pd "%06" Pd "", | 
 |                  kPrivateKeySeparator, sequence_value, hash_value); | 
 |   const String& key = | 
 |       String::Handle(zone, String::New(private_key, Heap::kOld)); | 
 |   key.Hash();  // This string may end up in the VM isolate. | 
 |   untag()->set_private_key(key.ptr()); | 
 | } | 
 |  | 
 | const String& Library::PrivateCoreLibName(const String& member) { | 
 |   const Library& core_lib = Library::Handle(Library::CoreLibrary()); | 
 |   const String& private_name = String::ZoneHandle(core_lib.PrivateName(member)); | 
 |   return private_name; | 
 | } | 
 |  | 
 | bool Library::IsPrivateCoreLibName(const String& name, const String& member) { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const auto& core_lib = Library::Handle(zone, Library::CoreLibrary()); | 
 |   const auto& private_key = String::Handle(zone, core_lib.private_key()); | 
 |  | 
 |   ASSERT(core_lib.IsPrivate(member)); | 
 |   return name.EqualsConcat(member, private_key); | 
 | } | 
 |  | 
 | ClassPtr Library::LookupCoreClass(const String& class_name) { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const Library& core_lib = Library::Handle(zone, Library::CoreLibrary()); | 
 |   String& name = String::Handle(zone, class_name.ptr()); | 
 |   if (class_name.CharAt(0) == kPrivateIdentifierStart) { | 
 |     // Private identifiers are mangled on a per library basis. | 
 |     name = Symbols::FromConcat(thread, name, | 
 |                                String::Handle(zone, core_lib.private_key())); | 
 |   } | 
 |   return core_lib.LookupClass(name); | 
 | } | 
 |  | 
// Cannot handle qualified names properly, as it only appends the private key
// to the end (e.g. _Alfa.foo -> _Alfa.foo@...).
 | StringPtr Library::PrivateName(const String& name) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   ASSERT(IsPrivate(name)); | 
 |   // ASSERT(strchr(name, '@') == nullptr); | 
 |   String& str = String::Handle(zone); | 
 |   str = name.ptr(); | 
 |   str = Symbols::FromConcat(thread, str, | 
 |                             String::Handle(zone, this->private_key())); | 
 |   return str.ptr(); | 
 | } | 
 |  | 
 | LibraryPtr Library::GetLibrary(intptr_t index) { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   const GrowableObjectArray& libs = GrowableObjectArray::Handle( | 
 |       zone, isolate_group->object_store()->libraries()); | 
 |   ASSERT(!libs.IsNull()); | 
 |   if ((0 <= index) && (index < libs.Length())) { | 
 |     Library& lib = Library::Handle(zone); | 
 |     lib ^= libs.At(index); | 
 |     return lib.ptr(); | 
 |   } | 
 |   return Library::null(); | 
 | } | 
 |  | 
 | void Library::Register(Thread* thread) const { | 
 |   Zone* zone = thread->zone(); | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   ObjectStore* object_store = isolate_group->object_store(); | 
 |  | 
 |   // A library is "registered" in two places: | 
 |   // - A growable array mapping from index to library. | 
 |   const String& lib_url = String::Handle(zone, url()); | 
 |   ASSERT(Library::LookupLibrary(thread, lib_url) == Library::null()); | 
 |   ASSERT(lib_url.HasHash()); | 
 |   GrowableObjectArray& libs = | 
 |       GrowableObjectArray::Handle(zone, object_store->libraries()); | 
 |   ASSERT(!libs.IsNull()); | 
 |   set_index(libs.Length()); | 
 |   libs.Add(*this); | 
 |  | 
 |   // - A map from URL string to library. | 
 |   if (object_store->libraries_map() == Array::null()) { | 
 |     LibraryLookupMap map(HashTables::New<LibraryLookupMap>(16, Heap::kOld)); | 
 |     object_store->set_libraries_map(map.Release()); | 
 |   } | 
 |  | 
 |   LibraryLookupMap map(object_store->libraries_map()); | 
 |   bool present = map.UpdateOrInsert(lib_url, *this); | 
 |   ASSERT(!present); | 
 |   object_store->set_libraries_map(map.Release()); | 
 | } | 
 |  | 
 | void Library::RegisterLibraries(Thread* thread, | 
 |                                 const GrowableObjectArray& libs) { | 
 |   Zone* zone = thread->zone(); | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   Library& lib = Library::Handle(zone); | 
 |   String& lib_url = String::Handle(zone); | 
 |  | 
 |   LibraryLookupMap map(HashTables::New<LibraryLookupMap>(16, Heap::kOld)); | 
 |  | 
 |   intptr_t len = libs.Length(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     lib ^= libs.At(i); | 
 |     lib_url = lib.url(); | 
 |     map.InsertNewOrGetValue(lib_url, lib); | 
 |   } | 
 |   // Now remember these in the isolate's object store. | 
 |   isolate_group->object_store()->set_libraries(libs); | 
 |   isolate_group->object_store()->set_libraries_map(map.Release()); | 
 | } | 
 |  | 
 | LibraryPtr Library::AsyncLibrary() { | 
 |   return IsolateGroup::Current()->object_store()->async_library(); | 
 | } | 
 |  | 
 | LibraryPtr Library::ConvertLibrary() { | 
 |   return IsolateGroup::Current()->object_store()->convert_library(); | 
 | } | 
 |  | 
 | LibraryPtr Library::CoreLibrary() { | 
 |   return IsolateGroup::Current()->object_store()->core_library(); | 
 | } | 
 |  | 
 | LibraryPtr Library::CollectionLibrary() { | 
 |   return IsolateGroup::Current()->object_store()->collection_library(); | 
 | } | 
 |  | 
 | LibraryPtr Library::DeveloperLibrary() { | 
 |   return IsolateGroup::Current()->object_store()->developer_library(); | 
 | } | 
 |  | 
 | LibraryPtr Library::FfiLibrary() { | 
 |   return IsolateGroup::Current()->object_store()->ffi_library(); | 
 | } | 
 |  | 
 | LibraryPtr Library::InternalLibrary() { | 
 |   return IsolateGroup::Current()->object_store()->_internal_library(); | 
 | } | 
 |  | 
 | LibraryPtr Library::IsolateLibrary() { | 
 |   return IsolateGroup::Current()->object_store()->isolate_library(); | 
 | } | 
 |  | 
 | LibraryPtr Library::MathLibrary() { | 
 |   return IsolateGroup::Current()->object_store()->math_library(); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | LibraryPtr Library::MirrorsLibrary() { | 
 |   return IsolateGroup::Current()->object_store()->mirrors_library(); | 
 | } | 
 | #endif | 
 |  | 
 | LibraryPtr Library::NativeWrappersLibrary() { | 
 |   return IsolateGroup::Current()->object_store()->native_wrappers_library(); | 
 | } | 
 |  | 
 | LibraryPtr Library::TypedDataLibrary() { | 
 |   return IsolateGroup::Current()->object_store()->typed_data_library(); | 
 | } | 
 |  | 
 | LibraryPtr Library::VMServiceLibrary() { | 
 |   return IsolateGroup::Current()->object_store()->_vmservice_library(); | 
 | } | 
 |  | 
 | const char* Library::ToCString() const { | 
 |   NoSafepointScope no_safepoint; | 
 |   const String& name = String::Handle(url()); | 
 |   return OS::SCreate(Thread::Current()->zone(), "Library:'%s'", | 
 |                      name.ToCString()); | 
 | } | 
 |  | 
 | LibraryPtr LibraryPrefix::GetLibrary(int index) const { | 
  if ((index >= 0) && (index < num_imports())) {
 |     const Array& imports = Array::Handle(this->imports()); | 
 |     Namespace& import = Namespace::Handle(); | 
 |     import ^= imports.At(index); | 
 |     return import.target(); | 
 |   } | 
 |   return Library::null(); | 
 | } | 
 |  | 
 | void LibraryPrefix::AddImport(const Namespace& import) const { | 
 |   intptr_t num_current_imports = num_imports(); | 
 |  | 
 |   // Prefixes with deferred libraries can only contain one library. | 
 |   ASSERT((num_current_imports == 0) || !is_deferred_load()); | 
 |  | 
 |   // The library needs to be added to the list. | 
 |   Array& imports = Array::Handle(this->imports()); | 
 |   const intptr_t length = (imports.IsNull()) ? 0 : imports.Length(); | 
  // Grow the list if it is full: add a fixed increment plus 25% headroom.
 |   if (num_current_imports >= length) { | 
 |     const intptr_t new_length = length + kIncrementSize + (length >> 2); | 
 |     imports = Array::Grow(imports, new_length, Heap::kOld); | 
 |     set_imports(imports); | 
 |   } | 
 |   imports.SetAt(num_current_imports, import); | 
 |   set_num_imports(num_current_imports + 1); | 
 | } | 
 |  | 
 | LibraryPrefixPtr LibraryPrefix::New() { | 
 |   return Object::Allocate<LibraryPrefix>(Heap::kOld); | 
 | } | 
 |  | 
 | LibraryPrefixPtr LibraryPrefix::New(const String& name, | 
 |                                     const Namespace& import, | 
 |                                     bool deferred_load, | 
 |                                     const Library& importer) { | 
 |   const LibraryPrefix& result = LibraryPrefix::Handle(LibraryPrefix::New()); | 
 |   result.set_name(name); | 
 |   result.set_num_imports(0); | 
 |   result.set_importer(importer); | 
 |   result.StoreNonPointer(&result.untag()->is_deferred_load_, deferred_load); | 
 |   result.set_imports(Array::Handle(Array::New(kInitialSize))); | 
 |   result.AddImport(import); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void LibraryPrefix::set_name(const String& value) const { | 
 |   ASSERT(value.IsSymbol()); | 
 |   untag()->set_name(value.ptr()); | 
 | } | 
 |  | 
 | void LibraryPrefix::set_imports(const Array& value) const { | 
 |   untag()->set_imports(value.ptr()); | 
 | } | 
 |  | 
 | void LibraryPrefix::set_num_imports(intptr_t value) const { | 
 |   if (!Utils::IsUint(16, value)) { | 
 |     ReportTooManyImports(Library::Handle(importer())); | 
 |   } | 
 |   StoreNonPointer(&untag()->num_imports_, value); | 
 | } | 
 |  | 
 | void LibraryPrefix::set_importer(const Library& value) const { | 
 |   untag()->set_importer(value.ptr()); | 
 | } | 
 |  | 
 | const char* LibraryPrefix::ToCString() const { | 
 |   const String& prefix = String::Handle(name()); | 
 |   return prefix.ToCString(); | 
 | } | 
 |  | 
 | const char* Namespace::ToCString() const { | 
 |   const Library& lib = Library::Handle(target()); | 
 |   return OS::SCreate(Thread::Current()->zone(), "Namespace for library '%s'", | 
 |                      lib.ToCString()); | 
 | } | 
 |  | 
 | bool Namespace::HidesName(const String& name) const { | 
 |   // Quick check for common case with no combinators. | 
 |   if (hide_names() == show_names()) { | 
 |     ASSERT(hide_names() == Array::null()); | 
 |     return false; | 
 |   } | 
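  // E.g. `import 'lib.dart' show a;` yields show_names() == ['a'] with a
  // null hide_names(), and `... hide b;` the reverse. The hide list is
  // checked first, so a name occurring in both lists counts as hidden.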
 |   const String* plain_name = &name; | 
 |   if (Field::IsGetterName(name)) { | 
 |     plain_name = &String::Handle(Field::NameFromGetter(name)); | 
 |   } else if (Field::IsSetterName(name)) { | 
 |     plain_name = &String::Handle(Field::NameFromSetter(name)); | 
 |   } | 
 |   // Check whether the name is in the list of explicitly hidden names. | 
 |   if (hide_names() != Array::null()) { | 
 |     const Array& names = Array::Handle(hide_names()); | 
 |     String& hidden = String::Handle(); | 
 |     intptr_t num_names = names.Length(); | 
 |     for (intptr_t i = 0; i < num_names; i++) { | 
 |       hidden ^= names.At(i); | 
 |       if (plain_name->Equals(hidden)) { | 
 |         return true; | 
 |       } | 
 |     } | 
 |   } | 
 |   // The name is not explicitly hidden. Now check whether it is in the | 
 |   // list of explicitly visible names, if there is one. | 
 |   if (show_names() != Array::null()) { | 
 |     const Array& names = Array::Handle(show_names()); | 
 |     String& shown = String::Handle(); | 
 |     intptr_t num_names = names.Length(); | 
 |     for (intptr_t i = 0; i < num_names; i++) { | 
 |       shown ^= names.At(i); | 
 |       if (plain_name->Equals(shown)) { | 
 |         return false; | 
 |       } | 
 |     } | 
 |     // There is a list of visible names. The name we're looking for is not | 
 |     // contained in the list, so it is hidden. | 
 |     return true; | 
 |   } | 
 |   // The name is not filtered out. | 
 |   return false; | 
 | } | 
 |  | 
 | // Look up object with given name in library and filter out hidden | 
 | // names. Also look up getters and setters. | 
 | ObjectPtr Namespace::Lookup(const String& name, | 
 |                             ZoneGrowableArray<intptr_t>* trail) const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const Library& lib = Library::Handle(zone, target()); | 
 |  | 
 |   if (trail != nullptr) { | 
 |     // Look for cycle in reexport graph. | 
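    // A library index that repeats on the trail closes an export cycle:
    // invalidate the rest of the trail (-1) and give up on this path.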
 |     for (int i = 0; i < trail->length(); i++) { | 
 |       if (trail->At(i) == lib.index()) { | 
 |         for (int j = i + 1; j < trail->length(); j++) { | 
 |           (*trail)[j] = -1; | 
 |         } | 
 |         return Object::null(); | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
 |   lib.EnsureTopLevelClassIsFinalized(); | 
 |  | 
 |   intptr_t ignore = 0; | 
 |   // Lookup the name in the library's symbols. | 
 |   Object& obj = Object::Handle(zone, lib.LookupEntry(name, &ignore)); | 
 |   if (!Field::IsGetterName(name) && !Field::IsSetterName(name) && | 
 |       (obj.IsNull() || obj.IsLibraryPrefix())) { | 
 |     String& accessor_name = String::Handle(zone); | 
 |     accessor_name = Field::LookupGetterSymbol(name); | 
 |     if (!accessor_name.IsNull()) { | 
 |       obj = lib.LookupEntry(accessor_name, &ignore); | 
 |     } | 
 |     if (obj.IsNull()) { | 
 |       accessor_name = Field::LookupSetterSymbol(name); | 
 |       if (!accessor_name.IsNull()) { | 
 |         obj = lib.LookupEntry(accessor_name, &ignore); | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
 |   // Library prefixes are not exported. | 
 |   if (obj.IsNull() || obj.IsLibraryPrefix()) { | 
 |     // Lookup in the re-exported symbols. | 
 |     obj = lib.LookupReExport(name, trail); | 
 |     if (obj.IsNull() && !Field::IsSetterName(name)) { | 
 |       // LookupReExport() only returns objects that match the given name. | 
 |       // If there is no field/func/getter, try finding a setter. | 
 |       const String& setter_name = | 
 |           String::Handle(zone, Field::LookupSetterSymbol(name)); | 
 |       if (!setter_name.IsNull()) { | 
 |         obj = lib.LookupReExport(setter_name, trail); | 
 |       } | 
 |     } | 
 |   } | 
 |   if (obj.IsNull() || HidesName(name) || obj.IsLibraryPrefix()) { | 
 |     return Object::null(); | 
 |   } | 
 |   return obj.ptr(); | 
 | } | 
 |  | 
 | NamespacePtr Namespace::New() { | 
 |   ASSERT(Object::namespace_class() != Class::null()); | 
 |   return Object::Allocate<Namespace>(Heap::kOld); | 
 | } | 
 |  | 
 | NamespacePtr Namespace::New(const Library& target, | 
 |                             const Array& show_names, | 
 |                             const Array& hide_names, | 
 |                             const Library& owner) { | 
 |   ASSERT(show_names.IsNull() || (show_names.Length() > 0)); | 
 |   ASSERT(hide_names.IsNull() || (hide_names.Length() > 0)); | 
 |   const Namespace& result = Namespace::Handle(Namespace::New()); | 
 |   result.untag()->set_target(target.ptr()); | 
 |   result.untag()->set_show_names(show_names.ptr()); | 
 |   result.untag()->set_hide_names(hide_names.ptr()); | 
 |   result.untag()->set_owner(owner.ptr()); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | KernelProgramInfoPtr KernelProgramInfo::New() { | 
 |   return Object::Allocate<KernelProgramInfo>(Heap::kOld); | 
 | } | 
 |  | 
 | KernelProgramInfoPtr KernelProgramInfo::New( | 
 |     const TypedDataBase& kernel_component, | 
 |     const TypedDataView& string_data, | 
 |     const TypedDataView& metadata_payloads, | 
 |     const TypedDataView& metadata_mappings, | 
 |     const TypedDataView& constants_table, | 
 |     const TypedData& string_offsets, | 
 |     const TypedData& canonical_names, | 
 |     const Array& scripts, | 
 |     const Array& libraries_cache, | 
 |     const Array& classes_cache) { | 
 |   ASSERT(kernel_component.IsExternalOrExternalView()); | 
 |   ASSERT(string_data.IsExternalOrExternalView()); | 
 |   ASSERT(metadata_payloads.IsExternalOrExternalView()); | 
 |   ASSERT(metadata_mappings.IsExternalOrExternalView()); | 
 |   ASSERT(constants_table.IsExternalOrExternalView()); | 
 |  | 
 |   const auto& info = KernelProgramInfo::Handle(KernelProgramInfo::New()); | 
 |   info.untag()->set_kernel_component(kernel_component.ptr()); | 
 |   info.untag()->set_string_offsets(string_offsets.ptr()); | 
 |   info.untag()->set_string_data(string_data.ptr()); | 
 |   info.untag()->set_canonical_names(canonical_names.ptr()); | 
 |   info.untag()->set_metadata_payloads(metadata_payloads.ptr()); | 
 |   info.untag()->set_metadata_mappings(metadata_mappings.ptr()); | 
 |   info.untag()->set_scripts(scripts.ptr()); | 
 |   info.untag()->set_constants_table(constants_table.ptr()); | 
 |   info.untag()->set_libraries_cache(libraries_cache.ptr()); | 
 |   info.untag()->set_classes_cache(classes_cache.ptr()); | 
 |   return info.ptr(); | 
 | } | 
 |  | 
 | const char* KernelProgramInfo::ToCString() const { | 
 |   return "[KernelProgramInfo]"; | 
 | } | 
 |  | 
 | ScriptPtr KernelProgramInfo::ScriptAt(intptr_t index) const { | 
 |   const Array& all_scripts = Array::Handle(scripts()); | 
 |   ObjectPtr script = all_scripts.At(index); | 
 |   return Script::RawCast(script); | 
 | } | 
 |  | 
 | void KernelProgramInfo::set_scripts(const Array& scripts) const { | 
 |   untag()->set_scripts(scripts.ptr()); | 
 | } | 
 |  | 
 | void KernelProgramInfo::set_constants(const Array& constants) const { | 
 |   untag()->set_constants(constants.ptr()); | 
 | } | 
 |  | 
 | intptr_t KernelProgramInfo::KernelLibraryStartOffset( | 
 |     intptr_t library_index) const { | 
 |   const auto& blob = TypedDataBase::Handle(kernel_component()); | 
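  // The kernel component ends with a trailing index of 32-bit big-endian
  // words (a sketch; see pkg/kernel/binary.md for the authoritative format):
  //
  //   ... library contents ...
  //   uint32 libraryOffsets[libraryCount + 1]  // last entry = end offset
  //   uint32 libraryCount
  //   uint32 componentFileSize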
 |   const intptr_t library_count = | 
 |       Utils::BigEndianToHost32(LoadUnaligned(reinterpret_cast<uint32_t*>( | 
 |           blob.DataAddr(blob.LengthInBytes() - 2 * 4)))); | 
 |   const intptr_t library_start = | 
 |       Utils::BigEndianToHost32(LoadUnaligned(reinterpret_cast<uint32_t*>( | 
 |           blob.DataAddr(blob.LengthInBytes() - | 
 |                         (2 + 1 + (library_count - library_index)) * 4)))); | 
 |   return library_start; | 
 | } | 
 |  | 
 | TypedDataViewPtr KernelProgramInfo::KernelLibrary( | 
 |     intptr_t library_index) const { | 
 |   const intptr_t start_offset = KernelLibraryStartOffset(library_index); | 
 |   const intptr_t end_offset = KernelLibraryEndOffset(library_index); | 
 |   const auto& component = TypedDataBase::Handle(kernel_component()); | 
 |   return component.ViewFromTo(start_offset, end_offset); | 
 | } | 
 |  | 
 | intptr_t KernelProgramInfo::KernelLibraryEndOffset( | 
 |     intptr_t library_index) const { | 
 |   const auto& blob = TypedDataBase::Handle(kernel_component()); | 
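  // See the trailing-index layout sketch in KernelLibraryStartOffset().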
 |   const intptr_t library_count = | 
 |       Utils::BigEndianToHost32(LoadUnaligned(reinterpret_cast<uint32_t*>( | 
 |           blob.DataAddr(blob.LengthInBytes() - 2 * 4)))); | 
 |   const intptr_t library_end = Utils::BigEndianToHost32( | 
 |       LoadUnaligned(reinterpret_cast<uint32_t*>(blob.DataAddr( | 
 |           blob.LengthInBytes() - (2 + (library_count - library_index)) * 4)))); | 
 |   return library_end; | 
 | } | 
 |  | 
 | void KernelProgramInfo::set_constants_table(const TypedDataView& value) const { | 
 |   untag()->set_constants_table(value.ptr()); | 
 | } | 
 |  | 
 | void KernelProgramInfo::set_libraries_cache(const Array& cache) const { | 
 |   untag()->set_libraries_cache(cache.ptr()); | 
 | } | 
 |  | 
 | LibraryPtr KernelProgramInfo::LookupLibrary(Thread* thread, | 
 |                                             const Smi& name_index) const { | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_LIBRARY_HANDLESCOPE(thread); | 
 |   REUSABLE_OBJECT_HANDLESCOPE(thread); | 
 |   REUSABLE_SMI_HANDLESCOPE(thread); | 
 |   Array& data = thread->ArrayHandle(); | 
 |   Library& result = thread->LibraryHandle(); | 
 |   Object& key = thread->ObjectHandle(); | 
 |   Smi& value = thread->SmiHandle(); | 
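  // libraries_cache() is a plain Array wrapped in an IntHashMap on demand;
  // all access is serialized by the kernel data cache mutex.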
 |   { | 
 |     SafepointMutexLocker ml( | 
 |         thread->isolate_group()->kernel_data_lib_cache_mutex()); | 
 |     data = libraries_cache(); | 
 |     ASSERT(!data.IsNull()); | 
 |     IntHashMap table(&key, &value, &data); | 
 |     result ^= table.GetOrNull(name_index); | 
 |     table.Release(); | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | LibraryPtr KernelProgramInfo::InsertLibrary(Thread* thread, | 
 |                                             const Smi& name_index, | 
 |                                             const Library& lib) const { | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_LIBRARY_HANDLESCOPE(thread); | 
 |   REUSABLE_OBJECT_HANDLESCOPE(thread); | 
 |   REUSABLE_SMI_HANDLESCOPE(thread); | 
 |   Array& data = thread->ArrayHandle(); | 
 |   Library& result = thread->LibraryHandle(); | 
 |   Object& key = thread->ObjectHandle(); | 
 |   Smi& value = thread->SmiHandle(); | 
 |   { | 
 |     SafepointMutexLocker ml( | 
 |         thread->isolate_group()->kernel_data_lib_cache_mutex()); | 
 |     data = libraries_cache(); | 
 |     ASSERT(!data.IsNull()); | 
 |     IntHashMap table(&key, &value, &data); | 
 |     result ^= table.InsertOrGetValue(name_index, lib); | 
 |     set_libraries_cache(table.Release()); | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void KernelProgramInfo::set_classes_cache(const Array& cache) const { | 
 |   untag()->set_classes_cache(cache.ptr()); | 
 | } | 
 |  | 
 | ClassPtr KernelProgramInfo::LookupClass(Thread* thread, | 
 |                                         const Smi& name_index) const { | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_CLASS_HANDLESCOPE(thread); | 
 |   REUSABLE_OBJECT_HANDLESCOPE(thread); | 
 |   REUSABLE_SMI_HANDLESCOPE(thread); | 
 |   Array& data = thread->ArrayHandle(); | 
 |   Class& result = thread->ClassHandle(); | 
 |   Object& key = thread->ObjectHandle(); | 
 |   Smi& value = thread->SmiHandle(); | 
 |   { | 
 |     SafepointMutexLocker ml( | 
 |         thread->isolate_group()->kernel_data_class_cache_mutex()); | 
 |     data = classes_cache(); | 
 |     ASSERT(!data.IsNull()); | 
 |     IntHashMap table(&key, &value, &data); | 
 |     result ^= table.GetOrNull(name_index); | 
 |     table.Release(); | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ClassPtr KernelProgramInfo::InsertClass(Thread* thread, | 
 |                                         const Smi& name_index, | 
 |                                         const Class& klass) const { | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   REUSABLE_CLASS_HANDLESCOPE(thread); | 
 |   REUSABLE_OBJECT_HANDLESCOPE(thread); | 
 |   REUSABLE_SMI_HANDLESCOPE(thread); | 
 |   Array& data = thread->ArrayHandle(); | 
 |   Class& result = thread->ClassHandle(); | 
 |   Object& key = thread->ObjectHandle(); | 
 |   Smi& value = thread->SmiHandle(); | 
 |   { | 
 |     SafepointMutexLocker ml( | 
 |         thread->isolate_group()->kernel_data_class_cache_mutex()); | 
 |     data = classes_cache(); | 
 |     ASSERT(!data.IsNull()); | 
 |     IntHashMap table(&key, &value, &data); | 
 |     result ^= table.InsertOrGetValue(name_index, klass); | 
 |     set_classes_cache(table.Release()); | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ErrorPtr Library::CompileAll(bool ignore_error /* = false */) { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   Error& error = Error::Handle(zone); | 
 |   const GrowableObjectArray& libs = GrowableObjectArray::Handle( | 
 |       IsolateGroup::Current()->object_store()->libraries()); | 
 |   Library& lib = Library::Handle(zone); | 
 |   Class& cls = Class::Handle(zone); | 
 |   for (int i = 0; i < libs.Length(); i++) { | 
 |     lib ^= libs.At(i); | 
 |     ClassDictionaryIterator it(lib, ClassDictionaryIterator::kIteratePrivate); | 
 |     while (it.HasNext()) { | 
 |       cls = it.GetNextClass(); | 
 |       error = cls.EnsureIsFinalized(thread); | 
 |       if (!error.IsNull()) { | 
 |         if (ignore_error) continue; | 
 |         return error.ptr(); | 
 |       } | 
 |       error = Compiler::CompileAllFunctions(cls); | 
 |       if (!error.IsNull()) { | 
 |         if (ignore_error) continue; | 
 |         return error.ptr(); | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
 |   Object& result = Object::Handle(zone); | 
 |   ClosureFunctionsCache::ForAllClosureFunctions([&](const Function& func) { | 
 |     if (!func.HasCode()) { | 
 |       result = Compiler::CompileFunction(thread, func); | 
 |       if (result.IsError()) { | 
 |         error = Error::Cast(result).ptr(); | 
 |         return false;  // Stop iteration. | 
 |       } | 
 |     } | 
 |     return true;  // Continue iteration. | 
 |   }); | 
 |   return error.ptr(); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | ErrorPtr Library::FinalizeAllClasses() { | 
 |   Thread* thread = Thread::Current(); | 
 |   ASSERT(thread->IsDartMutatorThread()); | 
 |   Zone* zone = thread->zone(); | 
 |   Error& error = Error::Handle(zone); | 
 |   const GrowableObjectArray& libs = GrowableObjectArray::Handle( | 
 |       IsolateGroup::Current()->object_store()->libraries()); | 
 |   Library& lib = Library::Handle(zone); | 
 |   Class& cls = Class::Handle(zone); | 
 |   for (int i = 0; i < libs.Length(); i++) { | 
 |     lib ^= libs.At(i); | 
 |     if (!lib.Loaded()) { | 
 |       String& uri = String::Handle(zone, lib.url()); | 
 |       String& msg = String::Handle( | 
 |           zone, | 
 |           String::NewFormatted("Library '%s' is not loaded. " | 
 |                                "Did you forget to call Dart_FinalizeLoading?", | 
 |                                uri.ToCString())); | 
 |       return ApiError::New(msg); | 
 |     } | 
 |     ClassDictionaryIterator it(lib, ClassDictionaryIterator::kIteratePrivate); | 
 |     while (it.HasNext()) { | 
 |       cls = it.GetNextClass(); | 
 |       error = cls.EnsureIsFinalized(thread); | 
 |       if (!error.IsNull()) { | 
 |         return error.ptr(); | 
 |       } | 
 |     } | 
 |   } | 
 |   return Error::null(); | 
 | } | 
 |  | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | // Return Function::null() if function does not exist in libs. | 
 | FunctionPtr Library::GetFunction(const GrowableArray<Library*>& libs, | 
 |                                  const char* class_name, | 
 |                                  const char* function_name) { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   Function& func = Function::Handle(zone); | 
 |   String& class_str = String::Handle(zone); | 
 |   String& func_str = String::Handle(zone); | 
 |   Class& cls = Class::Handle(zone); | 
 |   for (intptr_t l = 0; l < libs.length(); l++) { | 
 |     const Library& lib = *libs[l]; | 
 |     if (strcmp(class_name, "::") == 0) { | 
 |       cls = lib.toplevel_class(); | 
 |     } else { | 
 |       class_str = String::New(class_name); | 
 |       cls = lib.LookupClassAllowPrivate(class_str); | 
 |     } | 
 |     if (!cls.IsNull()) { | 
 |       if (cls.EnsureIsFinalized(thread) == Error::null()) { | 
 |         func_str = String::New(function_name); | 
 |         if (function_name[0] == '.') { | 
 |           func_str = String::Concat(class_str, func_str); | 
 |         } | 
 |         func = cls.LookupFunctionAllowPrivate(func_str); | 
 |       } | 
 |     } | 
 |     if (!func.IsNull()) { | 
 |       return func.ptr(); | 
 |     } | 
 |   } | 
 |   return Function::null(); | 
 | } | 
 |  | 
 | ObjectPtr Library::GetFunctionClosure(const String& name) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   Function& func = Function::Handle(zone, LookupFunctionAllowPrivate(name)); | 
 |   if (func.IsNull()) { | 
 |     // Check whether the function is reexported into the library. | 
 |     const Object& obj = Object::Handle(zone, LookupReExport(name)); | 
 |     if (obj.IsFunction()) { | 
 |       func ^= obj.ptr(); | 
 |     } else { | 
 |       // Check if there is a getter of 'name', in which case invoke it | 
 |       // and return the result. | 
 |       const String& getter_name = String::Handle(zone, Field::GetterName(name)); | 
 |       func = LookupFunctionAllowPrivate(getter_name); | 
 |       if (func.IsNull()) { | 
 |         return Closure::null(); | 
 |       } | 
 |       // Invoke the getter and return the result. | 
 |       return DartEntry::InvokeFunction(func, Object::empty_array()); | 
 |     } | 
 |   } | 
 |   func = func.ImplicitClosureFunction(); | 
 |   return func.ImplicitStaticClosure(); | 
 | } | 
 |  | 
 | #if defined(DEBUG) && !defined(DART_PRECOMPILED_RUNTIME) | 
 | void Library::CheckFunctionFingerprints() { | 
 |   GrowableArray<Library*> all_libs; | 
 |   Function& func = Function::Handle(); | 
 |   bool fingerprints_match = true; | 
 |  | 
 | #define CHECK_FINGERPRINTS_INNER(class_name, function_name, dest, fp, kind)    \ | 
 |   func = GetFunction(all_libs, #class_name, #function_name);                   \ | 
 |   if (func.IsNull()) {                                                         \ | 
 |     fingerprints_match = false;                                                \ | 
 |     OS::PrintErr("Function not found %s.%s\n", #class_name, #function_name);   \ | 
 |   } else {                                                                     \ | 
 |     fingerprints_match =                                                       \ | 
 |         func.CheckSourceFingerprint(fp, kind) && fingerprints_match;           \ | 
 |   } | 
 |  | 
 | #define CHECK_FINGERPRINTS(class_name, function_name, dest, fp)                \ | 
 |   CHECK_FINGERPRINTS_INNER(class_name, function_name, dest, fp, nullptr) | 
 | #define CHECK_FINGERPRINTS_ASM_INTRINSIC(class_name, function_name, dest, fp)  \ | 
 |   CHECK_FINGERPRINTS_INNER(class_name, function_name, dest, fp, "asm-intrinsic") | 
 | #define CHECK_FINGERPRINTS_GRAPH_INTRINSIC(class_name, function_name, dest,    \ | 
 |                                            fp)                                 \ | 
 |   CHECK_FINGERPRINTS_INNER(class_name, function_name, dest, fp,                \ | 
 |                            "graph-intrinsic") | 
 | #define CHECK_FINGERPRINTS_OTHER(class_name, function_name, dest, fp)          \ | 
 |   CHECK_FINGERPRINTS_INNER(class_name, function_name, dest, fp, "other") | 
 |  | 
 |   all_libs.Add(&Library::ZoneHandle(Library::CoreLibrary())); | 
 |   CORE_LIB_INTRINSIC_LIST(CHECK_FINGERPRINTS_ASM_INTRINSIC); | 
 |   CORE_INTEGER_LIB_INTRINSIC_LIST(CHECK_FINGERPRINTS_ASM_INTRINSIC); | 
 |   GRAPH_CORE_INTRINSICS_LIST(CHECK_FINGERPRINTS_GRAPH_INTRINSIC); | 
 |  | 
 |   all_libs.Add(&Library::ZoneHandle(Library::AsyncLibrary())); | 
 |   all_libs.Add(&Library::ZoneHandle(Library::MathLibrary())); | 
 |   all_libs.Add(&Library::ZoneHandle(Library::TypedDataLibrary())); | 
 |   all_libs.Add(&Library::ZoneHandle(Library::CollectionLibrary())); | 
 |   all_libs.Add(&Library::ZoneHandle(Library::ConvertLibrary())); | 
 |   all_libs.Add(&Library::ZoneHandle(Library::InternalLibrary())); | 
 |   all_libs.Add(&Library::ZoneHandle(Library::IsolateLibrary())); | 
 |   all_libs.Add(&Library::ZoneHandle(Library::FfiLibrary())); | 
 |   all_libs.Add(&Library::ZoneHandle(Library::NativeWrappersLibrary())); | 
 |   all_libs.Add(&Library::ZoneHandle(Library::DeveloperLibrary())); | 
 |   INTERNAL_LIB_INTRINSIC_LIST(CHECK_FINGERPRINTS_ASM_INTRINSIC); | 
 |   OTHER_RECOGNIZED_LIST(CHECK_FINGERPRINTS_OTHER); | 
 |   POLYMORPHIC_TARGET_LIST(CHECK_FINGERPRINTS); | 
 |   GRAPH_TYPED_DATA_INTRINSICS_LIST(CHECK_FINGERPRINTS_GRAPH_INTRINSIC); | 
 |  | 
 |   all_libs.Clear(); | 
 |   all_libs.Add(&Library::ZoneHandle(Library::DeveloperLibrary())); | 
 |   DEVELOPER_LIB_INTRINSIC_LIST(CHECK_FINGERPRINTS_ASM_INTRINSIC); | 
 |  | 
 | #undef CHECK_FINGERPRINTS_INNER | 
 | #undef CHECK_FINGERPRINTS | 
 | #undef CHECK_FINGERPRINTS_ASM_INTRINSIC | 
 | #undef CHECK_FINGERPRINTS_GRAPH_INTRINSIC | 
 | #undef CHECK_FINGERPRINTS_OTHER | 
 |  | 
 | #define CHECK_FACTORY_FINGERPRINTS(symbol, class_name, factory_name, cid, fp)  \ | 
 |   func = GetFunction(all_libs, #class_name, #factory_name);                    \ | 
 |   if (func.IsNull()) {                                                         \ | 
 |     fingerprints_match = false;                                                \ | 
 |     OS::PrintErr("Function not found %s.%s\n", #class_name, #factory_name);    \ | 
 |   } else {                                                                     \ | 
 |     fingerprints_match =                                                       \ | 
 |         func.CheckSourceFingerprint(fp) && fingerprints_match;                 \ | 
 |   } | 
 |  | 
 |   all_libs.Clear(); | 
 |   all_libs.Add(&Library::ZoneHandle(Library::CoreLibrary())); | 
 |   all_libs.Add(&Library::ZoneHandle(Library::TypedDataLibrary())); | 
 |   RECOGNIZED_LIST_FACTORY_LIST(CHECK_FACTORY_FINGERPRINTS); | 
 |  | 
 | #undef CHECK_FACTORY_FINGERPRINTS | 
 |  | 
 |   if (!fingerprints_match) { | 
 |     // Private names are mangled. Mangling depends on Library::private_key_. | 
 |     // If registering a new bootstrap library, add at the end. | 
 |     FATAL( | 
 |         "FP mismatch while recognizing methods. If the behavior of " | 
 |         "these functions has changed, then changes are also needed in " | 
 |         "the VM's compiler. Otherwise the fingerprint can simply be " | 
 |         "updated in recognized_methods_list.h\n"); | 
 |   } | 
 | } | 
 | #endif  // defined(DEBUG) && !defined(DART_PRECOMPILED_RUNTIME). | 
 |  | 
 | InstructionsPtr Instructions::New(intptr_t size, | 
 |                                   bool has_monomorphic_entry, | 
 |                                   bool should_be_aligned) { | 
 |   ASSERT(size >= 0); | 
 |   ASSERT(Object::instructions_class() != Class::null()); | 
 |   if (size < 0 || size > kMaxElements) { | 
 |     // This should be caught before we reach here. | 
 |     FATAL("Fatal error in Instructions::New: invalid size %" Pd "\n", size); | 
 |   } | 
 |   Instructions& result = Instructions::Handle(); | 
 |   { | 
 |     auto raw = Object::Allocate<Instructions>(Heap::kCode, size); | 
 |     NoSafepointScope no_safepoint; | 
 |     result = raw; | 
 |     result.SetSize(size); | 
 |     // Set this within the NoSafepointScope as well since it is contained in | 
 |     // the same bitfield as the size. | 
 |     result.SetHasMonomorphicEntry(has_monomorphic_entry); | 
 |     result.SetShouldBeAligned(should_be_aligned); | 
 |   } | 
 |   ASSERT(result.stats() == nullptr); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* Instructions::ToCString() const { | 
 |   return "Instructions"; | 
 | } | 
 |  | 
 | CodeStatistics* Instructions::stats() const { | 
 | #if defined(DART_PRECOMPILER) | 
 |   return reinterpret_cast<CodeStatistics*>( | 
 |       Thread::Current()->heap()->GetPeer(ptr())); | 
 | #else | 
 |   return nullptr; | 
 | #endif | 
 | } | 
 |  | 
 | void Instructions::set_stats(CodeStatistics* stats) const { | 
 | #if defined(DART_PRECOMPILER) | 
 |   Thread::Current()->heap()->SetPeer(ptr(), stats); | 
 | #endif | 
 | } | 
 |  | 
 | const char* InstructionsSection::ToCString() const { | 
 |   return "InstructionsSection"; | 
 | } | 
 |  | 
 | void InstructionsTable::set_length(intptr_t value) const { | 
 |   StoreNonPointer(&untag()->length_, value); | 
 | } | 
 |  | 
 | void InstructionsTable::set_start_pc(uword value) const { | 
 |   StoreNonPointer(&untag()->start_pc_, value); | 
 | } | 
 |  | 
 | void InstructionsTable::set_end_pc(uword value) const { | 
 |   StoreNonPointer(&untag()->end_pc_, value); | 
 | } | 
 |  | 
 | void InstructionsTable::set_code_objects(const Array& value) const { | 
 |   untag()->set_code_objects(value.ptr()); | 
 | } | 
 |  | 
 | void InstructionsTable::set_rodata(uword value) const { | 
 |   StoreNonPointer( | 
 |       &untag()->rodata_, | 
 |       reinterpret_cast<const UntaggedInstructionsTable::Data*>(value)); | 
 | } | 
 |  | 
 | InstructionsTablePtr InstructionsTable::New(intptr_t length, | 
 |                                             uword start_pc, | 
 |                                             uword end_pc, | 
 |                                             uword rodata) { | 
 |   ASSERT(Object::instructions_table_class() != Class::null()); | 
 |   ASSERT(length >= 0); | 
 |   ASSERT(start_pc <= end_pc); | 
 |   auto* const zone = Thread::Current()->zone(); | 
 |   const Array& code_objects = | 
 |       (length == 0) ? Object::empty_array() | 
 |                     : Array::Handle(zone, Array::New(length, Heap::kOld)); | 
 |   const auto& result = InstructionsTable::Handle( | 
 |       zone, Object::Allocate<InstructionsTable>(Heap::kOld)); | 
 |   result.set_code_objects(code_objects); | 
 |   result.set_length(length); | 
 |   result.set_start_pc(start_pc); | 
 |   result.set_end_pc(end_pc); | 
 |   result.set_rodata(rodata); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void InstructionsTable::SetCodeAt(intptr_t index, CodePtr code) const { | 
 |   ASSERT((0 <= index) && | 
 |          (index < Smi::Value(code_objects()->untag()->length()))); | 
 |   code_objects()->untag()->set_element(index, code); | 
 | } | 
 |  | 
 | bool InstructionsTable::ContainsPc(InstructionsTablePtr table, uword pc) { | 
 |   return (InstructionsTable::start_pc(table) <= pc) && | 
 |          (pc < InstructionsTable::end_pc(table)); | 
 | } | 
 |  | 
 | uint32_t InstructionsTable::ConvertPcToOffset(InstructionsTablePtr table, | 
 |                                               uword pc) { | 
 |   ASSERT(InstructionsTable::ContainsPc(table, pc)); | 
 |   const uint32_t pc_offset = | 
 |       static_cast<uint32_t>(pc - InstructionsTable::start_pc(table)); | 
 |   ASSERT(InstructionsTable::start_pc(table) + pc_offset == pc);  // No overflow. | 
 |   return pc_offset; | 
 | } | 
 |  | 
 | intptr_t InstructionsTable::FindEntry(InstructionsTablePtr table, | 
 |                                       uword pc, | 
 |                                       intptr_t start_index /* = 0 */) { | 
 |   // This can run in the middle of GC and must not allocate handles. | 
 |   NoSafepointScope no_safepoint; | 
 |   if (!InstructionsTable::ContainsPc(table, pc)) return -1; | 
 |   const uint32_t pc_offset = InstructionsTable::ConvertPcToOffset(table, pc); | 
 |  | 
 |   const auto rodata = table.untag()->rodata_; | 
 |   const auto entries = rodata->entries(); | 
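  // Entries are sorted by pc_offset. Binary search for the entry whose
  // half-open range [entries[i].pc_offset, entries[i + 1].pc_offset) covers
  // pc_offset; the last entry covers offsets up to the table's end_pc.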
 |   intptr_t lo = start_index; | 
 |   intptr_t hi = rodata->length - 1; | 
 |   while (lo <= hi) { | 
 |     intptr_t mid = (hi - lo + 1) / 2 + lo; | 
 |     ASSERT(mid >= lo); | 
 |     ASSERT(mid <= hi); | 
 |     if (pc_offset < entries[mid].pc_offset) { | 
 |       hi = mid - 1; | 
 |     } else if ((mid != hi) && (pc_offset >= entries[mid + 1].pc_offset)) { | 
 |       lo = mid + 1; | 
 |     } else { | 
 |       return mid; | 
 |     } | 
 |   } | 
 |   return -1; | 
 | } | 
 |  | 
 | const UntaggedCompressedStackMaps::Payload* | 
 | InstructionsTable::GetCanonicalStackMap(InstructionsTablePtr table) { | 
 |   const auto rodata = table.untag()->rodata_; | 
 |   return rodata->canonical_stack_map_entries_offset != 0 | 
 |              ? rodata->StackMapAt(rodata->canonical_stack_map_entries_offset) | 
 |              : nullptr; | 
 | } | 
 |  | 
 | const UntaggedCompressedStackMaps::Payload* InstructionsTable::FindStackMap( | 
 |     InstructionsTablePtr table, | 
 |     uword pc, | 
 |     uword* start_pc) { | 
 |   // This can run in the middle of GC and must not allocate handles. | 
 |   NoSafepointScope no_safepoint; | 
 |   const intptr_t idx = FindEntry(table, pc); | 
 |   if (idx != -1) { | 
 |     const auto rodata = table.untag()->rodata_; | 
 |     const auto entries = rodata->entries(); | 
 |     *start_pc = InstructionsTable::start_pc(table) + entries[idx].pc_offset; | 
 |     return rodata->StackMapAt(entries[idx].stack_map_offset); | 
 |   } | 
 |   return nullptr; | 
 | } | 
 |  | 
 | CodePtr InstructionsTable::FindCode(InstructionsTablePtr table, uword pc) { | 
 |   // This can run in the middle of GC and must not allocate handles. | 
 |   NoSafepointScope no_safepoint; | 
 |   if (!InstructionsTable::ContainsPc(table, pc)) return Code::null(); | 
 |  | 
 |   const auto rodata = table.untag()->rodata_; | 
 |  | 
 |   const auto pc_offset = InstructionsTable::ConvertPcToOffset(table, pc); | 
 |  | 
  if (pc_offset < rodata->entries()[rodata->first_entry_with_code].pc_offset) {
 |     return StubCode::UnknownDartCode().ptr(); | 
 |   } | 
 |  | 
 |   const auto idx = | 
 |       FindEntry(table, pc, table.untag()->rodata_->first_entry_with_code); | 
 |   if (idx != -1) { | 
 |     const intptr_t code_index = idx - rodata->first_entry_with_code; | 
 |     ASSERT(code_index >= 0); | 
 |     ASSERT(code_index < | 
 |            Smi::Value(table.untag()->code_objects()->untag()->length())); | 
 |     ObjectPtr result = | 
 |         table.untag()->code_objects()->untag()->element(code_index); | 
 |     ASSERT(result->IsCode()); | 
 |     // Note: can't use Code::RawCast(...) here because it allocates handles | 
 |     // in DEBUG mode. | 
 |     return static_cast<CodePtr>(result); | 
 |   } | 
 |  | 
 |   return Code::null(); | 
 | } | 
 |  | 
 | uword InstructionsTable::EntryPointAt(intptr_t code_index) const { | 
 |   ASSERT(0 <= code_index); | 
 |   ASSERT(code_index < static_cast<intptr_t>(rodata()->length)); | 
 |   return InstructionsTable::start_pc(this->ptr()) + | 
 |          rodata()->entries()[code_index].pc_offset; | 
 | } | 
 |  | 
 | const char* InstructionsTable::ToCString() const { | 
 |   return "InstructionsTable"; | 
 | } | 
 |  | 
 | ObjectPoolPtr ObjectPool::New(intptr_t len) { | 
 |   ASSERT(Object::object_pool_class() != Class::null()); | 
 |   if (len < 0 || len > kMaxElements) { | 
 |     // This should be caught before we reach here. | 
 |     FATAL("Fatal error in ObjectPool::New: invalid length %" Pd "\n", len); | 
 |   } | 
 |   // We only verify the entry bits in DEBUG, so only allocate a handle there. | 
 |   DEBUG_ONLY(auto& result = ObjectPool::Handle()); | 
 |   auto raw = Object::Allocate<ObjectPool>(Heap::kOld, len); | 
 |   NoSafepointScope no_safepoint; | 
 |   raw->untag()->length_ = len; | 
 | #if defined(DEBUG) | 
 |   result = raw; | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     // Verify that InitializeObject() already set the payload as expected. | 
 |     ASSERT_EQUAL(result.PatchableAt(i), ObjectPool::Patchability::kPatchable); | 
 |     ASSERT_EQUAL(result.TypeAt(i), ObjectPool::EntryType::kImmediate); | 
 |     ASSERT_EQUAL(result.RawValueAt(i), 0); | 
 |   } | 
 | #endif | 
 |   return raw; | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | ObjectPoolPtr ObjectPool::NewFromBuilder( | 
 |     const compiler::ObjectPoolBuilder& builder) { | 
 |   const intptr_t len = builder.CurrentLength(); | 
 |   if (len == 0) { | 
 |     return Object::empty_object_pool().ptr(); | 
 |   } | 
 |   const ObjectPool& result = ObjectPool::Handle(ObjectPool::New(len)); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     auto entry = builder.EntryAt(i); | 
 |     auto type = entry.type(); | 
 |     auto patchable = entry.patchable(); | 
 |     auto snapshot_behavior = entry.snapshot_behavior(); | 
 |     result.SetTypeAt(i, type, patchable, snapshot_behavior); | 
 |     if (type == EntryType::kTaggedObject) { | 
 |       result.SetObjectAt(i, *entry.obj_); | 
 |     } else { | 
 | #if defined(TARGET_ARCH_IS_32_BIT) | 
 |       ASSERT(type != EntryType::kImmediate64); | 
 | #endif | 
 |       ASSERT(type != EntryType::kImmediate128); | 
 |       result.SetRawValueAt(i, entry.imm_); | 
 |     } | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void ObjectPool::CopyInto(compiler::ObjectPoolBuilder* builder) const { | 
 |   ASSERT(builder->CurrentLength() == 0); | 
 |   for (intptr_t i = 0; i < Length(); i++) { | 
 |     auto type = TypeAt(i); | 
 |     auto patchable = PatchableAt(i); | 
 |     auto snapshot_behavior = SnapshotBehaviorAt(i); | 
 |     switch (type) { | 
 |       case compiler::ObjectPoolBuilderEntry::kTaggedObject: { | 
 |         compiler::ObjectPoolBuilderEntry entry(&Object::ZoneHandle(ObjectAt(i)), | 
 |                                                patchable, snapshot_behavior); | 
 |         builder->AddObject(entry); | 
 |         break; | 
 |       } | 
 |       case compiler::ObjectPoolBuilderEntry::kImmediate: | 
 |       case compiler::ObjectPoolBuilderEntry::kNativeFunction: { | 
 |         compiler::ObjectPoolBuilderEntry entry(RawValueAt(i), type, patchable, | 
 |                                                snapshot_behavior); | 
 |         builder->AddObject(entry); | 
 |         break; | 
 |       } | 
 |       default: | 
 |         UNREACHABLE(); | 
 |     } | 
 |   } | 
 |   ASSERT(builder->CurrentLength() == Length()); | 
 | } | 
 | #endif | 
 |  | 
 | const char* ObjectPool::ToCString() const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   return zone->PrintToString("ObjectPool len:%" Pd, Length()); | 
 | } | 
 |  | 
 | void ObjectPool::DebugPrint() const { | 
 |   THR_Print("ObjectPool len:%" Pd " {\n", Length()); | 
 |   for (intptr_t i = 0; i < Length(); i++) { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |     intptr_t offset = ObjectPool::element_offset(i); | 
 | #else | 
 |     intptr_t offset = compiler::target::ObjectPool::element_offset(i); | 
 | #endif | 
 | #if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64) | 
 |     THR_Print("  %" Pd "(pp) ", offset);  // PP is untagged | 
 | #elif defined(TARGET_ARCH_ARM64) | 
 |     THR_Print("  [pp, #%" Pd "] ", offset);  // PP is untagged | 
#elif defined(TARGET_ARCH_ARM)
 |     THR_Print("  [pp, #%" Pd "] ", offset - kHeapObjectTag);  // PP is tagged | 
 | #else | 
 |     THR_Print("  [pp+0x%" Px "] ", offset - kHeapObjectTag);  // PP is tagged | 
 | #endif | 
 |     if (TypeAt(i) == EntryType::kTaggedObject) { | 
 |       const Object& obj = Object::Handle(ObjectAt(i)); | 
 |       THR_Print("%s (obj)\n", obj.ToCString()); | 
 |     } else if (TypeAt(i) == EntryType::kNativeFunction) { | 
 |       uword pc = RawValueAt(i); | 
 |       uintptr_t start = 0; | 
 |       const char* name = NativeSymbolResolver::LookupSymbolName(pc, &start); | 
 |       const char* dso_name; | 
 |       uword dso_base; | 
 |       if (name != nullptr) { | 
 |         THR_Print("%s (native function)\n", name); | 
 |         NativeSymbolResolver::FreeSymbolName(name); | 
 |       } else if (NativeSymbolResolver::LookupSharedObject(pc, &dso_base, | 
 |                                                           &dso_name)) { | 
 |         uword dso_offset = pc - dso_base; | 
 |         THR_Print("%s+0x%" Px " (native function)\n", dso_name, dso_offset); | 
 |         NativeSymbolResolver::FreeSymbolName(dso_name); | 
 |       } else { | 
 |         THR_Print("0x%" Px " (native function)\n", pc); | 
 |       } | 
 |     } else { | 
 |       THR_Print("0x%" Px " (raw)\n", RawValueAt(i)); | 
 |     } | 
 |   } | 
 |   THR_Print("}\n"); | 
 | } | 
 |  | 
 | intptr_t PcDescriptors::Length() const { | 
 |   return untag()->length_; | 
 | } | 
 |  | 
 | void PcDescriptors::SetLength(intptr_t value) const { | 
 |   StoreNonPointer(&untag()->length_, value); | 
 | } | 
 |  | 
 | void PcDescriptors::CopyData(const void* bytes, intptr_t size) { | 
 |   NoSafepointScope no_safepoint; | 
 |   uint8_t* data = UnsafeMutableNonPointer(&untag()->data()[0]); | 
 |   // We're guaranteed these memory spaces do not overlap. | 
 |   memcpy(data, bytes, size);  // NOLINT | 
 | } | 
 |  | 
 | PcDescriptorsPtr PcDescriptors::New(const void* delta_encoded_data, | 
 |                                     intptr_t size) { | 
 |   ASSERT(Object::pc_descriptors_class() != Class::null()); | 
 |   Thread* thread = Thread::Current(); | 
 |   PcDescriptors& result = PcDescriptors::Handle(thread->zone()); | 
 |   { | 
 |     auto raw = Object::Allocate<PcDescriptors>(Heap::kOld, size); | 
 |     NoSafepointScope no_safepoint; | 
 |     result = raw; | 
 |     result.SetLength(size); | 
 |   } | 
 |   result.CopyData(delta_encoded_data, size); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | PcDescriptorsPtr PcDescriptors::New(intptr_t length) { | 
 |   ASSERT(Object::pc_descriptors_class() != Class::null()); | 
 |   Thread* thread = Thread::Current(); | 
 |   PcDescriptors& result = PcDescriptors::Handle(thread->zone()); | 
 |   { | 
 |     auto raw = Object::Allocate<PcDescriptors>(Heap::kOld, length); | 
 |     NoSafepointScope no_safepoint; | 
 |     result = raw; | 
 |     result.SetLength(length); | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* PcDescriptors::KindAsStr(UntaggedPcDescriptors::Kind kind) { | 
 |   switch (kind) { | 
 |     case UntaggedPcDescriptors::kDeopt: | 
 |       return "deopt        "; | 
 |     case UntaggedPcDescriptors::kIcCall: | 
 |       return "ic-call"; | 
 |     case UntaggedPcDescriptors::kUnoptStaticCall: | 
 |       return "unopt-call"; | 
 |     case UntaggedPcDescriptors::kRuntimeCall: | 
 |       return "runtime-call"; | 
 |     case UntaggedPcDescriptors::kOsrEntry: | 
 |       return "osr-entry"; | 
 |     case UntaggedPcDescriptors::kRewind: | 
 |       return "rewind"; | 
 |     case UntaggedPcDescriptors::kBSSRelocation: | 
 |       return "bss reloc"; | 
 |     case UntaggedPcDescriptors::kOther: | 
 |       return "other"; | 
 |     case UntaggedPcDescriptors::kAnyKind: | 
 |       UNREACHABLE(); | 
 |       break; | 
 |   } | 
 |   UNREACHABLE(); | 
 |   return ""; | 
 | } | 
 |  | 
 | void PcDescriptors::WriteToBuffer(BaseTextBuffer* buffer, uword base) const { | 
 |   // 4 bits per hex digit. | 
 |   const int addr_width = kBitsPerWord / 4; | 
 |   // "*" in a printf format specifier tells it to read the field width from | 
 |   // the printf argument list. | 
 |   buffer->Printf( | 
 |       "%-*s  kind           deopt-id  tok-ix        try-ix yield-idx\n", | 
 |       addr_width, "pc"); | 
 |   Iterator iter(*this, UntaggedPcDescriptors::kAnyKind); | 
 |   while (iter.MoveNext()) { | 
 |     buffer->Printf("%#-*" Px "  %-13s  % 8" Pd "  %-10s  % 8" Pd "  % 8" Pd | 
 |                    "\n", | 
 |                    addr_width, base + iter.PcOffset(), KindAsStr(iter.Kind()), | 
 |                    iter.DeoptId(), iter.TokenPos().ToCString(), iter.TryIndex(), | 
 |                    iter.YieldIndex()); | 
 |   } | 
 | } | 
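
// Example output of WriteToBuffer (hypothetical values; on 64-bit targets
// addr_width is 16, and column alignment here is approximate):
//
//   pc                kind           deopt-id  tok-ix        try-ix yield-idx
//   0x000000000010a4  ic-call              12  ...                 0       -1
//
// One row is emitted per descriptor visited by the iterator.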
 |  | 
 | const char* PcDescriptors::ToCString() const { | 
 |   if (Length() == 0) { | 
 |     return "empty PcDescriptors"; | 
 |   } | 
 |   ZoneTextBuffer buffer(Thread::Current()->zone()); | 
 |   WriteToBuffer(&buffer, /*base=*/0); | 
 |   return buffer.buffer(); | 
 | } | 
 |  | 
 | // Verify assumptions (in debug mode only). | 
 | // - No two deopt descriptors have the same deoptimization id. | 
 | // - No two ic-call descriptors have the same deoptimization id (type feedback). | 
 | // A function without unique ids is marked as non-optimizable (e.g., because of | 
 | // finally blocks). | 
 | void PcDescriptors::Verify(const Function& function) const { | 
 | #if defined(DEBUG) | 
 |   // Only check ids for unoptimized code that is optimizable. | 
 |   if (!function.IsOptimizable()) { | 
 |     return; | 
 |   } | 
 |   intptr_t max_deopt_id = 0; | 
 |   Iterator max_iter( | 
 |       *this, UntaggedPcDescriptors::kDeopt | UntaggedPcDescriptors::kIcCall); | 
 |   while (max_iter.MoveNext()) { | 
 |     if (max_iter.DeoptId() > max_deopt_id) { | 
 |       max_deopt_id = max_iter.DeoptId(); | 
 |     } | 
 |   } | 
 |  | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   BitVector* deopt_ids = new (zone) BitVector(zone, max_deopt_id + 1); | 
 |   BitVector* iccall_ids = new (zone) BitVector(zone, max_deopt_id + 1); | 
 |   Iterator iter(*this, | 
 |                 UntaggedPcDescriptors::kDeopt | UntaggedPcDescriptors::kIcCall); | 
 |   while (iter.MoveNext()) { | 
    // 'deopt_id' is set for kDeopt and kIcCall and must be unique within
    // each kind.
 |     if (DeoptId::IsDeoptAfter(iter.DeoptId())) { | 
 |       // TODO(vegorov): some instructions contain multiple calls and have | 
 |       // multiple "after" targets recorded. Right now it is benign but might | 
 |       // lead to issues in the future. Fix that and enable verification. | 
 |       continue; | 
 |     } | 
 |     if (iter.Kind() == UntaggedPcDescriptors::kDeopt) { | 
 |       ASSERT(!deopt_ids->Contains(iter.DeoptId())); | 
 |       deopt_ids->Add(iter.DeoptId()); | 
 |     } else { | 
 |       ASSERT(!iccall_ids->Contains(iter.DeoptId())); | 
 |       iccall_ids->Add(iter.DeoptId()); | 
 |     } | 
 |   } | 
 | #endif  // DEBUG | 
 | } | 
 |  | 
 | void CodeSourceMap::SetLength(intptr_t value) const { | 
 |   StoreNonPointer(&untag()->length_, value); | 
 | } | 
 |  | 
 | CodeSourceMapPtr CodeSourceMap::New(intptr_t length) { | 
 |   ASSERT(Object::code_source_map_class() != Class::null()); | 
 |   Thread* thread = Thread::Current(); | 
 |   CodeSourceMap& result = CodeSourceMap::Handle(thread->zone()); | 
 |   { | 
 |     auto raw = Object::Allocate<CodeSourceMap>(Heap::kOld, length); | 
 |     NoSafepointScope no_safepoint; | 
 |     result = raw; | 
 |     result.SetLength(length); | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* CodeSourceMap::ToCString() const { | 
 |   return "CodeSourceMap"; | 
 | } | 
 |  | 
 | uword CompressedStackMaps::Hash() const { | 
 |   NoSafepointScope scope; | 
 |   uint8_t* data = UnsafeMutableNonPointer(&untag()->payload()->data()[0]); | 
 |   uint8_t* end = data + payload_size(); | 
 |   uint32_t hash = payload_size(); | 
 |   for (uint8_t* cursor = data; cursor < end; cursor++) { | 
 |     hash = CombineHashes(hash, *cursor); | 
 |   } | 
 |   return FinalizeHash(hash, kHashBits); | 
 | } | 
 |  | 
 | void CompressedStackMaps::WriteToBuffer(BaseTextBuffer* buffer, | 
 |                                         uword base, | 
 |                                         const char* separator) const { | 
 |   auto it = iterator(Thread::Current()); | 
 |   bool first_entry = true; | 
 |   while (it.MoveNext()) { | 
 |     if (!first_entry) { | 
 |       buffer->AddString(separator); | 
 |     } | 
 |     buffer->Printf("0x%.8" Px ": ", base + it.pc_offset()); | 
 |     for (intptr_t i = 0, n = it.Length(); i < n; i++) { | 
 |       buffer->AddString(it.IsObject(i) ? "1" : "0"); | 
 |     } | 
 |     first_entry = false; | 
 |   } | 
 | } | 
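
// Example output of WriteToBuffer (hypothetical offsets). Each entry prints
// its absolute PC followed by one character per stack slot, '1' for a slot
// holding a tagged object and '0' otherwise:
//
//   0x00000010: 0110, 0x00000024: 0100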
 |  | 
 | CompressedStackMaps::Iterator<CompressedStackMaps> | 
 | CompressedStackMaps::iterator(Thread* thread) const { | 
 |   return Iterator<CompressedStackMaps>( | 
 |       *this, CompressedStackMaps::Handle( | 
 |                  thread->zone(), thread->isolate_group() | 
 |                                      ->object_store() | 
 |                                      ->canonicalized_stack_map_entries())); | 
 | } | 
 |  | 
 | CompressedStackMapsPtr CompressedStackMaps::New(const void* payload, | 
 |                                                 intptr_t size, | 
 |                                                 bool is_global_table, | 
 |                                                 bool uses_global_table) { | 
 |   ASSERT(Object::compressed_stackmaps_class() != Class::null()); | 
 |   // We don't currently allow both flags to be true. | 
 |   ASSERT(!is_global_table || !uses_global_table); | 
 |   // The canonical empty instance should be used instead. | 
 |   ASSERT(size != 0); | 
 |  | 
 |   if (!UntaggedCompressedStackMaps::SizeField::is_valid(size)) { | 
 |     FATAL( | 
 |         "Fatal error in CompressedStackMaps::New: " | 
 |         "invalid payload size %" Pu "\n", | 
 |         size); | 
 |   } | 
 |  | 
 |   auto& result = CompressedStackMaps::Handle(); | 
 |   { | 
    // CompressedStackMaps data objects are associated with a code object,
    // so allocate them in old generation.
 |     auto raw = Object::Allocate<CompressedStackMaps>(Heap::kOld, size); | 
 |     NoSafepointScope no_safepoint; | 
 |     result = raw; | 
 |     result.untag()->payload()->set_flags_and_size( | 
 |         UntaggedCompressedStackMaps::GlobalTableBit::encode(is_global_table) | | 
 |         UntaggedCompressedStackMaps::UsesTableBit::encode(uses_global_table) | | 
 |         UntaggedCompressedStackMaps::SizeField::encode(size)); | 
 |     // Perform the copy under the NoSafepointScope since it uses a raw pointer | 
 |     // to the payload, and so the object should not move during the copy. | 
 |     auto cursor = | 
 |         result.UnsafeMutableNonPointer(result.untag()->payload()->data()); | 
 |     memcpy(cursor, payload, size);  // NOLINT | 
 |   } | 
 |  | 
 |   ASSERT(!result.IsGlobalTable() || !result.UsesGlobalTable()); | 
 |  | 
 |   return result.ptr(); | 
 | } | 
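
// Sketch of the payload header written above (illustrative; the
// authoritative bit layout lives in UntaggedCompressedStackMaps). The two
// flag bits and the payload size are packed into a single word:
//
//   flags_and_size = GlobalTableBit::encode(is_global_table) |
//                    UsesTableBit::encode(uses_global_table) |
//                    SizeField::encode(size);
//
// so IsGlobalTable(), UsesGlobalTable(), and payload_size() are plain
// bit-field decodes of that word.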
 |  | 
 | const char* CompressedStackMaps::ToCString() const { | 
 |   ASSERT(!IsGlobalTable()); | 
 |   if (payload_size() == 0) { | 
 |     return "CompressedStackMaps()"; | 
 |   } | 
 |   auto const t = Thread::Current(); | 
 |   ZoneTextBuffer buffer(t->zone(), 100); | 
 |   buffer.AddString("CompressedStackMaps("); | 
 |   WriteToBuffer(&buffer, /*base=*/0, ", "); | 
 |   buffer.AddString(")"); | 
 |   return buffer.buffer(); | 
 | } | 
 |  | 
 | StringPtr LocalVarDescriptors::GetName(intptr_t var_index) const { | 
 |   ASSERT(var_index < Length()); | 
 |   ASSERT(Object::Handle(ptr()->untag()->name(var_index)).IsString()); | 
 |   return ptr()->untag()->name(var_index); | 
 | } | 
 |  | 
 | void LocalVarDescriptors::SetVar( | 
 |     intptr_t var_index, | 
 |     const String& name, | 
 |     UntaggedLocalVarDescriptors::VarInfo* info) const { | 
 |   ASSERT(var_index < Length()); | 
 |   ASSERT(!name.IsNull()); | 
 |   ptr()->untag()->set_name(var_index, name.ptr()); | 
 |   ptr()->untag()->data()[var_index] = *info; | 
 | } | 
 |  | 
 | void LocalVarDescriptors::GetInfo( | 
 |     intptr_t var_index, | 
 |     UntaggedLocalVarDescriptors::VarInfo* info) const { | 
 |   ASSERT(var_index < Length()); | 
 |   *info = ptr()->untag()->data()[var_index]; | 
 | } | 
 |  | 
 | static int PrintVarInfo(char* buffer, | 
 |                         int len, | 
 |                         intptr_t i, | 
 |                         const String& var_name, | 
 |                         const UntaggedLocalVarDescriptors::VarInfo& info) { | 
 |   const UntaggedLocalVarDescriptors::VarInfoKind kind = info.kind(); | 
 |   const int32_t index = info.index(); | 
 |   if (kind == UntaggedLocalVarDescriptors::kContextLevel) { | 
 |     return Utils::SNPrint(buffer, len, | 
 |                           "%2" Pd | 
 |                           " %-13s level=%-3d" | 
 |                           " begin=%-3d end=%d\n", | 
 |                           i, LocalVarDescriptors::KindToCString(kind), index, | 
 |                           static_cast<int>(info.begin_pos.Pos()), | 
 |                           static_cast<int>(info.end_pos.Pos())); | 
 |   } else if (kind == UntaggedLocalVarDescriptors::kContextVar) { | 
 |     return Utils::SNPrint( | 
 |         buffer, len, | 
 |         "%2" Pd | 
 |         " %-13s level=%-3d index=%-3d" | 
 |         " begin=%-3d end=%-3d name=%s\n", | 
 |         i, LocalVarDescriptors::KindToCString(kind), info.scope_id, index, | 
 |         static_cast<int>(info.begin_pos.Pos()), | 
 |         static_cast<int>(info.end_pos.Pos()), var_name.ToCString()); | 
 |   } else { | 
 |     return Utils::SNPrint( | 
 |         buffer, len, | 
 |         "%2" Pd | 
 |         " %-13s scope=%-3d index=%-3d" | 
 |         " begin=%-3d end=%-3d name=%s\n", | 
 |         i, LocalVarDescriptors::KindToCString(kind), info.scope_id, index, | 
 |         static_cast<int>(info.begin_pos.Pos()), | 
 |         static_cast<int>(info.end_pos.Pos()), var_name.ToCString()); | 
 |   } | 
 | } | 
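
// Example lines produced by PrintVarInfo (hypothetical values; spacing
// approximate):
//
//    0 ContextLevel  level=0   begin=3   end=12
//    1 ContextVar    level=0   index=1   begin=3   end=12  name=x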
 |  | 
 | const char* LocalVarDescriptors::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "LocalVarDescriptors: null"; | 
 |   } | 
 |   if (Length() == 0) { | 
 |     return "empty LocalVarDescriptors"; | 
 |   } | 
 |   intptr_t len = 1;  // Trailing '\0'. | 
 |   String& var_name = String::Handle(); | 
 |   for (intptr_t i = 0; i < Length(); i++) { | 
 |     UntaggedLocalVarDescriptors::VarInfo info; | 
 |     var_name = GetName(i); | 
 |     GetInfo(i, &info); | 
 |     len += PrintVarInfo(nullptr, 0, i, var_name, info); | 
 |   } | 
 |   char* buffer = Thread::Current()->zone()->Alloc<char>(len + 1); | 
 |   buffer[0] = '\0'; | 
 |   intptr_t num_chars = 0; | 
 |   for (intptr_t i = 0; i < Length(); i++) { | 
 |     UntaggedLocalVarDescriptors::VarInfo info; | 
 |     var_name = GetName(i); | 
 |     GetInfo(i, &info); | 
 |     num_chars += PrintVarInfo((buffer + num_chars), (len - num_chars), i, | 
 |                               var_name, info); | 
 |   } | 
 |   return buffer; | 
 | } | 
 |  | 
 | const char* LocalVarDescriptors::KindToCString( | 
 |     UntaggedLocalVarDescriptors::VarInfoKind kind) { | 
 |   switch (kind) { | 
 |     case UntaggedLocalVarDescriptors::kStackVar: | 
 |       return "StackVar"; | 
 |     case UntaggedLocalVarDescriptors::kContextVar: | 
 |       return "ContextVar"; | 
 |     case UntaggedLocalVarDescriptors::kContextLevel: | 
 |       return "ContextLevel"; | 
 |     case UntaggedLocalVarDescriptors::kSavedCurrentContext: | 
 |       return "CurrentCtx"; | 
 |     default: | 
 |       UNIMPLEMENTED(); | 
 |       return nullptr; | 
 |   } | 
 | } | 
 |  | 
 | LocalVarDescriptorsPtr LocalVarDescriptors::New(intptr_t num_variables) { | 
 |   ASSERT(Object::var_descriptors_class() != Class::null()); | 
 |   if (num_variables < 0 || num_variables > kMaxElements) { | 
 |     // This should be caught before we reach here. | 
 |     FATAL( | 
 |         "Fatal error in LocalVarDescriptors::New: " | 
 |         "invalid num_variables %" Pd ". Maximum is: %d\n", | 
 |         num_variables, UntaggedLocalVarDescriptors::kMaxIndex); | 
 |   } | 
 |   auto raw = Object::Allocate<LocalVarDescriptors>(Heap::kOld, num_variables); | 
 |   NoSafepointScope no_safepoint; | 
 |   raw->untag()->num_entries_ = num_variables; | 
 |   return raw; | 
 | } | 
 |  | 
 | intptr_t LocalVarDescriptors::Length() const { | 
 |   return untag()->num_entries_; | 
 | } | 
 |  | 
 | intptr_t ExceptionHandlers::num_entries() const { | 
 |   return untag()->num_entries(); | 
 | } | 
 |  | 
 | bool ExceptionHandlers::has_async_handler() const { | 
 |   return UntaggedExceptionHandlers::AsyncHandlerBit::decode( | 
 |       untag()->packed_fields_); | 
 | } | 
 |  | 
 | void ExceptionHandlers::set_has_async_handler(bool value) const { | 
 |   StoreNonPointer(&untag()->packed_fields_, | 
 |                   UntaggedExceptionHandlers::AsyncHandlerBit::update( | 
 |                       value, untag()->packed_fields_)); | 
 | } | 
 |  | 
 | void ExceptionHandlers::SetHandlerInfo(intptr_t try_index, | 
 |                                        intptr_t outer_try_index, | 
 |                                        uword handler_pc_offset, | 
 |                                        bool needs_stacktrace, | 
 |                                        bool has_catch_all, | 
 |                                        bool is_generated) const { | 
 |   ASSERT((try_index >= 0) && (try_index < num_entries())); | 
 |   NoSafepointScope no_safepoint; | 
 |   ExceptionHandlerInfo* info = | 
 |       UnsafeMutableNonPointer(&untag()->data()[try_index]); | 
 |   info->outer_try_index = outer_try_index; | 
  // Some C compilers warn about the comparison always being true when using
  // <= due to the limited range of the data type.
 |   ASSERT((handler_pc_offset == static_cast<uword>(kMaxUint32)) || | 
 |          (handler_pc_offset < static_cast<uword>(kMaxUint32))); | 
 |   info->handler_pc_offset = handler_pc_offset; | 
 |   info->needs_stacktrace = static_cast<int8_t>(needs_stacktrace); | 
 |   info->has_catch_all = static_cast<int8_t>(has_catch_all); | 
 |   info->is_generated = static_cast<int8_t>(is_generated); | 
 | } | 
 |  | 
 | void ExceptionHandlers::GetHandlerInfo(intptr_t try_index, | 
 |                                        ExceptionHandlerInfo* info) const { | 
 |   ASSERT((try_index >= 0) && (try_index < num_entries())); | 
 |   ASSERT(info != nullptr); | 
 |   *info = untag()->data()[try_index]; | 
 | } | 
 |  | 
 | uword ExceptionHandlers::HandlerPCOffset(intptr_t try_index) const { | 
 |   ASSERT((try_index >= 0) && (try_index < num_entries())); | 
 |   return untag()->data()[try_index].handler_pc_offset; | 
 | } | 
 |  | 
 | intptr_t ExceptionHandlers::OuterTryIndex(intptr_t try_index) const { | 
 |   ASSERT((try_index >= 0) && (try_index < num_entries())); | 
 |   return untag()->data()[try_index].outer_try_index; | 
 | } | 
 |  | 
 | bool ExceptionHandlers::NeedsStackTrace(intptr_t try_index) const { | 
 |   ASSERT((try_index >= 0) && (try_index < num_entries())); | 
 |   return untag()->data()[try_index].needs_stacktrace != 0; | 
 | } | 
 |  | 
 | bool ExceptionHandlers::IsGenerated(intptr_t try_index) const { | 
 |   ASSERT((try_index >= 0) && (try_index < num_entries())); | 
 |   return untag()->data()[try_index].is_generated != 0; | 
 | } | 
 |  | 
 | bool ExceptionHandlers::HasCatchAll(intptr_t try_index) const { | 
 |   ASSERT((try_index >= 0) && (try_index < num_entries())); | 
 |   return untag()->data()[try_index].has_catch_all != 0; | 
 | } | 
 |  | 
 | void ExceptionHandlers::SetHandledTypes(intptr_t try_index, | 
 |                                         const Array& handled_types) const { | 
 |   ASSERT((try_index >= 0) && (try_index < num_entries())); | 
 |   ASSERT(!handled_types.IsNull()); | 
 |   const Array& handled_types_data = | 
 |       Array::Handle(untag()->handled_types_data()); | 
 |   handled_types_data.SetAt(try_index, handled_types); | 
 | } | 
 |  | 
 | ArrayPtr ExceptionHandlers::GetHandledTypes(intptr_t try_index) const { | 
 |   ASSERT((try_index >= 0) && (try_index < num_entries())); | 
 |   Array& array = Array::Handle(untag()->handled_types_data()); | 
 |   array ^= array.At(try_index); | 
 |   return array.ptr(); | 
 | } | 
 |  | 
 | void ExceptionHandlers::set_handled_types_data(const Array& value) const { | 
 |   untag()->set_handled_types_data(value.ptr()); | 
 | } | 
 |  | 
 | ExceptionHandlersPtr ExceptionHandlers::New(intptr_t num_handlers) { | 
 |   ASSERT(Object::exception_handlers_class() != Class::null()); | 
 |   if ((num_handlers < 0) || (num_handlers >= kMaxHandlers)) { | 
 |     FATAL( | 
 |         "Fatal error in ExceptionHandlers::New(): " | 
 |         "invalid num_handlers %" Pd "\n", | 
 |         num_handlers); | 
 |   } | 
 |   const Array& handled_types_data = | 
 |       (num_handlers == 0) ? Object::empty_array() | 
 |                           : Array::Handle(Array::New(num_handlers, Heap::kOld)); | 
 |   return ExceptionHandlers::New(handled_types_data); | 
 | } | 
 |  | 
 | ExceptionHandlersPtr ExceptionHandlers::New(const Array& handled_types_data) { | 
 |   ASSERT(Object::exception_handlers_class() != Class::null()); | 
 |   const intptr_t num_handlers = handled_types_data.Length(); | 
 |   if ((num_handlers < 0) || (num_handlers >= kMaxHandlers)) { | 
 |     FATAL( | 
 |         "Fatal error in ExceptionHandlers::New(): " | 
 |         "invalid num_handlers %" Pd "\n", | 
 |         num_handlers); | 
 |   } | 
 |   ExceptionHandlers& result = ExceptionHandlers::Handle(); | 
 |   { | 
 |     auto raw = Object::Allocate<ExceptionHandlers>(Heap::kOld, num_handlers); | 
 |     NoSafepointScope no_safepoint; | 
 |     result = raw; | 
 |     result.untag()->packed_fields_ = | 
 |         UntaggedExceptionHandlers::NumEntriesBits::encode(num_handlers); | 
 |   } | 
 |   result.set_handled_types_data(handled_types_data); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void ExceptionHandlers::WriteToBuffer(BaseTextBuffer* buffer, | 
 |                                       uword base) const { | 
 |   auto& handled_types = Array::Handle(); | 
 |   auto& type = AbstractType::Handle(); | 
 |   ExceptionHandlerInfo info; | 
 |   for (intptr_t i = 0; i < num_entries(); i++) { | 
 |     GetHandlerInfo(i, &info); | 
 |     handled_types = GetHandledTypes(i); | 
 |     const intptr_t num_types = | 
 |         handled_types.IsNull() ? 0 : handled_types.Length(); | 
 |     buffer->Printf("%" Pd " => %#" Px "  (%" Pd " types) (outer %d)%s%s\n", i, | 
 |                    base + info.handler_pc_offset, num_types, | 
 |                    info.outer_try_index, | 
 |                    ((info.needs_stacktrace != 0) ? " (needs stack trace)" : ""), | 
 |                    ((info.is_generated != 0) ? " (generated)" : "")); | 
 |     for (int k = 0; k < num_types; k++) { | 
 |       type ^= handled_types.At(k); | 
 |       ASSERT(!type.IsNull()); | 
 |       buffer->Printf("  %d. %s\n", k, type.ToCString()); | 
 |     } | 
 |   } | 
 |   if (has_async_handler()) { | 
 |     buffer->AddString("<async handler>\n"); | 
 |   } | 
 | } | 
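
// Example output of WriteToBuffer (hypothetical values). One line is
// emitted per try-index, followed by the types it handles:
//
//   0 => 0x2a10  (1 types) (outer -1) (needs stack trace)
//     0. Error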
 |  | 
 | const char* ExceptionHandlers::ToCString() const { | 
 |   if (num_entries() == 0) { | 
 |     return has_async_handler() | 
 |                ? "empty ExceptionHandlers (with <async handler>)" | 
 |                : "empty ExceptionHandlers"; | 
 |   } | 
 |   ZoneTextBuffer buffer(Thread::Current()->zone()); | 
 |   WriteToBuffer(&buffer, /*base=*/0); | 
 |   return buffer.buffer(); | 
 | } | 
 |  | 
 | void SingleTargetCache::set_target(const Code& value) const { | 
 |   untag()->set_target(value.ptr()); | 
 | } | 
 |  | 
 | const char* SingleTargetCache::ToCString() const { | 
 |   return "SingleTargetCache"; | 
 | } | 
 |  | 
 | SingleTargetCachePtr SingleTargetCache::New() { | 
 |   return Object::Allocate<SingleTargetCache>(Heap::kOld); | 
 | } | 
 |  | 
 | void UnlinkedCall::set_can_patch_to_monomorphic(bool value) const { | 
 |   StoreNonPointer(&untag()->can_patch_to_monomorphic_, value); | 
 | } | 
 |  | 
 | uword UnlinkedCall::Hash() const { | 
 |   return String::Handle(target_name()).Hash(); | 
 | } | 
 |  | 
 | bool UnlinkedCall::Equals(const UnlinkedCall& other) const { | 
 |   return (target_name() == other.target_name()) && | 
 |          (arguments_descriptor() == other.arguments_descriptor()) && | 
 |          (can_patch_to_monomorphic() == other.can_patch_to_monomorphic()); | 
 | } | 
 |  | 
 | const char* UnlinkedCall::ToCString() const { | 
 |   return "UnlinkedCall"; | 
 | } | 
 |  | 
 | UnlinkedCallPtr UnlinkedCall::New() { | 
 |   const auto& result = | 
 |       UnlinkedCall::Handle(Object::Allocate<UnlinkedCall>(Heap::kOld)); | 
 |   result.set_can_patch_to_monomorphic(!FLAG_precompiled_mode); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | MonomorphicSmiableCallPtr MonomorphicSmiableCall::New(classid_t expected_cid, | 
 |                                                       const Code& target) { | 
 |   const auto& result = MonomorphicSmiableCall::Handle( | 
 |       Object::Allocate<MonomorphicSmiableCall>(Heap::kOld)); | 
 |   result.StoreNonPointer(&result.untag()->expected_cid_, expected_cid); | 
 |   result.StoreNonPointer(&result.untag()->entrypoint_, target.EntryPoint()); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* MonomorphicSmiableCall::ToCString() const { | 
 |   return "MonomorphicSmiableCall"; | 
 | } | 
 |  | 
 | const char* CallSiteData::ToCString() const { | 
 |   // CallSiteData is an abstract class.  We should never reach here. | 
 |   UNREACHABLE(); | 
 |   return "CallSiteData"; | 
 | } | 
 |  | 
 | void CallSiteData::set_target_name(const String& value) const { | 
 |   ASSERT(!value.IsNull()); | 
 |   ASSERT(value.IsCanonical()); | 
 |   untag()->set_target_name(value.ptr()); | 
 | } | 
 |  | 
 | void CallSiteData::set_arguments_descriptor(const Array& value) const { | 
 |   ASSERT(!value.IsNull()); | 
 |   untag()->set_args_descriptor(value.ptr()); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | void ICData::SetReceiversStaticType(const AbstractType& type) const { | 
 |   untag()->set_receivers_static_type(type.ptr()); | 
 |  | 
 |   if (!type.IsNull() && type.HasTypeClass() && (NumArgsTested() == 1) && | 
 |       type.IsInstantiated() && !type.IsFutureOrType()) { | 
 |     const Class& cls = Class::Handle(type.type_class()); | 
 |     if (cls.IsGeneric()) { | 
 |       set_tracking_exactness(true); | 
 |     } | 
 |   } | 
 | } | 
 | #endif | 
 |  | 
 | void ICData::SetTargetAtPos(const Array& data, | 
 |                             intptr_t data_pos, | 
 |                             intptr_t num_args_tested, | 
 |                             const Function& target) { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   // JIT | 
 |   data.SetAt(data_pos + TargetIndexFor(num_args_tested), target); | 
 | #else | 
 |   // AOT | 
 |   ASSERT(target.HasCode()); | 
 |   const Code& code = Code::Handle(target.CurrentCode()); | 
 |   data.SetAt(data_pos + CodeIndexFor(num_args_tested), code); | 
 |   data.SetAt(data_pos + EntryPointIndexFor(num_args_tested), target); | 
 | #endif | 
 | } | 
 |  | 
 | uword ICData::Hash() const { | 
 |   return String::HashRawSymbol(target_name()) ^ deopt_id(); | 
 | } | 
 |  | 
 | const char* ICData::ToCString() const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const String& name = String::Handle(zone, target_name()); | 
 |   return zone->PrintToString("ICData(%s num-args: %" Pd " num-checks: %" Pd | 
 |                              " type-args-len: %" Pd ", deopt-id: %" Pd ")", | 
 |                              name.ToCString(), NumArgsTested(), | 
 |                              NumberOfChecks(), TypeArgsLen(), deopt_id()); | 
 | } | 
 |  | 
 | FunctionPtr ICData::Owner() const { | 
 |   Object& obj = Object::Handle(untag()->owner()); | 
 |   if (obj.IsNull()) { | 
 |     ASSERT(Dart::vm_snapshot_kind() == Snapshot::kFullAOT); | 
 |     return Function::null(); | 
 |   } else if (obj.IsFunction()) { | 
 |     return Function::Cast(obj).ptr(); | 
 |   } else { | 
 |     ICData& original = ICData::Handle(); | 
 |     original ^= obj.ptr(); | 
 |     return original.Owner(); | 
 |   } | 
 | } | 
 |  | 
 | ICDataPtr ICData::Original() const { | 
 |   if (IsNull()) { | 
 |     return ICData::null(); | 
 |   } | 
 |   if (untag()->owner()->IsICData()) { | 
 |     return static_cast<ICDataPtr>(untag()->owner()); | 
 |   } | 
 |   return this->ptr(); | 
 | } | 
 |  | 
 | void ICData::SetOriginal(const ICData& value) const { | 
 |   ASSERT(value.IsOriginal()); | 
 |   ASSERT(!value.IsNull()); | 
 |   untag()->set_owner(static_cast<ObjectPtr>(value.ptr())); | 
 | } | 
 |  | 
 | void ICData::set_owner(const Function& value) const { | 
 |   untag()->set_owner(static_cast<ObjectPtr>(value.ptr())); | 
 | } | 
 |  | 
 | void ICData::set_deopt_id(intptr_t value) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   ASSERT(value <= kMaxInt32); | 
 |   StoreNonPointer(&untag()->deopt_id_, value); | 
 | #endif | 
 | } | 
 |  | 
 | void ICData::set_entries(const Array& value) const { | 
 |   ASSERT(!value.IsNull()); | 
 |   untag()->set_entries<std::memory_order_release>(value.ptr()); | 
 | } | 
 |  | 
 | intptr_t ICData::NumArgsTested() const { | 
 |   return untag()->state_bits_.Read<NumArgsTestedBits>(); | 
 | } | 
 |  | 
 | void ICData::SetNumArgsTested(intptr_t value) const { | 
 |   ASSERT(Utils::IsUint(2, value)); | 
 |   untag()->state_bits_.Update<NumArgsTestedBits>(value); | 
 | } | 
 |  | 
 | intptr_t CallSiteData::TypeArgsLen() const { | 
 |   ArgumentsDescriptor args_desc(Array::Handle(arguments_descriptor())); | 
 |   return args_desc.TypeArgsLen(); | 
 | } | 
 |  | 
 | intptr_t CallSiteData::CountWithTypeArgs() const { | 
 |   ArgumentsDescriptor args_desc(Array::Handle(arguments_descriptor())); | 
 |   return args_desc.CountWithTypeArgs(); | 
 | } | 
 |  | 
 | intptr_t CallSiteData::CountWithoutTypeArgs() const { | 
 |   ArgumentsDescriptor args_desc(Array::Handle(arguments_descriptor())); | 
 |   return args_desc.Count(); | 
 | } | 
 |  | 
 | intptr_t CallSiteData::SizeWithoutTypeArgs() const { | 
 |   ArgumentsDescriptor args_desc(Array::Handle(arguments_descriptor())); | 
 |   return args_desc.Size(); | 
 | } | 
 |  | 
 | intptr_t CallSiteData::SizeWithTypeArgs() const { | 
 |   ArgumentsDescriptor args_desc(Array::Handle(arguments_descriptor())); | 
 |   return args_desc.SizeWithTypeArgs(); | 
 | } | 
 |  | 
 | uint32_t ICData::DeoptReasons() const { | 
 |   return untag()->state_bits_.Read<DeoptReasonBits>(); | 
 | } | 
 |  | 
 | void ICData::SetDeoptReasons(uint32_t reasons) const { | 
 |   untag()->state_bits_.Update<DeoptReasonBits>(reasons); | 
 | } | 
 |  | 
 | bool ICData::HasDeoptReason(DeoptReasonId reason) const { | 
 |   ASSERT(reason <= kLastRecordedDeoptReason); | 
 |   return (DeoptReasons() & (1 << reason)) != 0; | 
 | } | 
 |  | 
 | void ICData::AddDeoptReason(DeoptReasonId reason) const { | 
 |   if (reason <= kLastRecordedDeoptReason) { | 
 |     untag()->state_bits_.FetchOr<DeoptReasonBits>(1 << reason); | 
 |   } | 
 | } | 
 |  | 
 | const char* ICData::RebindRuleToCString(RebindRule r) { | 
 |   switch (r) { | 
 | #define RULE_CASE(Name)                                                        \ | 
 |   case RebindRule::k##Name:                                                    \ | 
 |     return #Name; | 
 |     FOR_EACH_REBIND_RULE(RULE_CASE) | 
 | #undef RULE_CASE | 
 |     default: | 
 |       return nullptr; | 
 |   } | 
 | } | 
 |  | 
 | bool ICData::ParseRebindRule(const char* str, RebindRule* out) { | 
 | #define RULE_CASE(Name)                                                        \ | 
 |   if (strcmp(str, #Name) == 0) {                                               \ | 
 |     *out = RebindRule::k##Name;                                                \ | 
 |     return true;                                                               \ | 
 |   } | 
 |   FOR_EACH_REBIND_RULE(RULE_CASE) | 
 | #undef RULE_CASE | 
 |   return false; | 
 | } | 
 |  | 
 | ICData::RebindRule ICData::rebind_rule() const { | 
 |   return RebindRule(untag()->state_bits_.Read<RebindRuleBits>()); | 
 | } | 
 |  | 
 | void ICData::set_rebind_rule(uint32_t rebind_rule) const { | 
 |   untag()->state_bits_.Update<ICData::RebindRuleBits>(rebind_rule); | 
 | } | 
 |  | 
 | bool ICData::is_static_call() const { | 
 |   return rebind_rule() != kInstance; | 
 | } | 
 |  | 
 | void ICData::clear_state_bits() const { | 
 |   untag()->state_bits_ = 0; | 
 | } | 
 |  | 
 | intptr_t ICData::TestEntryLengthFor(intptr_t num_args, | 
 |                                     bool tracking_exactness) { | 
  return num_args + 1 /* target function */ + 1 /* frequency */ +
         (tracking_exactness ? 1 : 0) /* exactness state */;
 | } | 
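
// Worked example (illustrative): with one argument tested and exactness
// tracking enabled, each test entry occupies
// 1 (cid) + 1 (target) + 1 (count) + 1 (exactness) = 4 array slots;
// without tracking it occupies 3.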
 |  | 
 | intptr_t ICData::TestEntryLength() const { | 
 |   return TestEntryLengthFor(NumArgsTested(), is_tracking_exactness()); | 
 | } | 
 |  | 
 | intptr_t ICData::Length() const { | 
 |   return (Smi::Value(entries()->untag()->length()) / TestEntryLength()); | 
 | } | 
 |  | 
 | intptr_t ICData::NumberOfChecks() const { | 
 |   DEBUG_ONLY(AssertInvariantsAreSatisfied()); | 
 |   return Length() - 1; | 
 | } | 
 |  | 
 | bool ICData::NumberOfChecksIs(intptr_t n) const { | 
 |   DEBUG_ONLY(AssertInvariantsAreSatisfied()); | 
 |   return NumberOfChecks() == n; | 
 | } | 
 |  | 
 | #if defined(DEBUG) | 
 | void ICData::AssertInvariantsAreSatisfied() const { | 
 |   // See layout and invariant of [ICData] in class comment in object.h. | 
 |   // | 
  // This method can be called without holding any locks; it will grab a
  // snapshot of `entries()` and run its verification logic on that.
 |   auto zone = Thread::Current()->zone(); | 
 |   const auto& array = Array::Handle(zone, entries()); | 
 |  | 
 |   const intptr_t entry_length = TestEntryLength(); | 
 |   const intptr_t num_checks = array.Length() / entry_length - 1; | 
 |   const intptr_t num_args = NumArgsTested(); | 
 |  | 
  /// Backing store must be a multiple of the entry length.
 |   ASSERT((array.Length() % entry_length) == 0); | 
 |  | 
 |   /// Entries must be valid. | 
 |   for (intptr_t i = 0; i < num_checks; ++i) { | 
    // Each entry should be valid.
    const intptr_t start = entry_length * i;
    for (intptr_t j = 0; j < num_args; ++j) {
      ASSERT(!array.At(start + j)->IsHeapObject());
      ASSERT(array.At(start + j) != smi_illegal_cid().ptr());
    }
 |     ASSERT(array.At(start + TargetIndexFor(num_args))->IsHeapObject()); | 
 |     if (is_tracking_exactness()) { | 
 |       ASSERT(!array.At(start + ExactnessIndexFor(num_args))->IsHeapObject()); | 
 |     } | 
 |   } | 
 |  | 
 |   /// Sentinel at end must be valid. | 
 |   const intptr_t sentinel_start = num_checks * entry_length; | 
 |   for (intptr_t i = 0; i < entry_length - 1; ++i) { | 
 |     ASSERT(array.At(sentinel_start + i) == smi_illegal_cid().ptr()); | 
 |   } | 
 |   if (num_checks == 0) { | 
 |     ASSERT(array.At(sentinel_start + entry_length - 1) == | 
 |            smi_illegal_cid().ptr()); | 
 |     ASSERT(ICData::CachedEmptyICDataArray(num_args, is_tracking_exactness()) == | 
 |            array.ptr()); | 
 |   } else { | 
 |     ASSERT(array.At(sentinel_start + entry_length - 1) == ptr()); | 
 |   } | 
 |  | 
 |   // Invariants for ICData of static calls. | 
 |   if (num_args == 0) { | 
 |     ASSERT(Length() == 2); | 
 |     ASSERT(TestEntryLength() == 2); | 
 |   } | 
 | } | 
 | #endif  // defined(DEBUG) | 
 |  | 
// Discounts any checks with a usage count of zero.
 | intptr_t ICData::NumberOfUsedChecks() const { | 
 |   const intptr_t n = NumberOfChecks(); | 
 |   intptr_t count = 0; | 
 |   for (intptr_t i = 0; i < n; i++) { | 
 |     if (GetCountAt(i) > 0) { | 
 |       count++; | 
 |     } | 
 |   } | 
 |   return count; | 
 | } | 
 |  | 
 | void ICData::WriteSentinel(const Array& data, | 
 |                            intptr_t test_entry_length, | 
 |                            const Object& back_ref) { | 
 |   ASSERT(!data.IsNull()); | 
 |   RELEASE_ASSERT(smi_illegal_cid().Value() == kIllegalCid); | 
 |   const intptr_t entry_start = data.Length() - test_entry_length; | 
 |   for (intptr_t i = 0; i < test_entry_length - 1; i++) { | 
 |     data.SetAt(entry_start + i, smi_illegal_cid()); | 
 |   } | 
 |   data.SetAt(entry_start + test_entry_length - 1, back_ref); | 
 | } | 
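
// Illustrative sketch of the backing array after WriteSentinel, for one
// recorded check with NumArgsTested() == 1 and no exactness tracking
// (test entry length 3):
//
//   [cid0, target0, count0, kIllegalCid, kIllegalCid, <back_ref>]
//
// The sentinel mirrors a regular entry's shape: kIllegalCid (as a Smi) in
// every slot except the last, which holds the back reference passed in.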
 |  | 
 | #if defined(DEBUG) | 
 | // Used in asserts to verify that a check is not added twice. | 
 | bool ICData::HasCheck(const GrowableArray<intptr_t>& cids) const { | 
 |   return FindCheck(cids) != -1; | 
 | } | 
 | #endif  // DEBUG | 
 |  | 
 | intptr_t ICData::FindCheck(const GrowableArray<intptr_t>& cids) const { | 
 |   const intptr_t len = NumberOfChecks(); | 
 |   GrowableArray<intptr_t> class_ids; | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     GetClassIdsAt(i, &class_ids); | 
 |     bool matches = true; | 
 |     for (intptr_t k = 0; k < class_ids.length(); k++) { | 
 |       ASSERT(class_ids[k] != kIllegalCid); | 
 |       if (class_ids[k] != cids[k]) { | 
 |         matches = false; | 
 |         break; | 
 |       } | 
 |     } | 
 |     if (matches) { | 
 |       return i; | 
 |     } | 
 |   } | 
 |   return -1; | 
 | } | 
 |  | 
 | void ICData::TruncateTo(intptr_t num_checks, | 
 |                         const CallSiteResetter& proof_of_reload) const { | 
 |   USE(proof_of_reload);  // This method can only be called during reload. | 
 |  | 
 |   DEBUG_ONLY(AssertInvariantsAreSatisfied()); | 
 |   ASSERT(num_checks <= NumberOfChecks()); | 
 |  | 
 |   // Nothing to do. | 
 |   if (NumberOfChecks() == num_checks) return; | 
 |  | 
 |   auto thread = Thread::Current(); | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   auto& array = thread->ArrayHandle(); | 
 |  | 
 |   // If we make the ICData empty, use the pre-allocated shared backing stores. | 
 |   const intptr_t num_args = NumArgsTested(); | 
 |   if (num_checks == 0) { | 
 |     array = ICData::CachedEmptyICDataArray(num_args, is_tracking_exactness()); | 
 |     set_entries(array); | 
 |     return; | 
 |   } | 
 |  | 
  // Otherwise truncate the array and reinitialize the sentinel.
  // Use kIllegalCid for all slots in the sentinel entry except the last,
  // which is a backref to this ICData.
 |   const intptr_t entry_length = TestEntryLength(); | 
 |   array = entries(); | 
 |   array.Truncate((num_checks + 1) * entry_length); | 
 |   WriteSentinel(array, entry_length, *this); | 
 | } | 
 |  | 
 | void ICData::ClearCountAt(intptr_t index, | 
 |                           const CallSiteResetter& proof_of_reload) const { | 
 |   USE(proof_of_reload);  // This method can only be called during reload. | 
 |  | 
 |   ASSERT(index >= 0); | 
 |   ASSERT(index < NumberOfChecks()); | 
 |   SetCountAt(index, 0); | 
 | } | 
 |  | 
 | void ICData::ClearAndSetStaticTarget( | 
 |     const Function& func, | 
 |     const CallSiteResetter& proof_of_reload) const { | 
 |   USE(proof_of_reload);  // This method can only be called during reload. | 
 |  | 
 |   // The final entry is always the sentinel. | 
 |   DEBUG_ONLY(AssertInvariantsAreSatisfied()); | 
 |  | 
 |   if (IsImmutable()) return; | 
 |   if (NumberOfChecks() == 0) return; | 
 |  | 
 |   // Leave one entry. | 
 |   TruncateTo(/*num_checks=*/1, proof_of_reload); | 
 |  | 
 |   // Reinitialize the one and only entry. | 
 |   const intptr_t num_args = NumArgsTested(); | 
 |   Thread* thread = Thread::Current(); | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   Array& data = thread->ArrayHandle(); | 
 |   data = entries(); | 
 |   const Smi& object_cid = Smi::Handle(Smi::New(kObjectCid)); | 
 |   for (intptr_t i = 0; i < num_args; i++) { | 
 |     data.SetAt(i, object_cid); | 
 |   } | 
 |   data.SetAt(TargetIndexFor(num_args), func); | 
 |   data.SetAt(CountIndexFor(num_args), Object::smi_zero()); | 
 | } | 
 |  | 
 | bool ICData::ValidateInterceptor(const Function& target) const { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   const String& name = String::Handle(target_name()); | 
 |   if (Function::IsDynamicInvocationForwarderName(name)) { | 
 |     return Function::DemangleDynamicInvocationForwarderName(name) == | 
 |            target.name(); | 
 |   } | 
 | #endif | 
 |   ObjectStore* store = IsolateGroup::Current()->object_store(); | 
 |   ASSERT((target.ptr() == store->simple_instance_of_true_function()) || | 
 |          (target.ptr() == store->simple_instance_of_false_function())); | 
 |   const String& instance_of_name = String::Handle( | 
 |       Library::PrivateCoreLibName(Symbols::_simpleInstanceOf()).ptr()); | 
 |   ASSERT(target_name() == instance_of_name.ptr()); | 
 |   return true; | 
 | } | 
 |  | 
 | void ICData::EnsureHasCheck(const GrowableArray<intptr_t>& class_ids, | 
 |                             const Function& target, | 
 |                             intptr_t count) const { | 
 |   SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex()); | 
 |  | 
 |   if (FindCheck(class_ids) != -1) return; | 
 |   AddCheckInternal(class_ids, target, count); | 
 | } | 
 |  | 
 | void ICData::AddCheck(const GrowableArray<intptr_t>& class_ids, | 
 |                       const Function& target, | 
 |                       intptr_t count) const { | 
 |   SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex()); | 
 |   AddCheckInternal(class_ids, target, count); | 
 | } | 
 |  | 
 | void ICData::AddCheckInternal(const GrowableArray<intptr_t>& class_ids, | 
 |                               const Function& target, | 
 |                               intptr_t count) const { | 
 |   ASSERT( | 
 |       IsolateGroup::Current()->type_feedback_mutex()->IsOwnedByCurrentThread()); | 
 |  | 
 |   ASSERT(!is_tracking_exactness()); | 
 |   ASSERT(!target.IsNull()); | 
 |   ASSERT((target.name() == target_name()) || ValidateInterceptor(target)); | 
 |   DEBUG_ASSERT(!HasCheck(class_ids)); | 
 |   ASSERT(NumArgsTested() > 1);  // Otherwise use 'AddReceiverCheck'. | 
 |   const intptr_t num_args_tested = NumArgsTested(); | 
 |   ASSERT(class_ids.length() == num_args_tested); | 
 |   const intptr_t old_num = NumberOfChecks(); | 
 |   Array& data = Array::Handle(entries()); | 
 |  | 
  // ICData for static calls with NumArgsTested() > 0 initially has a dummy
  // set of cids entered (see ICData::NewForStaticCall). That entry is
  // overwritten by the first real type feedback data.
 |   if (old_num == 1 && num_args_tested == 2) { | 
 |     const bool has_dummy_entry = | 
 |         Smi::Value(Smi::RawCast(data.At(0))) == kObjectCid && | 
 |         Smi::Value(Smi::RawCast(data.At(1))) == kObjectCid; | 
 |     if (has_dummy_entry) { | 
 |       ASSERT(target.ptr() == data.At(TargetIndexFor(num_args_tested))); | 
 |       // Replace dummy entry. | 
 |       Smi& value = Smi::Handle(); | 
 |       for (intptr_t i = 0; i < NumArgsTested(); i++) { | 
 |         ASSERT(class_ids[i] != kIllegalCid); | 
 |         value = Smi::New(class_ids[i]); | 
 |         data.SetAt(i, value); | 
 |       } | 
 |       return; | 
 |     } | 
 |   } | 
 |   intptr_t index = -1; | 
 |   data = Grow(&index); | 
 |   ASSERT(!data.IsNull()); | 
 |   intptr_t data_pos = index * TestEntryLength(); | 
 |   Smi& value = Smi::Handle(); | 
 |   for (intptr_t i = 0; i < class_ids.length(); i++) { | 
    // kIllegalCid is used as a terminating value; do not add it.
 |     ASSERT(class_ids[i] != kIllegalCid); | 
 |     value = Smi::New(class_ids[i]); | 
 |     data.SetAt(data_pos + i, value); | 
 |   } | 
 |   ASSERT(!target.IsNull()); | 
 |   data.SetAt(data_pos + TargetIndexFor(num_args_tested), target); | 
 |   value = Smi::New(count); | 
 |   data.SetAt(data_pos + CountIndexFor(num_args_tested), value); | 
  // Multithreaded access to ICData requires that setting the entries array
  // be the last operation.
 |   set_entries(data); | 
 | } | 
 |  | 
 | ArrayPtr ICData::Grow(intptr_t* index) const { | 
 |   DEBUG_ONLY(AssertInvariantsAreSatisfied()); | 
 |  | 
 |   *index = NumberOfChecks(); | 
 |   Array& data = Array::Handle(entries()); | 
 |   const intptr_t new_len = data.Length() + TestEntryLength(); | 
 |   data = Array::Grow(data, new_len, Heap::kOld); | 
 |   WriteSentinel(data, TestEntryLength(), *this); | 
 |   return data.ptr(); | 
 | } | 
 |  | 
 | void ICData::DebugDump() const { | 
 |   const Function& owner = Function::Handle(Owner()); | 
 |   THR_Print("ICData::DebugDump\n"); | 
 |   THR_Print("Owner = %s [deopt=%" Pd "]\n", owner.ToCString(), deopt_id()); | 
 |   THR_Print("NumArgsTested = %" Pd "\n", NumArgsTested()); | 
 |   THR_Print("Length = %" Pd "\n", Length()); | 
 |   THR_Print("NumberOfChecks = %" Pd "\n", NumberOfChecks()); | 
 |  | 
 |   GrowableArray<intptr_t> class_ids; | 
 |   for (intptr_t i = 0; i < NumberOfChecks(); i++) { | 
 |     THR_Print("Check[%" Pd "]:", i); | 
 |     GetClassIdsAt(i, &class_ids); | 
 |     for (intptr_t c = 0; c < class_ids.length(); c++) { | 
 |       THR_Print(" %" Pd "", class_ids[c]); | 
 |     } | 
 |     THR_Print("--- %" Pd " hits\n", GetCountAt(i)); | 
 |   } | 
 | } | 
 |  | 
 | void ICData::EnsureHasReceiverCheck(intptr_t receiver_class_id, | 
 |                                     const Function& target, | 
 |                                     intptr_t count, | 
 |                                     StaticTypeExactnessState exactness) const { | 
 |   SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex()); | 
 |  | 
 |   GrowableArray<intptr_t> class_ids(1); | 
 |   class_ids.Add(receiver_class_id); | 
 |   if (FindCheck(class_ids) != -1) return; | 
 |  | 
 |   AddReceiverCheckInternal(receiver_class_id, target, count, exactness); | 
 | } | 
 |  | 
 | void ICData::AddReceiverCheck(intptr_t receiver_class_id, | 
 |                               const Function& target, | 
 |                               intptr_t count, | 
 |                               StaticTypeExactnessState exactness) const { | 
 |   SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex()); | 
 |   AddReceiverCheckInternal(receiver_class_id, target, count, exactness); | 
 | } | 
 |  | 
 | void ICData::AddReceiverCheckInternal( | 
 |     intptr_t receiver_class_id, | 
 |     const Function& target, | 
 |     intptr_t count, | 
 |     StaticTypeExactnessState exactness) const { | 
 | #if defined(DEBUG) | 
 |   GrowableArray<intptr_t> class_ids(1); | 
 |   class_ids.Add(receiver_class_id); | 
 |   ASSERT(!HasCheck(class_ids)); | 
 | #endif  // DEBUG | 
 |   ASSERT(!target.IsNull()); | 
 |   const intptr_t kNumArgsTested = 1; | 
 |   ASSERT(NumArgsTested() == kNumArgsTested);  // Otherwise use 'AddCheck'. | 
 |   ASSERT(receiver_class_id != kIllegalCid); | 
 |  | 
 |   intptr_t index = -1; | 
 |   Array& data = Array::Handle(Grow(&index)); | 
 |   intptr_t data_pos = index * TestEntryLength(); | 
 |   if ((receiver_class_id == kSmiCid) && (data_pos > 0)) { | 
 |     ASSERT(GetReceiverClassIdAt(0) != kSmiCid); | 
 |     // Move class occupying position 0 to the data_pos. | 
 |     for (intptr_t i = 0; i < TestEntryLength(); i++) { | 
 |       data.SetAt(data_pos + i, Object::Handle(data.At(i))); | 
 |     } | 
 |     // Insert kSmiCid in position 0. | 
 |     data_pos = 0; | 
 |   } | 
 |   data.SetAt(data_pos, Smi::Handle(Smi::New(receiver_class_id))); | 
 |   SetTargetAtPos(data, data_pos, kNumArgsTested, target); | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   data.SetAt(data_pos + CountIndexFor(kNumArgsTested), | 
 |              Smi::Handle(Smi::New(count))); | 
 |   if (is_tracking_exactness()) { | 
 |     data.SetAt(data_pos + ExactnessIndexFor(kNumArgsTested), | 
 |                Smi::Handle(Smi::New(exactness.Encode()))); | 
 |   } | 
 | #endif | 
 |  | 
  // Multithreaded access to ICData requires that setting the entries array
  // be the last operation.
 |   set_entries(data); | 
 | } | 
 |  | 
 | StaticTypeExactnessState ICData::GetExactnessAt(intptr_t index) const { | 
 |   if (!is_tracking_exactness()) { | 
 |     return StaticTypeExactnessState::NotTracking(); | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   Array& data = thread->ArrayHandle(); | 
 |   data = entries(); | 
 |   intptr_t data_pos = | 
 |       index * TestEntryLength() + ExactnessIndexFor(NumArgsTested()); | 
 |   return StaticTypeExactnessState::Decode( | 
 |       Smi::Value(Smi::RawCast(data.At(data_pos)))); | 
 | } | 
 |  | 
 | void ICData::GetCheckAt(intptr_t index, | 
 |                         GrowableArray<intptr_t>* class_ids, | 
 |                         Function* target) const { | 
 |   ASSERT(index < NumberOfChecks()); | 
 |   ASSERT(class_ids != nullptr); | 
 |   ASSERT(target != nullptr); | 
 |   class_ids->Clear(); | 
 |   Thread* thread = Thread::Current(); | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   Array& data = thread->ArrayHandle(); | 
 |   data = entries(); | 
 |   intptr_t data_pos = index * TestEntryLength(); | 
 |   for (intptr_t i = 0; i < NumArgsTested(); i++) { | 
 |     class_ids->Add(Smi::Value(Smi::RawCast(data.At(data_pos + i)))); | 
 |   } | 
 |   (*target) ^= data.At(data_pos + TargetIndexFor(NumArgsTested())); | 
 | } | 
 |  | 
 | void ICData::GetClassIdsAt(intptr_t index, | 
 |                            GrowableArray<intptr_t>* class_ids) const { | 
 |   ASSERT(index < Length()); | 
 |   ASSERT(class_ids != nullptr); | 
 |   ASSERT(IsValidEntryIndex(index)); | 
 |   class_ids->Clear(); | 
 |   Thread* thread = Thread::Current(); | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   Array& data = thread->ArrayHandle(); | 
 |   data = entries(); | 
 |   intptr_t data_pos = index * TestEntryLength(); | 
 |   for (intptr_t i = 0; i < NumArgsTested(); i++) { | 
 |     class_ids->Add(Smi::Value(Smi::RawCast(data.At(data_pos++)))); | 
 |   } | 
 | } | 
 |  | 
 | void ICData::GetOneClassCheckAt(intptr_t index, | 
 |                                 intptr_t* class_id, | 
 |                                 Function* target) const { | 
 |   ASSERT(class_id != nullptr); | 
 |   ASSERT(target != nullptr); | 
 |   ASSERT(NumArgsTested() == 1); | 
 |   Thread* thread = Thread::Current(); | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   Array& data = thread->ArrayHandle(); | 
 |   data = entries(); | 
 |   const intptr_t data_pos = index * TestEntryLength(); | 
 |   *class_id = Smi::Value(Smi::RawCast(data.At(data_pos))); | 
 |   *target ^= data.At(data_pos + TargetIndexFor(NumArgsTested())); | 
 | } | 
 |  | 
 | intptr_t ICData::GetCidAt(intptr_t index) const { | 
 |   ASSERT(NumArgsTested() == 1); | 
 |   Thread* thread = Thread::Current(); | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   Array& data = thread->ArrayHandle(); | 
 |   data = entries(); | 
 |   const intptr_t data_pos = index * TestEntryLength(); | 
 |   return Smi::Value(Smi::RawCast(data.At(data_pos))); | 
 | } | 
 |  | 
 | intptr_t ICData::GetClassIdAt(intptr_t index, intptr_t arg_nr) const { | 
 |   GrowableArray<intptr_t> class_ids; | 
 |   GetClassIdsAt(index, &class_ids); | 
 |   return class_ids[arg_nr]; | 
 | } | 
 |  | 
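// Reads the receiver class id without allocating a handle: the
// NoSafepointScope guarantees that the GC cannot move the entries array
// while we hold the raw, untagged pointer into it.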
 | intptr_t ICData::GetReceiverClassIdAt(intptr_t index) const { | 
 |   ASSERT(index < Length()); | 
 |   ASSERT(IsValidEntryIndex(index)); | 
 |   const intptr_t data_pos = index * TestEntryLength(); | 
 |   NoSafepointScope no_safepoint; | 
 |   ArrayPtr raw_data = entries(); | 
 |   return Smi::Value(Smi::RawCast(raw_data->untag()->element(data_pos))); | 
 | } | 
 |  | 
 | FunctionPtr ICData::GetTargetAt(intptr_t index) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 |   return nullptr; | 
 | #else | 
 |   const intptr_t data_pos = | 
 |       index * TestEntryLength() + TargetIndexFor(NumArgsTested()); | 
 |   ASSERT(Object::Handle(Array::Handle(entries()).At(data_pos)).IsFunction()); | 
 |  | 
 |   NoSafepointScope no_safepoint; | 
 |   ArrayPtr raw_data = entries(); | 
 |   return static_cast<FunctionPtr>(raw_data->untag()->element(data_pos)); | 
 | #endif | 
 | } | 
 |  | 
 | void ICData::IncrementCountAt(intptr_t index, intptr_t value) const { | 
 |   ASSERT(0 <= value); | 
 |   ASSERT(value <= Smi::kMaxValue); | 
 |   SetCountAt(index, Utils::Minimum(GetCountAt(index) + value, Smi::kMaxValue)); | 
 | } | 
 |  | 
 | void ICData::SetCountAt(intptr_t index, intptr_t value) const { | 
 |   ASSERT(0 <= value); | 
 |   ASSERT(value <= Smi::kMaxValue); | 
 |  | 
 |   Thread* thread = Thread::Current(); | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   Array& data = thread->ArrayHandle(); | 
 |   data = entries(); | 
 |   const intptr_t data_pos = | 
 |       index * TestEntryLength() + CountIndexFor(NumArgsTested()); | 
 |   data.SetAt(data_pos, Smi::Handle(Smi::New(value))); | 
 | } | 
 |  | 
 | intptr_t ICData::GetCountAt(intptr_t index) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 |   return 0; | 
 | #else | 
 |   Thread* thread = Thread::Current(); | 
 |   REUSABLE_ARRAY_HANDLESCOPE(thread); | 
 |   Array& data = thread->ArrayHandle(); | 
 |   data = entries(); | 
 |   const intptr_t data_pos = | 
 |       index * TestEntryLength() + CountIndexFor(NumArgsTested()); | 
 |   intptr_t value = Smi::Value(Smi::RawCast(data.At(data_pos))); | 
 |   if (value >= 0) return value; | 
 |  | 
 |   // The counter very rarely overflows to a negative value, but if it does, we | 
 |   // would rather just reset it to zero. | 
 |   SetCountAt(index, 0); | 
 |   return 0; | 
 | #endif | 
 | } | 
 |  | 
 | intptr_t ICData::AggregateCount() const { | 
 |   if (IsNull()) return 0; | 
 |   const intptr_t len = NumberOfChecks(); | 
 |   intptr_t count = 0; | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     count += GetCountAt(i); | 
 |   } | 
 |   return count; | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
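// Projects this (possibly multi-argument) ICData down to unary class checks
// on argument `arg_nr`: zero-count entries are dropped, and entries that
// collapse to the same class id are merged by summing their counts.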
 | ICDataPtr ICData::AsUnaryClassChecksForArgNr(intptr_t arg_nr) const { | 
 |   ASSERT(!IsNull()); | 
 |   ASSERT(NumArgsTested() > arg_nr); | 
 |   if ((arg_nr == 0) && (NumArgsTested() == 1)) { | 
 |     // Frequent case. | 
 |     return ptr(); | 
 |   } | 
 |   const intptr_t kNumArgsTested = 1; | 
 |   ICData& result = ICData::Handle(ICData::NewFrom(*this, kNumArgsTested)); | 
 |   const intptr_t len = NumberOfChecks(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     const intptr_t class_id = GetClassIdAt(i, arg_nr); | 
 |     const intptr_t count = GetCountAt(i); | 
 |     if (count == 0) { | 
 |       continue; | 
 |     } | 
 |     intptr_t duplicate_class_id = -1; | 
 |     const intptr_t result_len = result.NumberOfChecks(); | 
 |     for (intptr_t k = 0; k < result_len; k++) { | 
 |       if (class_id == result.GetReceiverClassIdAt(k)) { | 
 |         duplicate_class_id = k; | 
 |         break; | 
 |       } | 
 |     } | 
 |     if (duplicate_class_id >= 0) { | 
 |       // This check is valid only when checking the receiver. | 
 |       ASSERT((arg_nr != 0) || | 
 |              (result.GetTargetAt(duplicate_class_id) == GetTargetAt(i))); | 
 |       result.IncrementCountAt(duplicate_class_id, count); | 
 |     } else { | 
      // This ensures that the Smi entry, if present, ends up first.
 |       result.AddReceiverCheckInternal(class_id, | 
 |                                       Function::Handle(GetTargetAt(i)), count, | 
 |                                       StaticTypeExactnessState::NotTracking()); | 
 |     } | 
 |   } | 
 |  | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | // (cid, count) tuple used to sort ICData by count. | 
 | struct CidCount { | 
 |   CidCount(intptr_t cid_, intptr_t count_, Function* f_) | 
 |       : cid(cid_), count(count_), function(f_) {} | 
 |  | 
 |   static int HighestCountFirst(const CidCount* a, const CidCount* b); | 
 |  | 
 |   intptr_t cid; | 
 |   intptr_t count; | 
 |   Function* function; | 
 | }; | 
 |  | 
 | int CidCount::HighestCountFirst(const CidCount* a, const CidCount* b) { | 
 |   if (a->count > b->count) { | 
 |     return -1; | 
 |   } | 
 |   return (a->count < b->count) ? 1 : 0; | 
 | } | 
 |  | 
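// Like AsUnaryClassChecks, but aggregates the counts per class id and emits
// the resulting unary entries in descending count order, so the most
// frequently observed receiver class comes first.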
 | ICDataPtr ICData::AsUnaryClassChecksSortedByCount() const { | 
 |   ASSERT(!IsNull()); | 
 |   const intptr_t kNumArgsTested = 1; | 
 |   const intptr_t len = NumberOfChecks(); | 
 |   if (len <= 1) { | 
 |     // No sorting needed. | 
 |     return AsUnaryClassChecks(); | 
 |   } | 
 |   GrowableArray<CidCount> aggregate; | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     const intptr_t class_id = GetClassIdAt(i, 0); | 
 |     const intptr_t count = GetCountAt(i); | 
 |     if (count == 0) { | 
 |       continue; | 
 |     } | 
 |     bool found = false; | 
 |     for (intptr_t r = 0; r < aggregate.length(); r++) { | 
 |       if (aggregate[r].cid == class_id) { | 
 |         aggregate[r].count += count; | 
 |         found = true; | 
 |         break; | 
 |       } | 
 |     } | 
 |     if (!found) { | 
 |       aggregate.Add( | 
 |           CidCount(class_id, count, &Function::ZoneHandle(GetTargetAt(i)))); | 
 |     } | 
 |   } | 
 |   aggregate.Sort(CidCount::HighestCountFirst); | 
 |  | 
 |   ICData& result = ICData::Handle(ICData::NewFrom(*this, kNumArgsTested)); | 
 |   ASSERT(result.NumberOfChecksIs(0)); | 
 |   // Room for all entries and the sentinel. | 
 |   const intptr_t data_len = result.TestEntryLength() * (aggregate.length() + 1); | 
 |   // Allocate the array but do not assign it to result until we have populated | 
 |   // it with the aggregate data and the terminating sentinel. | 
 |   const Array& data = Array::Handle(Array::New(data_len, Heap::kOld)); | 
 |   intptr_t pos = 0; | 
 |   for (intptr_t i = 0; i < aggregate.length(); i++) { | 
 |     data.SetAt(pos + 0, Smi::Handle(Smi::New(aggregate[i].cid))); | 
 |     data.SetAt(pos + TargetIndexFor(1), *aggregate[i].function); | 
 |     data.SetAt(pos + CountIndexFor(1), | 
 |                Smi::Handle(Smi::New(aggregate[i].count))); | 
 |  | 
 |     pos += result.TestEntryLength(); | 
 |   } | 
 |   WriteSentinel(data, result.TestEntryLength(), result); | 
 |   result.set_entries(data); | 
 |   ASSERT(result.NumberOfChecksIs(aggregate.length())); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
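// Repackages this call site's metadata (target name and arguments
// descriptor) as an UnlinkedCall. In precompiled mode the resulting call
// site may only be patched to a monomorphic state when a Smi receiver has
// been ruled out.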
 | UnlinkedCallPtr ICData::AsUnlinkedCall() const { | 
 |   ASSERT(NumArgsTested() == 1); | 
 |   ASSERT(!is_tracking_exactness()); | 
 |   const UnlinkedCall& result = UnlinkedCall::Handle(UnlinkedCall::New()); | 
 |   result.set_target_name(String::Handle(target_name())); | 
 |   result.set_arguments_descriptor(Array::Handle(arguments_descriptor())); | 
 |   result.set_can_patch_to_monomorphic(!FLAG_precompiled_mode || | 
 |                                       receiver_cannot_be_smi()); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | bool ICData::HasReceiverClassId(intptr_t class_id) const { | 
 |   ASSERT(NumArgsTested() > 0); | 
 |   const intptr_t len = NumberOfChecks(); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     if (IsUsedAt(i)) { | 
 |       const intptr_t test_class_id = GetReceiverClassIdAt(i); | 
 |       if (test_class_id == class_id) { | 
 |         return true; | 
 |       } | 
 |     } | 
 |   } | 
 |   return false; | 
 | } | 
 | #endif | 
 |  | 
 | bool ICData::IsUsedAt(intptr_t i) const { | 
 |   if (GetCountAt(i) <= 0) { | 
 |     // Do not mistake unoptimized static call ICData for unused. | 
 |     // See ICData::AddTarget. | 
 |     // TODO(srdjan): Make this test more robust. | 
 |     if (NumArgsTested() > 0) { | 
 |       const intptr_t cid = GetReceiverClassIdAt(i); | 
 |       if (cid == kObjectCid) { | 
 |         return true; | 
 |       } | 
 |     } | 
 |     return false; | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
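// The VM pre-allocates one shared, immutable, empty entries array per
// supported shape (0 through
// kCachedICDataMaxArgsTestedWithoutExactnessTracking tested arguments
// without exactness tracking, plus the one-argument shape with tracking) so
// that freshly created ICData objects can share them instead of allocating
// their own.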
 | void ICData::Init() { | 
 |   for (int i = 0; i <= kCachedICDataMaxArgsTestedWithoutExactnessTracking; | 
 |        i++) { | 
 |     cached_icdata_arrays_ | 
 |         [kCachedICDataZeroArgTestedWithoutExactnessTrackingIdx + i] = | 
 |             ICData::NewNonCachedEmptyICDataArray(i, false); | 
 |   } | 
 |   cached_icdata_arrays_[kCachedICDataOneArgWithExactnessTrackingIdx] = | 
 |       ICData::NewNonCachedEmptyICDataArray(1, true); | 
 | } | 
 |  | 
 | void ICData::Cleanup() { | 
 |   for (int i = 0; i < kCachedICDataArrayCount; ++i) { | 
 |     cached_icdata_arrays_[i] = nullptr; | 
 |   } | 
 | } | 
 |  | 
 | ArrayPtr ICData::NewNonCachedEmptyICDataArray(intptr_t num_args_tested, | 
 |                                               bool tracking_exactness) { | 
  // The IC data array must be terminated by a sentinel entry.
 |   const intptr_t len = TestEntryLengthFor(num_args_tested, tracking_exactness); | 
 |   const Array& array = Array::Handle(Array::New(len, Heap::kOld)); | 
 |   // Only empty [ICData]s are allowed to have a non-ICData backref. | 
 |   WriteSentinel(array, len, /*back_ref=*/smi_illegal_cid()); | 
 |   array.MakeImmutable(); | 
 |   return array.ptr(); | 
 | } | 
 |  | 
 | ArrayPtr ICData::CachedEmptyICDataArray(intptr_t num_args_tested, | 
 |                                         bool tracking_exactness) { | 
 |   if (tracking_exactness) { | 
 |     ASSERT(num_args_tested == 1); | 
 |     return cached_icdata_arrays_[kCachedICDataOneArgWithExactnessTrackingIdx]; | 
 |   } else { | 
 |     ASSERT(num_args_tested >= 0); | 
 |     ASSERT(num_args_tested <= | 
 |            kCachedICDataMaxArgsTestedWithoutExactnessTracking); | 
 |     return cached_icdata_arrays_ | 
 |         [kCachedICDataZeroArgTestedWithoutExactnessTrackingIdx + | 
 |          num_args_tested]; | 
 |   } | 
 | } | 
 |  | 
 | bool ICData::IsCachedEmptyEntry(const Array& array) { | 
 |   for (int i = 0; i < kCachedICDataArrayCount; ++i) { | 
 |     if (cached_icdata_arrays_[i] == array.ptr()) return true; | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
// Does not initialize the ICData entries array.
 | ICDataPtr ICData::NewDescriptor(Zone* zone, | 
 |                                 const Function& owner, | 
 |                                 const String& target_name, | 
 |                                 const Array& arguments_descriptor, | 
 |                                 intptr_t deopt_id, | 
 |                                 intptr_t num_args_tested, | 
 |                                 RebindRule rebind_rule, | 
 |                                 const AbstractType& receivers_static_type) { | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
  // Null owners should only occur in the precompiled runtime, when the
  // owning function of a Code object has been optimized out.
 |   ASSERT(!owner.IsNull()); | 
 | #endif | 
 |   ASSERT(!target_name.IsNull()); | 
 |   ASSERT(!arguments_descriptor.IsNull()); | 
 |   ASSERT(Object::icdata_class() != Class::null()); | 
 |   ASSERT(num_args_tested >= 0); | 
  // IC data objects are long-lived, so allocate them in the old generation.
 |   const auto& result = | 
 |       ICData::Handle(zone, Object::Allocate<ICData>(Heap::kOld)); | 
 |   result.set_owner(owner); | 
 |   result.set_target_name(target_name); | 
 |   result.set_arguments_descriptor(arguments_descriptor); | 
 |   NOT_IN_PRECOMPILED(result.set_deopt_id(deopt_id)); | 
 |   ASSERT_EQUAL(result.untag()->state_bits_, 0); | 
 |   result.set_rebind_rule(rebind_rule); | 
 |   result.SetNumArgsTested(num_args_tested); | 
 |   NOT_IN_PRECOMPILED(result.SetReceiversStaticType(receivers_static_type)); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | bool ICData::IsImmutable() const { | 
 |   return entries()->IsImmutableArray(); | 
 | } | 
 |  | 
 | ICDataPtr ICData::New() { | 
  // IC data objects are long-lived, so allocate them in the old generation.
 |   const auto& result = ICData::Handle(Object::Allocate<ICData>(Heap::kOld)); | 
 |   ASSERT_EQUAL(result.untag()->state_bits_, 0); | 
 |   result.set_deopt_id(DeoptId::kNone); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ICDataPtr ICData::New(const Function& owner, | 
 |                       const String& target_name, | 
 |                       const Array& arguments_descriptor, | 
 |                       intptr_t deopt_id, | 
 |                       intptr_t num_args_tested, | 
 |                       RebindRule rebind_rule, | 
 |                       const AbstractType& receivers_static_type) { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const ICData& result = ICData::Handle( | 
 |       zone, | 
 |       NewDescriptor(zone, owner, target_name, arguments_descriptor, deopt_id, | 
 |                     num_args_tested, rebind_rule, receivers_static_type)); | 
 |   result.set_entries(Array::Handle( | 
 |       zone, | 
 |       CachedEmptyICDataArray(num_args_tested, result.is_tracking_exactness()))); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
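// Like ICData::New, but seeds the entries array with a single pre-filled
// check mapping `cids` to `target` (with an initial count of zero outside
// the precompiled runtime), followed by the terminating sentinel.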
 | ICDataPtr ICData::NewWithCheck(const Function& owner, | 
 |                                const String& target_name, | 
 |                                const Array& arguments_descriptor, | 
 |                                intptr_t deopt_id, | 
 |                                intptr_t num_args_tested, | 
 |                                RebindRule rebind_rule, | 
 |                                GrowableArray<intptr_t>* cids, | 
 |                                const Function& target, | 
 |                                const AbstractType& receiver_type) { | 
 |   ASSERT((cids != nullptr) && !target.IsNull()); | 
 |   ASSERT(cids->length() == num_args_tested); | 
 |  | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const auto& result = ICData::Handle( | 
 |       zone, | 
 |       NewDescriptor(zone, owner, target_name, arguments_descriptor, deopt_id, | 
 |                     num_args_tested, rebind_rule, receiver_type)); | 
 |  | 
 |   const intptr_t kNumEntries = 2;  // 1 entry and a sentinel. | 
 |   const intptr_t entry_len = | 
 |       TestEntryLengthFor(num_args_tested, result.is_tracking_exactness()); | 
 |   const auto& array = | 
 |       Array::Handle(zone, Array::New(kNumEntries * entry_len, Heap::kOld)); | 
 |  | 
 |   auto& cid = Smi::Handle(zone); | 
 |   for (intptr_t i = 0; i < num_args_tested; ++i) { | 
 |     cid = Smi::New((*cids)[i]); | 
 |     array.SetAt(i, cid); | 
 |   } | 
 |  | 
 |   SetTargetAtPos(array, 0, num_args_tested, target); | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   array.SetAt(CountIndexFor(num_args_tested), Object::smi_zero()); | 
 | #endif | 
 |   WriteSentinel(array, entry_len, result); | 
 |  | 
 |   result.set_entries(array); | 
 |  | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ICDataPtr ICData::NewForStaticCall(const Function& owner, | 
 |                                    const Function& target, | 
 |                                    const Array& arguments_descriptor, | 
 |                                    intptr_t deopt_id, | 
 |                                    intptr_t num_args_tested, | 
 |                                    RebindRule rebind_rule) { | 
 |   // See `MethodRecognizer::NumArgsCheckedForStaticCall`. | 
 |   ASSERT(num_args_tested == 0 || num_args_tested == 2); | 
 |   ASSERT(!target.IsNull()); | 
 |  | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const auto& target_name = String::Handle(zone, target.name()); | 
 |   GrowableArray<intptr_t> cids(num_args_tested); | 
 |   if (num_args_tested == 2) { | 
 |     cids.Add(kObjectCid); | 
 |     cids.Add(kObjectCid); | 
 |   } | 
 |   return ICData::NewWithCheck(owner, target_name, arguments_descriptor, | 
 |                               deopt_id, num_args_tested, rebind_rule, &cids, | 
 |                               target, Object::null_abstract_type()); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | ICDataPtr ICData::NewFrom(const ICData& from, intptr_t num_args_tested) { | 
  // See the comment in [ICData::Clone] for why we read the megamorphic bit
  // first.
 |   const bool is_megamorphic = from.is_megamorphic(); | 
 |  | 
 |   const ICData& result = ICData::Handle(ICData::New( | 
 |       Function::Handle(from.Owner()), String::Handle(from.target_name()), | 
 |       Array::Handle(from.arguments_descriptor()), from.deopt_id(), | 
 |       num_args_tested, from.rebind_rule(), | 
 |       AbstractType::Handle(from.receivers_static_type()))); | 
 |   // Copy deoptimization reasons. | 
 |   result.SetDeoptReasons(from.DeoptReasons()); | 
 |   result.set_is_megamorphic(is_megamorphic); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ICDataPtr ICData::Clone(const ICData& from) { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |  | 
 |   // We have to check the megamorphic bit before accessing the entries of the | 
 |   // ICData to ensure all writes to the entries have been flushed and are | 
 |   // visible at this point. | 
 |   // | 
  // This allows us to maintain the invariant that if the megamorphic bit is
  // set, the number of entries in the ICData has reached the limit.
 |   const bool is_megamorphic = from.is_megamorphic(); | 
 |  | 
 |   const ICData& result = ICData::Handle( | 
 |       zone, ICData::NewDescriptor( | 
 |                 zone, Function::Handle(zone, from.Owner()), | 
 |                 String::Handle(zone, from.target_name()), | 
 |                 Array::Handle(zone, from.arguments_descriptor()), | 
 |                 from.deopt_id(), from.NumArgsTested(), from.rebind_rule(), | 
 |                 AbstractType::Handle(zone, from.receivers_static_type()))); | 
 |   // Clone entry array. | 
 |   const Array& from_array = Array::Handle(zone, from.entries()); | 
 |   if (ICData::IsCachedEmptyEntry(from_array)) { | 
 |     result.set_entries(from_array); | 
 |   } else { | 
 |     const intptr_t len = from_array.Length(); | 
 |     const Array& cloned_array = | 
 |         Array::Handle(zone, Array::New(len, Heap::kOld)); | 
 |     Object& obj = Object::Handle(zone); | 
 |     for (intptr_t i = 0; i < len; i++) { | 
 |       obj = from_array.At(i); | 
 |       cloned_array.SetAt(i, obj); | 
 |     } | 
 |     // Update backref in our clone. | 
 |     cloned_array.SetAt(cloned_array.Length() - 1, result); | 
 |     result.set_entries(cloned_array); | 
 |   } | 
 |   // Copy deoptimization reasons. | 
 |   result.SetDeoptReasons(from.DeoptReasons()); | 
 |   result.set_is_megamorphic(is_megamorphic); | 
 |  | 
 |   DEBUG_ONLY(result.AssertInvariantsAreSatisfied()); | 
 |  | 
 |   return result.ptr(); | 
 | } | 
 | #endif | 
 |  | 
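// Recovers the owning ICData from an entries array via the back reference
// stored in the sentinel's last slot. Returns null for the shared empty
// arrays, which carry smi_illegal_cid() as their back reference instead.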
 | ICDataPtr ICData::ICDataOfEntriesArray(const Array& array) { | 
 |   const auto& back_ref = Object::Handle(array.At(array.Length() - 1)); | 
 |   if (back_ref.ptr() == smi_illegal_cid().ptr()) { | 
 |     ASSERT(IsCachedEmptyEntry(array)); | 
 |     return ICData::null(); | 
 |   } | 
 |  | 
 |   const auto& ic_data = ICData::Cast(back_ref); | 
 |   DEBUG_ONLY(ic_data.AssertInvariantsAreSatisfied()); | 
 |   return ic_data.ptr(); | 
 | } | 
 |  | 
 | const char* WeakSerializationReference::ToCString() const { | 
 |   return Object::Handle(target()).ToCString(); | 
 | } | 
 |  | 
 | ObjectPtr WeakSerializationReference::New(const Object& target, | 
 |                                           const Object& replacement) { | 
 |   ASSERT(Object::weak_serialization_reference_class() != Class::null()); | 
 |   // Don't wrap any object in the VM heap, as all objects in the VM isolate | 
 |   // heap are currently serialized. | 
 |   // | 
 |   // Note that we _do_ wrap Smis if requested. Smis are serialized in the Mint | 
 |   // cluster, and so dropping them if not strongly referenced saves space in | 
 |   // the snapshot. | 
 |   if (target.ptr()->IsHeapObject() && target.InVMIsolateHeap()) { | 
 |     return target.ptr(); | 
 |   } | 
 |   // If the target is a WSR that already uses the replacement, then return it. | 
 |   if (target.IsWeakSerializationReference() && | 
 |       WeakSerializationReference::Cast(target).replacement() == | 
 |           replacement.ptr()) { | 
 |     return target.ptr(); | 
 |   } | 
 |   const auto& result = WeakSerializationReference::Handle( | 
 |       Object::Allocate<WeakSerializationReference>(Heap::kOld)); | 
  // Don't nest WSRs; instead, use the old WSR's target.
 |   result.untag()->set_target(target.IsWeakSerializationReference() | 
 |                                  ? WeakSerializationReference::Unwrap(target) | 
 |                                  : target.ptr()); | 
 |   result.untag()->set_replacement(replacement.ptr()); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* WeakArray::ToCString() const { | 
 |   return Thread::Current()->zone()->PrintToString("WeakArray len:%" Pd, | 
 |                                                   Length()); | 
 | } | 
 |  | 
 | WeakArrayPtr WeakArray::New(intptr_t length, Heap::Space space) { | 
 |   ASSERT(Object::weak_array_class() != Class::null()); | 
 |   if (!IsValidLength(length)) { | 
 |     // This should be caught before we reach here. | 
 |     FATAL("Fatal error in WeakArray::New: invalid len %" Pd "\n", length); | 
 |   } | 
 |   auto raw = Object::Allocate<WeakArray>(space, length); | 
 |   NoSafepointScope no_safepoint; | 
 |   raw->untag()->set_length(Smi::New(length)); | 
 |   return raw; | 
 | } | 
 |  | 
 | #if defined(INCLUDE_IL_PRINTER) | 
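// Code comments are stored as a flat Array with kNumberOfEntries slots per
// comment: a Smi pc offset (kPCOffsetEntry) and a String comment text
// (kCommentEntry), as read back by PCOffsetAt and CommentAt below.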
 | Code::Comments& Code::Comments::New(intptr_t count) { | 
 |   Comments* comments; | 
 |   if (count < 0 || count > (kIntptrMax / kNumberOfEntries)) { | 
 |     // This should be caught before we reach here. | 
 |     FATAL("Fatal error in Code::Comments::New: invalid count %" Pd "\n", count); | 
 |   } | 
 |   if (count == 0) { | 
 |     comments = new Comments(Object::empty_array()); | 
 |   } else { | 
 |     const Array& data = | 
 |         Array::Handle(Array::New(count * kNumberOfEntries, Heap::kOld)); | 
 |     comments = new Comments(data); | 
 |   } | 
 |   return *comments; | 
 | } | 
 |  | 
 | intptr_t Code::Comments::Length() const { | 
 |   if (comments_.IsNull()) { | 
 |     return 0; | 
 |   } | 
 |   return comments_.Length() / kNumberOfEntries; | 
 | } | 
 |  | 
 | intptr_t Code::Comments::PCOffsetAt(intptr_t idx) const { | 
 |   return Smi::Value( | 
 |       Smi::RawCast(comments_.At(idx * kNumberOfEntries + kPCOffsetEntry))); | 
 | } | 
 |  | 
 | void Code::Comments::SetPCOffsetAt(intptr_t idx, intptr_t pc) { | 
 |   comments_.SetAt(idx * kNumberOfEntries + kPCOffsetEntry, | 
 |                   Smi::Handle(Smi::New(pc))); | 
 | } | 
 |  | 
 | const char* Code::Comments::CommentAt(intptr_t idx) const { | 
 |   string_ ^= comments_.At(idx * kNumberOfEntries + kCommentEntry); | 
 |   return string_.ToCString(); | 
 | } | 
 |  | 
 | void Code::Comments::SetCommentAt(intptr_t idx, const String& comment) { | 
 |   comments_.SetAt(idx * kNumberOfEntries + kCommentEntry, comment); | 
 | } | 
 |  | 
 | Code::Comments::Comments(const Array& comments) | 
 |     : comments_(comments), string_(String::Handle()) {} | 
 | #endif  // defined(INCLUDE_IL_PRINTER) | 
 |  | 
 | const char* Code::EntryKindToCString(EntryKind kind) { | 
 |   switch (kind) { | 
 |     case EntryKind::kNormal: | 
 |       return "Normal"; | 
 |     case EntryKind::kUnchecked: | 
 |       return "Unchecked"; | 
 |     case EntryKind::kMonomorphic: | 
 |       return "Monomorphic"; | 
 |     case EntryKind::kMonomorphicUnchecked: | 
 |       return "MonomorphicUnchecked"; | 
 |     default: | 
 |       UNREACHABLE(); | 
 |       return nullptr; | 
 |   } | 
 | } | 
 |  | 
 | bool Code::ParseEntryKind(const char* str, EntryKind* out) { | 
 |   if (strcmp(str, "Normal") == 0) { | 
 |     *out = EntryKind::kNormal; | 
 |     return true; | 
 |   } else if (strcmp(str, "Unchecked") == 0) { | 
 |     *out = EntryKind::kUnchecked; | 
 |     return true; | 
 |   } else if (strcmp(str, "Monomorphic") == 0) { | 
 |     *out = EntryKind::kMonomorphic; | 
 |     return true; | 
 |   } else if (strcmp(str, "MonomorphicUnchecked") == 0) { | 
 |     *out = EntryKind::kMonomorphicUnchecked; | 
 |     return true; | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | LocalVarDescriptorsPtr Code::GetLocalVarDescriptors() const { | 
 |   const LocalVarDescriptors& v = LocalVarDescriptors::Handle(var_descriptors()); | 
 |   if (v.IsNull()) { | 
 |     ASSERT(!is_optimized()); | 
 |     const Function& f = Function::Handle(function()); | 
 |     ASSERT(!f.IsIrregexpFunction());  // Not yet implemented. | 
 |     Compiler::ComputeLocalVarDescriptors(*this); | 
 |   } | 
 |   return var_descriptors(); | 
 | } | 
 |  | 
 | void Code::set_owner(const Object& owner) const { | 
 | #if defined(DEBUG) | 
 |   const auto& unwrapped_owner = | 
 |       Object::Handle(WeakSerializationReference::Unwrap(owner)); | 
 |   ASSERT(unwrapped_owner.IsFunction() || unwrapped_owner.IsClass() || | 
 |          unwrapped_owner.IsAbstractType()); | 
 | #endif | 
 |   untag()->set_owner(owner.ptr()); | 
 | } | 
 |  | 
 | void Code::set_state_bits(intptr_t bits) const { | 
 |   StoreNonPointer(&untag()->state_bits_, bits); | 
 | } | 
 |  | 
 | void Code::set_is_optimized(bool value) const { | 
 |   set_state_bits(OptimizedBit::update(value, untag()->state_bits_)); | 
 | } | 
 |  | 
 | void Code::set_is_force_optimized(bool value) const { | 
 |   set_state_bits(ForceOptimizedBit::update(value, untag()->state_bits_)); | 
 | } | 
 |  | 
 | void Code::set_is_alive(bool value) const { | 
 |   set_state_bits(AliveBit::update(value, untag()->state_bits_)); | 
 | } | 
 |  | 
 | void Code::set_is_discarded(bool value) const { | 
 |   set_state_bits(DiscardedBit::update(value, untag()->state_bits_)); | 
 | } | 
 |  | 
 | void Code::set_compressed_stackmaps(const CompressedStackMaps& maps) const { | 
 |   ASSERT(maps.IsOld()); | 
 |   untag()->set_compressed_stackmaps(maps.ptr()); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | intptr_t Code::num_variables() const { | 
 |   ASSERT(!FLAG_precompiled_mode); | 
 |   return Smi::Value(Smi::RawCast(untag()->catch_entry())); | 
 | } | 
 | void Code::set_num_variables(intptr_t num_variables) const { | 
 |   ASSERT(!FLAG_precompiled_mode); | 
 |   untag()->set_catch_entry(Smi::New(num_variables)); | 
 | } | 
 | #endif | 
 |  | 
 | #if defined(DART_PRECOMPILED_RUNTIME) || defined(DART_PRECOMPILER) | 
 | TypedDataPtr Code::catch_entry_moves_maps() const { | 
 |   ASSERT(FLAG_precompiled_mode); | 
 |   return TypedData::RawCast(untag()->catch_entry()); | 
 | } | 
 | void Code::set_catch_entry_moves_maps(const TypedData& maps) const { | 
 |   ASSERT(FLAG_precompiled_mode); | 
 |   untag()->set_catch_entry(maps.ptr()); | 
 | } | 
 | #endif | 
 |  | 
 | void Code::set_deopt_info_array(const Array& array) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   ASSERT(array.IsOld()); | 
 |   untag()->set_deopt_info_array(array.ptr()); | 
 | #endif | 
 | } | 
 |  | 
 | void Code::set_static_calls_target_table(const Array& value) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   untag()->set_static_calls_target_table(value.ptr()); | 
 | #endif | 
 | #if defined(DEBUG) | 
  // Check that the table is sorted by pc offsets.
  // FlowGraphCompiler::AddStaticCallTarget adds pc offsets to the table while
  // emitting assembly, which guarantees that every succeeding pc offset is
  // larger than the previously added one.
 |   StaticCallsTable entries(value); | 
 |   const intptr_t count = entries.Length(); | 
 |   for (intptr_t i = 0; i < count - 1; ++i) { | 
 |     auto left = Smi::Value(entries[i].Get<kSCallTableKindAndOffset>()); | 
 |     auto right = Smi::Value(entries[i + 1].Get<kSCallTableKindAndOffset>()); | 
 |     ASSERT(OffsetField::decode(left) < OffsetField::decode(right)); | 
 |   } | 
 | #endif  // DEBUG | 
 | } | 
 |  | 
 | ObjectPoolPtr Code::GetObjectPool() const { | 
 | #if defined(DART_PRECOMPILER) || defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (FLAG_precompiled_mode) { | 
 |     return IsolateGroup::Current()->object_store()->global_object_pool(); | 
 |   } | 
 | #endif | 
 |   return object_pool(); | 
 | } | 
 |  | 
 | bool Code::HasBreakpoint() const { | 
 | #if defined(PRODUCT) | 
 |   return false; | 
 | #else | 
 |   return IsolateGroup::Current()->debugger()->HasBreakpointInCode(*this); | 
 | #endif | 
 | } | 
 |  | 
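// Looks up the deopt info entry for `pc` in this optimized code's deopt
// table. On a hit, decodes the packed reason/flags pair into the out
// parameters and returns the deopt instructions; otherwise returns null and
// reports kDeoptUnknown.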
 | TypedDataPtr Code::GetDeoptInfoAtPc(uword pc, | 
 |                                     ICData::DeoptReasonId* deopt_reason, | 
 |                                     uint32_t* deopt_flags) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   ASSERT(Dart::vm_snapshot_kind() == Snapshot::kFullAOT); | 
 |   return TypedData::null(); | 
 | #else | 
 |   ASSERT(is_optimized()); | 
 |   const Instructions& instrs = Instructions::Handle(instructions()); | 
 |   uword code_entry = instrs.PayloadStart(); | 
 |   const Array& table = Array::Handle(deopt_info_array()); | 
 |   if (table.IsNull()) { | 
 |     ASSERT(Dart::vm_snapshot_kind() == Snapshot::kFullAOT); | 
 |     return TypedData::null(); | 
 |   } | 
 |   // Linear search for the PC offset matching the target PC. | 
 |   intptr_t length = DeoptTable::GetLength(table); | 
 |   Smi& offset = Smi::Handle(); | 
 |   Smi& reason_and_flags = Smi::Handle(); | 
 |   TypedData& info = TypedData::Handle(); | 
 |   for (intptr_t i = 0; i < length; ++i) { | 
 |     DeoptTable::GetEntry(table, i, &offset, &info, &reason_and_flags); | 
 |     if (pc == (code_entry + offset.Value())) { | 
 |       ASSERT(!info.IsNull()); | 
 |       *deopt_reason = DeoptTable::ReasonField::decode(reason_and_flags.Value()); | 
 |       *deopt_flags = DeoptTable::FlagsField::decode(reason_and_flags.Value()); | 
 |       return info.ptr(); | 
 |     } | 
 |   } | 
 |   *deopt_reason = ICData::kDeoptUnknown; | 
 |   return TypedData::null(); | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
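// Returns the index of the static-calls-table entry whose recorded pc
// offset matches `pc` exactly, or -1 if there is none. The table is sorted
// by pc offset (see set_static_calls_target_table), which is what makes the
// binary search valid.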
 | intptr_t Code::BinarySearchInSCallTable(uword pc) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   NoSafepointScope no_safepoint; | 
 |   const Array& table = Array::Handle(untag()->static_calls_target_table()); | 
 |   StaticCallsTable entries(table); | 
 |   const intptr_t pc_offset = pc - PayloadStart(); | 
 |   intptr_t imin = 0; | 
 |   intptr_t imax = (table.Length() / kSCallTableEntryLength) - 1; | 
 |   while (imax >= imin) { | 
 |     const intptr_t imid = imin + (imax - imin) / 2; | 
 |     const auto offset = OffsetField::decode( | 
 |         Smi::Value(entries[imid].Get<kSCallTableKindAndOffset>())); | 
 |     if (offset < pc_offset) { | 
 |       imin = imid + 1; | 
 |     } else if (offset > pc_offset) { | 
 |       imax = imid - 1; | 
 |     } else { | 
 |       return imid; | 
 |     } | 
 |   } | 
 | #endif | 
 |   return -1; | 
 | } | 
 |  | 
 | FunctionPtr Code::GetStaticCallTargetFunctionAt(uword pc) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 |   return Function::null(); | 
 | #else | 
 |   const intptr_t i = BinarySearchInSCallTable(pc); | 
 |   if (i < 0) { | 
 |     return Function::null(); | 
 |   } | 
 |   const Array& array = Array::Handle(untag()->static_calls_target_table()); | 
 |   StaticCallsTable entries(array); | 
 |   return entries[i].Get<kSCallTableFunctionTarget>(); | 
 | #endif | 
 | } | 
 |  | 
 | void Code::SetStaticCallTargetCodeAt(uword pc, const Code& code) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   const intptr_t i = BinarySearchInSCallTable(pc); | 
 |   ASSERT(i >= 0); | 
 |   const Array& array = Array::Handle(untag()->static_calls_target_table()); | 
 |   StaticCallsTable entries(array); | 
 |   ASSERT(code.IsNull() || | 
 |          (code.function() == entries[i].Get<kSCallTableFunctionTarget>())); | 
 |   return entries[i].Set<kSCallTableCodeOrTypeTarget>(code); | 
 | #endif | 
 | } | 
 |  | 
 | void Code::SetStubCallTargetCodeAt(uword pc, const Code& code) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   const intptr_t i = BinarySearchInSCallTable(pc); | 
 |   ASSERT(i >= 0); | 
 |   const Array& array = Array::Handle(untag()->static_calls_target_table()); | 
 |   StaticCallsTable entries(array); | 
 | #if defined(DEBUG) | 
 |   if (entries[i].Get<kSCallTableFunctionTarget>() == Function::null()) { | 
 |     ASSERT(!code.IsNull() && Object::Handle(code.owner()).IsClass()); | 
 |   } else { | 
 |     ASSERT(code.IsNull() || | 
 |            (code.function() == entries[i].Get<kSCallTableFunctionTarget>())); | 
 |   } | 
 | #endif | 
 |   return entries[i].Set<kSCallTableCodeOrTypeTarget>(code); | 
 | #endif | 
 | } | 
 |  | 
 | void Code::Disassemble(DisassemblyFormatter* formatter) const { | 
 | #if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER) | 
 |   if (!FLAG_support_disassembler) { | 
 |     return; | 
 |   } | 
 |   const uword start = PayloadStart(); | 
 |   if (formatter == nullptr) { | 
 |     Disassembler::Disassemble(start, start + Size(), *this); | 
 |   } else { | 
 |     Disassembler::Disassemble(start, start + Size(), formatter, *this); | 
 |   } | 
 | #endif  // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER) | 
 | } | 
 |  | 
 | #if defined(INCLUDE_IL_PRINTER) | 
 | #if defined(PRODUCT) | 
// In PRODUCT builds we don't have space in the Code object to store code
// comments, so we move them into the malloced heap (and leak them). This
// functionality is only intended for use in the AOT compiler, so leaking is
// fine.
 | class MallocCodeComments final : public CodeComments { | 
 |  public: | 
 |   explicit MallocCodeComments(const CodeComments& comments) | 
 |       : length_(comments.Length()), comments_(new Comment[comments.Length()]) { | 
 |     for (intptr_t i = 0; i < length_; i++) { | 
 |       comments_[i].pc_offset = comments.PCOffsetAt(i); | 
 |       comments_[i].comment = | 
 |           CStringUniquePtr(Utils::StrDup(comments.CommentAt(i))); | 
 |     } | 
 |   } | 
 |  | 
 |   intptr_t Length() const override { return length_; } | 
 |  | 
 |   intptr_t PCOffsetAt(intptr_t i) const override { | 
 |     return comments_[i].pc_offset; | 
 |   } | 
 |  | 
 |   const char* CommentAt(intptr_t i) const override { | 
 |     return comments_[i].comment.get(); | 
 |   } | 
 |  | 
 |  private: | 
 |   struct Comment { | 
 |     intptr_t pc_offset; | 
 |     CStringUniquePtr comment; | 
 |   }; | 
 |  | 
 |   intptr_t length_; | 
 |   std::unique_ptr<Comment[]> comments_; | 
 | }; | 
 | #endif | 
 |  | 
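// In PRODUCT builds the (leaked) comments live in the heap's peer table,
// keyed by this Code object (see MallocCodeComments above); otherwise they
// wrap the Array stored on the Code object itself.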
 | const CodeComments& Code::comments() const { | 
 | #if defined(PRODUCT) | 
 |   auto comments = | 
 |       static_cast<CodeComments*>(Thread::Current()->heap()->GetPeer(ptr())); | 
 |   return (comments != nullptr) ? *comments : Code::Comments::New(0); | 
 | #else | 
 |   return *new Code::Comments(Array::Handle(untag()->comments())); | 
 | #endif | 
 | } | 
 |  | 
 | void Code::set_comments(const CodeComments& comments) const { | 
 | #if !defined(PRODUCT) | 
 |   auto& wrapper = static_cast<const Code::Comments&>(comments); | 
 |   ASSERT(wrapper.comments_.IsOld()); | 
 |   untag()->set_comments(wrapper.comments_.ptr()); | 
 | #else | 
 |   if (FLAG_code_comments && comments.Length() > 0) { | 
 |     Thread::Current()->heap()->SetPeer(ptr(), new MallocCodeComments(comments)); | 
 |   } else { | 
 |     Thread::Current()->heap()->SetPeer(ptr(), nullptr); | 
 |   } | 
 | #endif | 
 | } | 
 | #endif  // defined(INCLUDE_IL_PRINTER) | 
 |  | 
 | void Code::SetPrologueOffset(intptr_t offset) const { | 
 | #if defined(PRODUCT) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   ASSERT(offset >= 0); | 
 |   untag()->set_return_address_metadata(Smi::New(offset)); | 
 | #endif | 
 | } | 
 |  | 
 | intptr_t Code::GetPrologueOffset() const { | 
 | #if defined(PRODUCT) | 
 |   UNREACHABLE(); | 
 |   return -1; | 
 | #else | 
 |   const Object& object = Object::Handle(untag()->return_address_metadata()); | 
 |   // In the future we may put something other than a smi in | 
 |   // |return_address_metadata_|. | 
 |   if (object.IsNull() || !object.IsSmi()) { | 
 |     return -1; | 
 |   } | 
 |   return Smi::Cast(object).Value(); | 
 | #endif | 
 | } | 
 |  | 
 | ArrayPtr Code::inlined_id_to_function() const { | 
 |   return untag()->inlined_id_to_function(); | 
 | } | 
 |  | 
 | void Code::set_inlined_id_to_function(const Array& value) const { | 
 |   ASSERT(value.IsOld()); | 
 |   untag()->set_inlined_id_to_function(value.ptr()); | 
 | } | 
 |  | 
 | CodePtr Code::New(intptr_t pointer_offsets_length) { | 
 |   if (pointer_offsets_length < 0 || pointer_offsets_length > kMaxElements) { | 
 |     // This should be caught before we reach here. | 
 |     FATAL("Fatal error in Code::New: invalid pointer_offsets_length %" Pd "\n", | 
 |           pointer_offsets_length); | 
 |   } | 
 |   ASSERT(Object::code_class() != Class::null()); | 
 |   Code& result = Code::Handle(); | 
 |   { | 
 |     auto raw = Object::Allocate<Code>(Heap::kOld, pointer_offsets_length); | 
 |     NoSafepointScope no_safepoint; | 
 |     result = raw; | 
 |     ASSERT_EQUAL(result.untag()->state_bits_, 0); | 
 |     result.set_pointer_offsets_length(pointer_offsets_length); | 
 |   } | 
 |   DEBUG_ASSERT(result.compile_timestamp() == 0); | 
 | #if defined(INCLUDE_IL_PRINTER) | 
 |   result.set_comments(Comments::New(0)); | 
 | #endif | 
 |   result.set_pc_descriptors(Object::empty_descriptors()); | 
 |   result.set_compressed_stackmaps(Object::empty_compressed_stackmaps()); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | CodePtr Code::FinalizeCodeAndNotify(const Function& function, | 
 |                                     FlowGraphCompiler* compiler, | 
 |                                     compiler::Assembler* assembler, | 
 |                                     PoolAttachment pool_attachment, | 
 |                                     bool optimized, | 
 |                                     CodeStatistics* stats) { | 
 |   auto thread = Thread::Current(); | 
 |   ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); | 
 |  | 
 |   const auto& code = Code::Handle( | 
 |       FinalizeCode(compiler, assembler, pool_attachment, optimized, stats)); | 
 |   NotifyCodeObservers(function, code, optimized); | 
 |   return code.ptr(); | 
 | } | 
 |  | 
 | CodePtr Code::FinalizeCodeAndNotify(const char* name, | 
 |                                     FlowGraphCompiler* compiler, | 
 |                                     compiler::Assembler* assembler, | 
 |                                     PoolAttachment pool_attachment, | 
 |                                     bool optimized, | 
 |                                     CodeStatistics* stats) { | 
 |   auto thread = Thread::Current(); | 
 |   ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); | 
 |  | 
 |   const auto& code = Code::Handle( | 
 |       FinalizeCode(compiler, assembler, pool_attachment, optimized, stats)); | 
 |   NotifyCodeObservers(name, code, optimized); | 
 |   return code.ptr(); | 
 | } | 
 |  | 
 | #if defined(DART_PRECOMPILER) | 
 | DECLARE_FLAG(charp, write_v8_snapshot_profile_to); | 
 | DECLARE_FLAG(charp, trace_precompiler_to); | 
 | #endif  // defined(DART_PRECOMPILER) | 
 |  | 
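// Builds a Code object and its Instructions from the assembler's buffer:
// attaches or builds an object pool as requested, copies the instruction
// bytes and applies fixups inside a NoSafepointScope, resolves embedded
// object handles to raw pointers, optionally write-protects the result, and
// wires up the active instructions and cached entry points.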
 | CodePtr Code::FinalizeCode(FlowGraphCompiler* compiler, | 
 |                            compiler::Assembler* assembler, | 
 |                            PoolAttachment pool_attachment, | 
 |                            bool optimized, | 
 |                            CodeStatistics* stats /* = nullptr */) { | 
 |   auto thread = Thread::Current(); | 
 |   ASSERT(thread->isolate_group()->program_lock()->IsCurrentThreadWriter()); | 
 |  | 
 |   ASSERT(assembler != nullptr); | 
 |   ObjectPool& object_pool = ObjectPool::Handle(); | 
 |  | 
 |   if (pool_attachment == PoolAttachment::kAttachPool) { | 
 |     if (assembler->HasObjectPoolBuilder()) { | 
 |       object_pool = | 
 |           ObjectPool::NewFromBuilder(assembler->object_pool_builder()); | 
 |     } else { | 
 |       object_pool = ObjectPool::empty_object_pool().ptr(); | 
 |     } | 
 |   } else { | 
 | #if defined(DART_PRECOMPILER) | 
 |     if (assembler->HasObjectPoolBuilder() && | 
 |         assembler->object_pool_builder().HasParent()) { | 
      // We are not going to write this pool into the snapshot, but we will
      // use it to emit references from this code object to other objects in
      // the snapshot that it uses.
 |       object_pool = | 
 |           ObjectPool::NewFromBuilder(assembler->object_pool_builder()); | 
 |     } | 
 | #endif  // defined(DART_PRECOMPILER) | 
 |   } | 
 |  | 
 |   // Allocate the Code and Instructions objects.  Code is allocated first | 
 |   // because a GC during allocation of the code will leave the instruction | 
 |   // pages read-only. | 
 |   intptr_t pointer_offset_count = assembler->CountPointerOffsets(); | 
 |   Code& code = Code::ZoneHandle(Code::New(pointer_offset_count)); | 
 | #ifdef TARGET_ARCH_IA32 | 
 |   assembler->GetSelfHandle() = code.ptr(); | 
 | #endif | 
 |   Instructions& instrs = Instructions::ZoneHandle(Instructions::New( | 
 |       assembler->CodeSize(), assembler->has_monomorphic_entry(), | 
 |       assembler->should_be_aligned())); | 
 |  | 
 |   { | 
    // Important: if a GC is triggered at any point between Instructions::New
    // and here, it would write-protect the instructions object that we are
    // trying to fill in.
 |     NoSafepointScope no_safepoint; | 
 |  | 
 |     // Copy the instructions into the instruction area and apply all fixups. | 
 |     // Embedded pointers are still in handles at this point. | 
 |     MemoryRegion region(reinterpret_cast<void*>(instrs.PayloadStart()), | 
 |                         instrs.Size()); | 
 |     assembler->FinalizeInstructions(region); | 
 |  | 
 |     const auto& pointer_offsets = assembler->GetPointerOffsets(); | 
 |     ASSERT(pointer_offsets.length() == pointer_offset_count); | 
 |     ASSERT(code.pointer_offsets_length() == pointer_offsets.length()); | 
 |  | 
    // Set the pointer offsets list in the Code object and resolve all
    // handles in the instruction stream to raw objects.
 |     for (intptr_t i = 0; i < pointer_offsets.length(); i++) { | 
 |       intptr_t offset_in_instrs = pointer_offsets[i]; | 
 |       code.SetPointerOffsetAt(i, offset_in_instrs); | 
 |       uword addr = region.start() + offset_in_instrs; | 
 |       ASSERT(instrs.PayloadStart() <= addr); | 
 |       ASSERT((instrs.PayloadStart() + instrs.Size()) > addr); | 
 |       const Object* object = LoadUnaligned(reinterpret_cast<Object**>(addr)); | 
 |       ASSERT(object->IsOld()); | 
 |       // N.B. The pointer is embedded in the Instructions object, but visited | 
 |       // through the Code object. | 
 |       code.StorePointerUnaligned(reinterpret_cast<ObjectPtr*>(addr), | 
 |                                  object->ptr(), thread); | 
 |     } | 
 |  | 
    // Write-protect the instructions and, if supported by the OS, use dual
    // mapping for execution.
 |     if (FLAG_write_protect_code) { | 
 |       uword address = UntaggedObject::ToAddr(instrs.ptr()); | 
 |       VirtualMemory::Protect(reinterpret_cast<void*>(address), | 
 |                              instrs.ptr()->untag()->HeapSize(), | 
 |                              VirtualMemory::kReadExecute); | 
 |     } | 
 |  | 
 |     // Hook up Code and Instructions objects. | 
 |     const uword unchecked_offset = assembler->UncheckedEntryOffset(); | 
 |     code.SetActiveInstructions(instrs, unchecked_offset); | 
 |     code.set_instructions(instrs); | 
 |     NOT_IN_PRECOMPILED(code.set_unchecked_offset(unchecked_offset)); | 
 |     code.set_is_alive(true); | 
 |  | 
 |     // Set object pool in Instructions object. | 
 |     if (!object_pool.IsNull()) { | 
 |       code.set_object_pool(object_pool.ptr()); | 
 |     } | 
 |  | 
 | #if defined(DART_PRECOMPILER) | 
 |     if (stats != nullptr) { | 
 |       stats->Finalize(); | 
 |       instrs.set_stats(stats); | 
 |     } | 
 | #endif | 
 |  | 
 |     CPU::FlushICache(instrs.PayloadStart(), instrs.Size()); | 
 |   } | 
 |  | 
 | #if defined(INCLUDE_IL_PRINTER) | 
 |   code.set_comments(CreateCommentsFrom(assembler)); | 
 | #endif  // defined(INCLUDE_IL_PRINTER) | 
 |  | 
 | #ifndef PRODUCT | 
 |   code.set_compile_timestamp(OS::GetCurrentMonotonicMicros()); | 
 |   if (assembler->prologue_offset() >= 0) { | 
 |     code.SetPrologueOffset(assembler->prologue_offset()); | 
 |   } else { | 
    // No prologue was ever entered; optimistically assume nothing was ever
    // pushed onto the stack.
 |     code.SetPrologueOffset(assembler->CodeSize()); | 
 |   } | 
 | #endif | 
 |   return code.ptr(); | 
 | } | 
 |  | 
 | void Code::NotifyCodeObservers(const Code& code, bool optimized) { | 
 | #if !defined(PRODUCT) | 
 |   ASSERT(!Thread::Current()->OwnsGCSafepoint()); | 
 |   if (CodeObservers::AreActive()) { | 
 |     if (code.IsFunctionCode()) { | 
 |       const auto& function = Function::Handle(code.function()); | 
 |       if (!function.IsNull()) { | 
 |         return NotifyCodeObservers(function, code, optimized); | 
 |       } | 
 |     } | 
 |     NotifyCodeObservers(code.Name(), code, optimized); | 
 |   } | 
 | #endif | 
 | } | 
 |  | 
 | void Code::NotifyCodeObservers(const Function& function, | 
 |                                const Code& code, | 
 |                                bool optimized) { | 
 | #if !defined(PRODUCT) | 
 |   ASSERT(!function.IsNull()); | 
 |   ASSERT(!Thread::Current()->OwnsGCSafepoint()); | 
  // Calling ToLibNamePrefixedQualifiedCString is very expensive, so try to
  // avoid it.
 |   if (CodeObservers::AreActive()) { | 
 |     const char* name = function.ToLibNamePrefixedQualifiedCString(); | 
 |     NotifyCodeObservers(name, code, optimized); | 
 |   } | 
 | #endif | 
 | } | 
 |  | 
 | void Code::NotifyCodeObservers(const char* name, | 
 |                                const Code& code, | 
 |                                bool optimized) { | 
 | #if !defined(PRODUCT) | 
 |   ASSERT(name != nullptr); | 
 |   ASSERT(!code.IsNull()); | 
 |   ASSERT(!Thread::Current()->OwnsGCSafepoint()); | 
 |   if (CodeObservers::AreActive()) { | 
 |     const auto& instrs = Instructions::Handle(code.instructions()); | 
 |     CodeObservers::NotifyAll(name, instrs.PayloadStart(), | 
 |                              code.GetPrologueOffset(), instrs.Size(), optimized, | 
 |                              &code.comments()); | 
 |   } | 
 | #endif | 
 | } | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
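// Linearly scans the VM-isolate heap and old-space objects outside image
// pages for a Code object whose payload starts at `pc` (and, outside
// PRODUCT builds, whose compile timestamp matches). Intended as a slow
// path; see also FindCodeUnsafe.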
 | CodePtr Code::FindCode(uword pc, int64_t timestamp) { | 
 |   class SlowFindCodeVisitor : public ObjectVisitor { | 
 |    public: | 
 |     SlowFindCodeVisitor(uword pc, int64_t timestamp) | 
 |         : pc_(pc), timestamp_(timestamp), result_(Code::null()) {} | 
 |  | 
 |     void VisitObject(ObjectPtr obj) { | 
 |       if (!obj->IsCode()) return; | 
 |       CodePtr code = static_cast<CodePtr>(obj); | 
 |       if (Code::PayloadStartOf(code) != pc_) return; | 
 | #if !defined(PRODUCT) | 
 |       if (code->untag()->compile_timestamp_ != timestamp_) return; | 
 | #endif | 
 |       ASSERT(result_ == Code::null()); | 
 |       result_ = code; | 
 |     } | 
 |  | 
 |     CodePtr result() const { return result_; } | 
 |  | 
 |    private: | 
 |     uword pc_; | 
 |     int64_t timestamp_; | 
 |     CodePtr result_; | 
 |   }; | 
 |  | 
 |   HeapIterationScope iteration(Thread::Current()); | 
 |   SlowFindCodeVisitor visitor(pc, timestamp); | 
 |   iteration.IterateVMIsolateObjects(&visitor); | 
 |   iteration.IterateOldObjectsNoImagePages(&visitor); | 
 |   return visitor.result(); | 
 | } | 
 |  | 
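// Like FindCode, but matches any Code whose instructions contain `pc` and
// visits the old space without the usual safety guarantees (note the
// VisitObjectsUnsafe calls), hence the "Unsafe" suffix.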
 | CodePtr Code::FindCodeUnsafe(uword pc) { | 
 |   class FindCodeUnsafeVisitor : public ObjectVisitor { | 
 |    public: | 
 |     explicit FindCodeUnsafeVisitor(uword pc) : pc_(pc), result_(Code::null()) {} | 
 |  | 
 |     void VisitObject(ObjectPtr obj) { | 
 |       if (obj->IsCode()) { | 
 |         CodePtr code = static_cast<CodePtr>(obj); | 
 |         if (Code::ContainsInstructionAt(code, pc_)) { | 
 |           result_ = code; | 
 |         } | 
 |       } | 
 |     } | 
 |  | 
 |     CodePtr result() { return result_; } | 
 |  | 
 |    private: | 
 |     uword pc_; | 
 |     CodePtr result_; | 
 |   }; | 
 |  | 
 |   IsolateGroup* group = IsolateGroup::Current(); | 
 |   PageSpace* old_space = group->heap()->old_space(); | 
 |   old_space->MakeIterable(); | 
 |   FindCodeUnsafeVisitor visitor(pc); | 
 |   old_space->VisitObjectsUnsafe(&visitor); | 
 |   Dart::vm_isolate_group()->heap()->old_space()->VisitObjectsUnsafe(&visitor); | 
 |   return visitor.result(); | 
 | } | 
 |  | 
 | TokenPosition Code::GetTokenIndexOfPC(uword pc) const { | 
 |   uword pc_offset = pc - PayloadStart(); | 
 |   const PcDescriptors& descriptors = PcDescriptors::Handle(pc_descriptors()); | 
 |   PcDescriptors::Iterator iter(descriptors, UntaggedPcDescriptors::kAnyKind); | 
 |   while (iter.MoveNext()) { | 
 |     if (iter.PcOffset() == pc_offset) { | 
 |       return iter.TokenPos(); | 
 |     } | 
 |   } | 
 |   return TokenPosition::kNoSource; | 
 | } | 
 |  | 
 | uword Code::GetPcForDeoptId(intptr_t deopt_id, | 
 |                             UntaggedPcDescriptors::Kind kind) const { | 
 |   const PcDescriptors& descriptors = PcDescriptors::Handle(pc_descriptors()); | 
 |   PcDescriptors::Iterator iter(descriptors, kind); | 
 |   while (iter.MoveNext()) { | 
 |     if (iter.DeoptId() == deopt_id) { | 
 |       uword pc_offset = iter.PcOffset(); | 
 |       uword pc = PayloadStart() + pc_offset; | 
 |       ASSERT(ContainsInstructionAt(pc)); | 
 |       return pc; | 
 |     } | 
 |   } | 
 |   return 0; | 
 | } | 
 |  | 
 | intptr_t Code::GetDeoptIdForOsr(uword pc) const { | 
 |   uword pc_offset = pc - PayloadStart(); | 
 |   const PcDescriptors& descriptors = PcDescriptors::Handle(pc_descriptors()); | 
 |   PcDescriptors::Iterator iter(descriptors, UntaggedPcDescriptors::kOsrEntry); | 
 |   while (iter.MoveNext()) { | 
 |     if (iter.PcOffset() == pc_offset) { | 
 |       return iter.DeoptId(); | 
 |     } | 
 |   } | 
 |   return DeoptId::kNone; | 
 | } | 
 |  | 
 | const char* Code::ToCString() const { | 
 |   return OS::SCreate(Thread::Current()->zone(), "Code(%s)", | 
 |                      QualifiedName(NameFormattingParams( | 
 |                          kScrubbedName, NameDisambiguation::kYes))); | 
 | } | 
 |  | 
 | uint32_t Code::Hash() const { | 
 |   // PayloadStart() is a tempting hash as Instructions are not moved by the | 
 |   // compactor, but Instructions are effectively moved between the process | 
 |   // creating an AppJIT/AOT snapshot and the process loading the snapshot. | 
 |   const Object& obj = | 
 |       Object::Handle(WeakSerializationReference::UnwrapIfTarget(owner())); | 
 |   if (obj.IsClass()) { | 
 |     return Class::Cast(obj).Hash(); | 
 |   } else if (obj.IsAbstractType()) { | 
 |     return AbstractType::Cast(obj).Hash(); | 
 |   } else if (obj.IsFunction()) { | 
 |     return Function::Cast(obj).Hash(); | 
 |   } else { | 
 |     // E.g., VM stub. | 
 |     return 42; | 
 |   } | 
 | } | 
 |  | 
 | const char* Code::Name() const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   if (IsStubCode()) { | 
 |     // Regular stub. | 
 |     const char* name = StubCode::NameOfStub(EntryPoint()); | 
 |     if (name == nullptr) { | 
 |       return "[unknown stub]";  // Not yet recorded. | 
 |     } | 
 |     return OS::SCreate(zone, "[Stub] %s", name); | 
 |   } | 
 |   const auto& obj = | 
 |       Object::Handle(zone, WeakSerializationReference::UnwrapIfTarget(owner())); | 
 |   if (obj.IsClass()) { | 
 |     // Allocation stub. | 
 |     return OS::SCreate(zone, "[Stub] Allocate %s", | 
 |                        Class::Cast(obj).ScrubbedNameCString()); | 
 |   } else if (obj.IsAbstractType()) { | 
 |     // Type test stub. | 
 |     return OS::SCreate(zone, "[Stub] Type Test %s", | 
 |                        AbstractType::Cast(obj).ToCString()); | 
 |   } else if (obj.IsFunction()) { | 
 |     // Dart function. | 
 |     const char* opt = is_optimized() ? "[Optimized]" : "[Unoptimized]"; | 
 |     const char* function_name = Function::Cast(obj).UserVisibleNameCString(); | 
 |     return OS::SCreate(zone, "%s %s", opt, function_name); | 
 |   } else { | 
    // E.g., the function owner was dropped by --no_retain_function_objects.
 |     return "[unknown code]"; | 
 |   } | 
 | } | 
 |  | 
 | const char* Code::QualifiedName(const NameFormattingParams& params) const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const Object& obj = | 
 |       Object::Handle(zone, WeakSerializationReference::UnwrapIfTarget(owner())); | 
 |   if (obj.IsFunction()) { | 
 |     ZoneTextBuffer printer(zone); | 
 |     printer.AddString(is_optimized() ? "[Optimized] " : "[Unoptimized] "); | 
 |     Function::Cast(obj).PrintName(params, &printer); | 
 |     return printer.buffer(); | 
 |   } | 
 |   return Name(); | 
 | } | 
 |  | 
 | bool Code::IsStubCode() const { | 
 |   // We should _not_ unwrap any possible WSRs here, as the null value is never | 
 |   // wrapped by a WSR. | 
 |   return owner() == Object::null(); | 
 | } | 
 |  | 
 | bool Code::IsAllocationStubCode() const { | 
 |   return OwnerClassId() == kClassCid; | 
 | } | 
 |  | 
 | bool Code::IsTypeTestStubCode() const { | 
 |   auto const cid = OwnerClassId(); | 
 |   return cid == kAbstractTypeCid || cid == kTypeCid || | 
 |          cid == kFunctionTypeCid || cid == kRecordTypeCid || | 
 |          cid == kTypeParameterCid; | 
 | } | 
 |  | 
 | bool Code::IsFunctionCode() const { | 
 |   return OwnerClassId() == kFunctionCid; | 
 | } | 
 |  | 
 | bool Code::IsUnknownDartCode(CodePtr code) { | 
 |   return StubCode::HasBeenInitialized() && | 
 |          (code == StubCode::UnknownDartCode().ptr()); | 
 | } | 
 |  | 
 | void Code::DisableDartCode() const { | 
 |   GcSafepointOperationScope safepoint(Thread::Current()); | 
 |   ASSERT(IsFunctionCode()); | 
 |   ASSERT(instructions() == active_instructions()); | 
 |   const Code& new_code = StubCode::FixCallersTarget(); | 
 |   SetActiveInstructions(Instructions::Handle(new_code.instructions()), | 
 |                         new_code.UncheckedEntryPointOffset()); | 
 | } | 
 |  | 
 | void Code::DisableStubCode(bool is_cls_parameterized) const { | 
 |   GcSafepointOperationScope safepoint(Thread::Current()); | 
 |   ASSERT(IsAllocationStubCode()); | 
 |   ASSERT(instructions() == active_instructions()); | 
 |   const Code& new_code = is_cls_parameterized | 
 |                              ? StubCode::FixParameterizedAllocationStubTarget() | 
 |                              : StubCode::FixAllocationStubTarget(); | 
 |   SetActiveInstructions(Instructions::Handle(new_code.instructions()), | 
 |                         new_code.UncheckedEntryPointOffset()); | 
 | } | 
 |  | 
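// Precomputes all four entry points (normal and monomorphic, each in a
// checked and an unchecked variant) and caches them directly on the Code
// object, so that callers can be dispatched without first loading the
// Instructions object.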
 | void Code::InitializeCachedEntryPointsFrom(CodePtr code, | 
 |                                            InstructionsPtr instructions, | 
 |                                            uint32_t unchecked_offset) { | 
 |   NoSafepointScope _; | 
 |   const uword entry_point = Instructions::EntryPoint(instructions); | 
 |   const uword monomorphic_entry_point = | 
 |       Instructions::MonomorphicEntryPoint(instructions); | 
 |   code->untag()->entry_point_ = entry_point; | 
 |   code->untag()->monomorphic_entry_point_ = monomorphic_entry_point; | 
 |   code->untag()->unchecked_entry_point_ = entry_point + unchecked_offset; | 
 |   code->untag()->monomorphic_unchecked_entry_point_ = | 
 |       monomorphic_entry_point + unchecked_offset; | 
 | } | 
 |  | 
 | void Code::SetActiveInstructions(const Instructions& instructions, | 
 |                                  uint32_t unchecked_offset) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter()); | 
 |   SetActiveInstructionsSafe(instructions, unchecked_offset); | 
 | #endif | 
 | } | 
 |  | 
 | void Code::SetActiveInstructionsSafe(const Instructions& instructions, | 
 |                                      uint32_t unchecked_offset) const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   // RawInstructions are never allocated in New space and hence a | 
 |   // store buffer update is not needed here. | 
 |   untag()->set_active_instructions(instructions.ptr()); | 
 |   Code::InitializeCachedEntryPointsFrom(ptr(), instructions.ptr(), | 
 |                                         unchecked_offset); | 
 | #endif | 
 | } | 
 |  | 
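// Restores the code's original instructions as the active instructions,
// undoing a prior DisableDartCode or DisableStubCode.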
 | void Code::ResetActiveInstructions() const { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   UNREACHABLE(); | 
 | #else | 
 |   SetActiveInstructions(Instructions::Handle(instructions()), | 
 |                         untag()->unchecked_offset_); | 
 | #endif | 
 | } | 
 |  | 
 | void Code::GetInlinedFunctionsAtInstruction( | 
 |     intptr_t pc_offset, | 
 |     GrowableArray<const Function*>* functions, | 
 |     GrowableArray<TokenPosition>* token_positions) const { | 
 |   const CodeSourceMap& map = CodeSourceMap::Handle(code_source_map()); | 
 |   if (map.IsNull()) { | 
 |     ASSERT(!IsFunctionCode()); | 
 |     return;  // VM stub, allocation stub, or type testing stub. | 
 |   } | 
 |   const Array& id_map = Array::Handle(inlined_id_to_function()); | 
 |   const Function& root = Function::Handle(function()); | 
 |   CodeSourceMapReader reader(map, id_map, root); | 
 |   reader.GetInlinedFunctionsAt(pc_offset, functions, token_positions); | 
 | } | 
 |  | 
 | #ifndef PRODUCT | 
 | void Code::PrintJSONInlineIntervals(JSONObject* jsobj) const { | 
 |   if (!is_optimized()) { | 
 |     return;  // No inlining. | 
 |   } | 
 |   const CodeSourceMap& map = CodeSourceMap::Handle(code_source_map()); | 
 |   const Array& id_map = Array::Handle(inlined_id_to_function()); | 
 |   const Function& root = Function::Handle(function()); | 
 |   CodeSourceMapReader reader(map, id_map, root); | 
 |   reader.PrintJSONInlineIntervals(jsobj); | 
 | } | 
 | #endif | 
 |  | 
 | void Code::DumpInlineIntervals() const { | 
 |   const CodeSourceMap& map = CodeSourceMap::Handle(code_source_map()); | 
 |   if (map.IsNull()) { | 
 |     // Stub code. | 
 |     return; | 
 |   } | 
 |   const Array& id_map = Array::Handle(inlined_id_to_function()); | 
 |   const Function& root = Function::Handle(function()); | 
 |   CodeSourceMapReader reader(map, id_map, root); | 
 |   reader.DumpInlineIntervals(PayloadStart()); | 
 | } | 
 |  | 
 | void Code::DumpSourcePositions(bool relative_addresses) const { | 
 |   const CodeSourceMap& map = CodeSourceMap::Handle(code_source_map()); | 
 |   if (map.IsNull()) { | 
 |     // Stub code. | 
 |     return; | 
 |   } | 
 |   const Array& id_map = Array::Handle(inlined_id_to_function()); | 
 |   const Function& root = Function::Handle(function()); | 
 |   CodeSourceMapReader reader(map, id_map, root); | 
 |   reader.DumpSourcePositions(relative_addresses ? 0 : PayloadStart()); | 
 | } | 
 |  | 
 | intptr_t Context::GetLevel() const { | 
 |   intptr_t level = 0; | 
 |   Context& parent_ctx = Context::Handle(parent()); | 
 |   while (!parent_ctx.IsNull()) { | 
 |     level++; | 
 |     parent_ctx = parent_ctx.parent(); | 
 |   } | 
 |   return level; | 
 | } | 
 |  | 
 | ContextPtr Context::New(intptr_t num_variables, Heap::Space space) { | 
 |   ASSERT(num_variables >= 0); | 
 |   ASSERT(Object::context_class() != Class::null()); | 
 |  | 
 |   if (!IsValidLength(num_variables)) { | 
 |     // This should be caught before we reach here. | 
 |     FATAL("Fatal error in Context::New: invalid num_variables %" Pd "\n", | 
 |           num_variables); | 
 |   } | 
 |   auto raw = Object::Allocate<Context>(space, num_variables); | 
 |   NoSafepointScope no_safepoint; | 
 |   raw->untag()->num_variables_ = num_variables; | 
 |   return raw; | 
 | } | 
 |  | 
 | const char* Context::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "Context: null"; | 
 |   } | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const Context& parent_ctx = Context::Handle(parent()); | 
 |   if (parent_ctx.IsNull()) { | 
 |     return zone->PrintToString("Context num_variables: %" Pd "", | 
 |                                num_variables()); | 
 |   } else { | 
 |     const char* parent_str = parent_ctx.ToCString(); | 
 |     return zone->PrintToString("Context num_variables: %" Pd " parent:{ %s }", | 
 |                                num_variables(), parent_str); | 
 |   } | 
 | } | 
 |  | 
 | static void IndentN(int count) { | 
 |   for (int i = 0; i < count; i++) { | 
 |     THR_Print(" "); | 
 |   } | 
 | } | 
 |  | 
 | void Context::Dump(int indent) const { | 
 |   if (IsNull()) { | 
 |     IndentN(indent); | 
 |     THR_Print("Context@null\n"); | 
 |     return; | 
 |   } | 
 |  | 
 |   IndentN(indent); | 
 |   THR_Print("Context vars(%" Pd ") {\n", num_variables()); | 
 |   Object& obj = Object::Handle(); | 
 |   for (intptr_t i = 0; i < num_variables(); i++) { | 
 |     IndentN(indent + 2); | 
 |     obj = At(i); | 
 |     const char* s = obj.ToCString(); | 
 |     if (strlen(s) > 50) { | 
 |       THR_Print("[%" Pd "] = [first 50 chars:] %.50s...\n", i, s); | 
 |     } else { | 
 |       THR_Print("[%" Pd "] = %s\n", i, s); | 
 |     } | 
 |   } | 
 |  | 
 |   const Context& parent_ctx = Context::Handle(parent()); | 
 |   if (!parent_ctx.IsNull()) { | 
 |     parent_ctx.Dump(indent + 2); | 
 |   } | 
 |   IndentN(indent); | 
 |   THR_Print("}\n"); | 
 | } | 
 |  | 
 | ContextScopePtr ContextScope::New(intptr_t num_variables, bool is_implicit) { | 
 |   ASSERT(Object::context_scope_class() != Class::null()); | 
 |   if (num_variables < 0 || num_variables > kMaxElements) { | 
 |     // This should be caught before we reach here. | 
 |     FATAL("Fatal error in ContextScope::New: invalid num_variables %" Pd "\n", | 
 |           num_variables); | 
 |   } | 
 |   ContextScope& result = ContextScope::Handle(); | 
 |   { | 
 |     auto raw = Object::Allocate<ContextScope>(Heap::kOld, num_variables); | 
 |     NoSafepointScope no_safepoint; | 
 |     result = raw; | 
 |     result.set_num_variables(num_variables); | 
 |   } | 
 |   result.set_is_implicit(is_implicit); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | TokenPosition ContextScope::TokenIndexAt(intptr_t scope_index) const { | 
 |   return TokenPosition::Deserialize( | 
 |       Smi::Value(untag()->token_pos_at(scope_index))); | 
 | } | 
 |  | 
 | void ContextScope::SetTokenIndexAt(intptr_t scope_index, | 
 |                                    TokenPosition token_pos) const { | 
 |   untag()->set_token_pos_at(scope_index, Smi::New(token_pos.Serialize())); | 
 | } | 
 |  | 
 | TokenPosition ContextScope::DeclarationTokenIndexAt( | 
 |     intptr_t scope_index) const { | 
 |   return TokenPosition::Deserialize( | 
 |       Smi::Value(untag()->declaration_token_pos_at(scope_index))); | 
 | } | 
 |  | 
 | void ContextScope::SetDeclarationTokenIndexAt( | 
 |     intptr_t scope_index, | 
 |     TokenPosition declaration_token_pos) const { | 
 |   untag()->set_declaration_token_pos_at( | 
 |       scope_index, Smi::New(declaration_token_pos.Serialize())); | 
 | } | 
 |  | 
 | StringPtr ContextScope::NameAt(intptr_t scope_index) const { | 
 |   return untag()->name_at(scope_index); | 
 | } | 
 |  | 
 | void ContextScope::SetNameAt(intptr_t scope_index, const String& name) const { | 
 |   untag()->set_name_at(scope_index, name.ptr()); | 
 | } | 
 |  | 
 | void ContextScope::ClearFlagsAt(intptr_t scope_index) const { | 
 |   untag()->set_flags_at(scope_index, Smi::New(0)); | 
 | } | 
 |  | 
 | bool ContextScope::GetFlagAt(intptr_t scope_index, intptr_t bit_index) const { | 
 |   const intptr_t mask = 1 << bit_index; | 
 |   return (Smi::Value(untag()->flags_at(scope_index)) & mask) != 0; | 
 | } | 
 |  | 
 | void ContextScope::SetFlagAt(intptr_t scope_index, | 
 |                              intptr_t bit_index, | 
 |                              bool value) const { | 
 |   const intptr_t mask = 1 << bit_index; | 
 |   intptr_t flags = Smi::Value(untag()->flags_at(scope_index)); | 
 |   untag()->set_flags_at(scope_index, | 
 |                         Smi::New(value ? flags | mask : flags & ~mask)); | 
 | } | 
 |  | 
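// Expands to an Is<Name>At / SetIs<Name>At accessor pair for each flag in
// CONTEXT_SCOPE_VARIABLE_DESC_FLAG_LIST. For a hypothetical flag named Foo,
// this generates IsFooAt(scope_index) and SetIsFooAt(scope_index, value),
// each backed by one bit of the per-variable Smi flag word manipulated by
// GetFlagAt/SetFlagAt above.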
 | #define DEFINE_FLAG_ACCESSORS(Name)                                            \ | 
 |   bool ContextScope::Is##Name##At(intptr_t scope_index) const {                \ | 
 |     return GetFlagAt(scope_index,                                              \ | 
 |                      UntaggedContextScope::VariableDesc::kIs##Name);           \ | 
 |   }                                                                            \ | 
 |                                                                                \ | 
 |   void ContextScope::SetIs##Name##At(intptr_t scope_index, bool value) const { \ | 
 |     SetFlagAt(scope_index, UntaggedContextScope::VariableDesc::kIs##Name,      \ | 
 |               value);                                                          \ | 
 |   } | 
 |  | 
 | CONTEXT_SCOPE_VARIABLE_DESC_FLAG_LIST(DEFINE_FLAG_ACCESSORS) | 
 | #undef DEFINE_FLAG_ACCESSORS | 
 |  | 
 | intptr_t ContextScope::LateInitOffsetAt(intptr_t scope_index) const { | 
 |   return Smi::Value(untag()->late_init_offset_at(scope_index)); | 
 | } | 
 |  | 
 | void ContextScope::SetLateInitOffsetAt(intptr_t scope_index, | 
 |                                        intptr_t late_init_offset) const { | 
 |   untag()->set_late_init_offset_at(scope_index, Smi::New(late_init_offset)); | 
 | } | 
 |  | 
 | AbstractTypePtr ContextScope::TypeAt(intptr_t scope_index) const { | 
 |   return untag()->type_at(scope_index); | 
 | } | 
 |  | 
 | void ContextScope::SetTypeAt(intptr_t scope_index, | 
 |                              const AbstractType& type) const { | 
 |   untag()->set_type_at(scope_index, type.ptr()); | 
 | } | 
 |  | 
 | intptr_t ContextScope::CidAt(intptr_t scope_index) const { | 
 |   return Smi::Value(untag()->cid_at(scope_index)); | 
 | } | 
 |  | 
 | void ContextScope::SetCidAt(intptr_t scope_index, intptr_t cid) const { | 
 |   untag()->set_cid_at(scope_index, Smi::New(cid)); | 
 | } | 
 |  | 
 | intptr_t ContextScope::ContextIndexAt(intptr_t scope_index) const { | 
 |   return Smi::Value(untag()->context_index_at(scope_index)); | 
 | } | 
 |  | 
 | void ContextScope::SetContextIndexAt(intptr_t scope_index, | 
 |                                      intptr_t context_index) const { | 
 |   untag()->set_context_index_at(scope_index, Smi::New(context_index)); | 
 | } | 
 |  | 
 | intptr_t ContextScope::ContextLevelAt(intptr_t scope_index) const { | 
 |   return Smi::Value(untag()->context_level_at(scope_index)); | 
 | } | 
 |  | 
 | void ContextScope::SetContextLevelAt(intptr_t scope_index, | 
 |                                      intptr_t context_level) const { | 
 |   untag()->set_context_level_at(scope_index, Smi::New(context_level)); | 
 | } | 
 |  | 
 | intptr_t ContextScope::KernelOffsetAt(intptr_t scope_index) const { | 
 |   return Smi::Value(untag()->kernel_offset_at(scope_index)); | 
 | } | 
 |  | 
 | void ContextScope::SetKernelOffsetAt(intptr_t scope_index, | 
 |                                      intptr_t kernel_offset) const { | 
 |   untag()->set_kernel_offset_at(scope_index, Smi::New(kernel_offset)); | 
 | } | 
 |  | 
 | const char* ContextScope::ToCString() const { | 
 |   const char* prev_cstr = "ContextScope:"; | 
 |   String& name = String::Handle(); | 
 |   for (int i = 0; i < num_variables(); i++) { | 
 |     name = NameAt(i); | 
 |     const char* cname = name.ToCString(); | 
 |     TokenPosition pos = TokenIndexAt(i); | 
 |     intptr_t idx = ContextIndexAt(i); | 
 |     intptr_t lvl = ContextLevelAt(i); | 
 |     char* chars = | 
 |         OS::SCreate(Thread::Current()->zone(), | 
 |                     "%s\nvar %s  token-pos %s  ctx lvl %" Pd "  index %" Pd "", | 
 |                     prev_cstr, cname, pos.ToCString(), lvl, idx); | 
 |     prev_cstr = chars; | 
 |   } | 
 |   return prev_cstr; | 
 | } | 
 |  | 
 | SentinelPtr Sentinel::New() { | 
 |   return Object::Allocate<Sentinel>(Heap::kOld); | 
 | } | 
 |  | 
 | const char* Sentinel::ToCString() const { | 
 |   if (ptr() == Object::sentinel().ptr()) { | 
 |     return "sentinel"; | 
 |   } else if (ptr() == Object::transition_sentinel().ptr()) { | 
 |     return "transition_sentinel"; | 
 |   } else if (ptr() == Object::unknown_constant().ptr()) { | 
 |     return "unknown_constant"; | 
 |   } else if (ptr() == Object::non_constant().ptr()) { | 
 |     return "non_constant"; | 
 |   } else if (ptr() == Object::optimized_out().ptr()) { | 
 |     return "<optimized out>"; | 
 |   } | 
 |   return "Sentinel(unknown)"; | 
 | } | 
 |  | 
 | ArrayPtr MegamorphicCache::buckets() const { | 
 |   return untag()->buckets(); | 
 | } | 
 |  | 
 | void MegamorphicCache::set_buckets(const Array& buckets) const { | 
 |   untag()->set_buckets(buckets.ptr()); | 
 | } | 
 |  | 
 | // Class IDs in the table are smi-tagged, so we use a smi-tagged mask | 
 | // and target class ID to avoid untagging (on each iteration of the | 
 | // test loop) in generated code. | 
 | intptr_t MegamorphicCache::mask() const { | 
 |   return Smi::Value(untag()->mask()); | 
 | } | 
 |  | 
 | void MegamorphicCache::set_mask(intptr_t mask) const { | 
 |   untag()->set_mask(Smi::New(mask)); | 
 | } | 
 |  | 
 | intptr_t MegamorphicCache::filled_entry_count() const { | 
 |   return untag()->filled_entry_count_; | 
 | } | 
 |  | 
 | void MegamorphicCache::set_filled_entry_count(intptr_t count) const { | 
 |   StoreNonPointer(&untag()->filled_entry_count_, count); | 
 | } | 
 |  | 
 | MegamorphicCachePtr MegamorphicCache::New() { | 
 |   return Object::Allocate<MegamorphicCache>(Heap::kOld); | 
 | } | 
 |  | 
 | MegamorphicCachePtr MegamorphicCache::New(const String& target_name, | 
 |                                           const Array& arguments_descriptor) { | 
 |   auto* const zone = Thread::Current()->zone(); | 
 |   const auto& result = MegamorphicCache::Handle( | 
 |       zone, Object::Allocate<MegamorphicCache>(Heap::kOld)); | 
 |   const intptr_t capacity = kInitialCapacity; | 
 |   const Array& buckets = | 
 |       Array::Handle(zone, Array::New(kEntryLength * capacity, Heap::kOld)); | 
 |   const Object& handler = Object::Handle(zone); | 
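  // Mark every bucket as unoccupied by pairing the illegal class id with a
  // null handler; LookupLocked treats kIllegalCid as the end of a probe
  // sequence. The mask computed below relies on kInitialCapacity being a
  // power of two.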
 |   for (intptr_t i = 0; i < capacity; ++i) { | 
 |     SetEntry(buckets, i, smi_illegal_cid(), handler); | 
 |   } | 
 |   result.set_buckets(buckets); | 
 |   result.set_mask(capacity - 1); | 
 |   result.set_target_name(target_name); | 
 |   result.set_arguments_descriptor(arguments_descriptor); | 
 |   result.set_filled_entry_count(0); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void MegamorphicCache::EnsureContains(const Smi& class_id, | 
 |                                       const Object& target) const { | 
 |   SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex()); | 
 |  | 
 |   if (LookupLocked(class_id) == Object::null()) { | 
 |     InsertLocked(class_id, target); | 
 |   } | 
 |  | 
 | #if defined(DEBUG) | 
 |   ASSERT(LookupLocked(class_id) == target.ptr()); | 
#endif  // defined(DEBUG)
 | } | 
 |  | 
 | ObjectPtr MegamorphicCache::Lookup(const Smi& class_id) const { | 
 |   SafepointMutexLocker ml(IsolateGroup::Current()->type_feedback_mutex()); | 
 |   return LookupLocked(class_id); | 
 | } | 
 |  | 
 | ObjectPtr MegamorphicCache::LookupLocked(const Smi& class_id) const { | 
 |   auto thread = Thread::Current(); | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   auto zone = thread->zone(); | 
 |   ASSERT(thread->IsDartMutatorThread()); | 
 |   ASSERT(isolate_group->type_feedback_mutex()->IsOwnedByCurrentThread()); | 
 |  | 
 |   const auto& backing_array = Array::Handle(zone, buckets()); | 
 |   intptr_t id_mask = mask(); | 
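  // Open addressing with linear probing: start at the class id scaled by
  // kSpreadFactor and advance one slot at a time, wrapping via the mask.
  // Insertions keep the table below kLoadFactor, so an unsuccessful probe
  // always reaches an unoccupied (kIllegalCid) slot before wrapping around.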
 |   intptr_t index = (class_id.Value() * kSpreadFactor) & id_mask; | 
 |   intptr_t i = index; | 
 |   do { | 
 |     const classid_t current_cid = | 
 |         Smi::Value(Smi::RawCast(GetClassId(backing_array, i))); | 
 |     if (current_cid == class_id.Value()) { | 
 |       return GetTargetFunction(backing_array, i); | 
 |     } else if (current_cid == kIllegalCid) { | 
 |       return Object::null(); | 
 |     } | 
 |     i = (i + 1) & id_mask; | 
 |   } while (i != index); | 
 |   UNREACHABLE(); | 
 | } | 
 |  | 
 | void MegamorphicCache::InsertLocked(const Smi& class_id, | 
 |                                     const Object& target) const { | 
 |   auto isolate_group = IsolateGroup::Current(); | 
 |   ASSERT(isolate_group->type_feedback_mutex()->IsOwnedByCurrentThread()); | 
 |  | 
 |   // As opposed to ICData we are stopping mutator threads from other isolates | 
 |   // while modifying the megamorphic cache, since updates are not atomic. | 
 |   // | 
 |   // NOTE: In the future we might change the megamorphic cache insertions to | 
 |   // carefully use store-release barriers on the writer as well as | 
 |   // load-acquire barriers on the reader, ... | 
 |   isolate_group->RunWithStoppedMutators( | 
 |       [&]() { | 
 |         EnsureCapacityLocked(); | 
 |         InsertEntryLocked(class_id, target); | 
 |       }, | 
 |       /*use_force_growth=*/true); | 
 | } | 
 |  | 
 | void MegamorphicCache::EnsureCapacityLocked() const { | 
 |   auto thread = Thread::Current(); | 
 |   auto zone = thread->zone(); | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   ASSERT(isolate_group->type_feedback_mutex()->IsOwnedByCurrentThread()); | 
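  // Grow only when one more insertion would push the cache past kLoadFactor.
  // Doubling keeps the capacity a power of two, which the mask-based index
  // arithmetic depends on, and the old entries are reinserted below so their
  // slots are recomputed against the new mask.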
 |  | 
 |   intptr_t old_capacity = mask() + 1; | 
 |   double load_limit = kLoadFactor * static_cast<double>(old_capacity); | 
 |   if (static_cast<double>(filled_entry_count() + 1) > load_limit) { | 
 |     const Array& old_buckets = Array::Handle(zone, buckets()); | 
 |     intptr_t new_capacity = old_capacity * 2; | 
 |     const Array& new_buckets = | 
 |         Array::Handle(zone, Array::New(kEntryLength * new_capacity)); | 
 |  | 
 |     auto& target = Object::Handle(zone); | 
 |     for (intptr_t i = 0; i < new_capacity; ++i) { | 
 |       SetEntry(new_buckets, i, smi_illegal_cid(), target); | 
 |     } | 
 |     set_buckets(new_buckets); | 
 |     set_mask(new_capacity - 1); | 
 |     set_filled_entry_count(0); | 
 |  | 
 |     // Rehash the valid entries. | 
 |     Smi& class_id = Smi::Handle(zone); | 
 |     for (intptr_t i = 0; i < old_capacity; ++i) { | 
 |       class_id ^= GetClassId(old_buckets, i); | 
 |       if (class_id.Value() != kIllegalCid) { | 
 |         target = GetTargetFunction(old_buckets, i); | 
 |         InsertEntryLocked(class_id, target); | 
 |       } | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | void MegamorphicCache::InsertEntryLocked(const Smi& class_id, | 
 |                                          const Object& target) const { | 
 |   auto thread = Thread::Current(); | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   ASSERT(isolate_group->type_feedback_mutex()->IsOwnedByCurrentThread()); | 
 |  | 
 |   ASSERT(Thread::Current()->IsDartMutatorThread()); | 
 |   ASSERT(static_cast<double>(filled_entry_count() + 1) <= | 
 |          (kLoadFactor * static_cast<double>(mask() + 1))); | 
 |   const Array& backing_array = Array::Handle(buckets()); | 
 |   intptr_t id_mask = mask(); | 
 |   intptr_t index = (class_id.Value() * kSpreadFactor) & id_mask; | 
 |   intptr_t i = index; | 
 |   do { | 
 |     if (Smi::Value(Smi::RawCast(GetClassId(backing_array, i))) == kIllegalCid) { | 
 |       SetEntry(backing_array, i, class_id, target); | 
 |       set_filled_entry_count(filled_entry_count() + 1); | 
 |       return; | 
 |     } | 
 |     i = (i + 1) & id_mask; | 
 |   } while (i != index); | 
 |   UNREACHABLE(); | 
 | } | 
 |  | 
 | const char* MegamorphicCache::ToCString() const { | 
 |   const String& name = String::Handle(target_name()); | 
 |   return OS::SCreate(Thread::Current()->zone(), "MegamorphicCache(%s)", | 
 |                      name.ToCString()); | 
 | } | 
 |  | 
 | SubtypeTestCachePtr SubtypeTestCache::New(intptr_t num_inputs) { | 
 |   ASSERT(Object::subtypetestcache_class() != Class::null()); | 
 |   ASSERT(num_inputs >= 1); | 
 |   ASSERT(num_inputs <= kMaxInputs); | 
  // SubtypeTestCache objects are long-lived, so allocate them in the
  // old generation.
 |   const auto& result = | 
 |       SubtypeTestCache::Handle(Object::Allocate<SubtypeTestCache>(Heap::kOld)); | 
 |   ASSERT_EQUAL(result.num_occupied(), 0); | 
 |   result.untag()->num_inputs_ = num_inputs; | 
 |   result.set_cache(Object::empty_subtype_test_cache_array()); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ArrayPtr SubtypeTestCache::cache() const { | 
 |   return untag()->cache<std::memory_order_acquire>(); | 
 | } | 
 |  | 
 | void SubtypeTestCache::set_cache(const Array& value) const { | 
 |   // We have to ensure that initializing stores to the array are available | 
 |   // when releasing the pointer to the array pointer. | 
 |   // => We have to use store-release here. | 
 |   untag()->set_cache<std::memory_order_release>(value.ptr()); | 
 | } | 
 |  | 
 | void SubtypeTestCache::set_num_occupied(intptr_t value) const { | 
 |   ASSERT(Utils::IsUint(32, value)); | 
 |   untag()->num_occupied_ = value; | 
 | } | 
 |  | 
 | intptr_t SubtypeTestCache::NumberOfChecks() const { | 
 |   ASSERT(!IsNull()); | 
 |   return num_occupied(); | 
 | } | 
 |  | 
 | intptr_t SubtypeTestCache::NumEntries() const { | 
 |   ASSERT(!IsNull()); | 
 |   return Array::LengthOf(cache()) / kTestEntryLength; | 
 | } | 
 |  | 
 | intptr_t SubtypeTestCache::NumEntries(const Array& array) { | 
 |   SubtypeTestCacheTable table(array); | 
 |   return table.Length(); | 
 | } | 
 |  | 
 | bool SubtypeTestCache::IsHash() const { | 
 |   if (IsNull()) return false; | 
 |   return Array::LengthOf(cache()) > kMaxLinearCacheSize; | 
 | } | 
 |  | 
 | bool SubtypeTestCache::IsHash(const Array& array) { | 
 |   return array.Length() > kMaxLinearCacheSize; | 
 | } | 
 |  | 
 | intptr_t SubtypeTestCache::AddCheck( | 
 |     const Object& instance_class_id_or_signature, | 
 |     const AbstractType& destination_type, | 
 |     const TypeArguments& instance_type_arguments, | 
 |     const TypeArguments& instantiator_type_arguments, | 
 |     const TypeArguments& function_type_arguments, | 
 |     const TypeArguments& instance_parent_function_type_arguments, | 
 |     const TypeArguments& instance_delayed_type_arguments, | 
 |     const Bool& test_result) const { | 
 |   ASSERT(Thread::Current() | 
 |              ->isolate_group() | 
 |              ->subtype_test_cache_mutex() | 
 |              ->IsOwnedByCurrentThread()); | 
 |   ASSERT(!test_result.IsNull()); | 
 |   ASSERT(Smi::New(kRecordCid) != instance_class_id_or_signature.ptr()); | 
 |  | 
 |   const intptr_t old_num = NumberOfChecks(); | 
 |   Zone* const zone = Thread::Current()->zone(); | 
 |   Array& data = Array::Handle(zone, cache()); | 
 |   bool was_grown; | 
 |   data = EnsureCapacity(zone, data, old_num + 1, &was_grown); | 
 |   ASSERT(data.ptr() != Object::empty_subtype_test_cache_array().ptr()); | 
 |  | 
 |   const auto& loc = FindKeyOrUnused( | 
 |       data, num_inputs(), instance_class_id_or_signature, destination_type, | 
 |       instance_type_arguments, instantiator_type_arguments, | 
 |       function_type_arguments, instance_parent_function_type_arguments, | 
 |       instance_delayed_type_arguments); | 
 |   SubtypeTestCacheTable entries(data); | 
 |   const auto& entry = entries[loc.entry]; | 
 |   if (loc.present) { | 
 |     if (entry.Get<kTestResult>() != test_result.ptr()) { | 
 |       const auto& old_result = Bool::Handle(zone, entry.Get<kTestResult>()); | 
 |       FATAL("Existing subtype test cache entry has result %s, not %s", | 
 |             old_result.ToCString(), test_result.ToCString()); | 
 |     } | 
 |     return loc.entry; | 
 |   } | 
 |  | 
 |   // Set the used elements in the entry in reverse order, so that the instance | 
 |   // cid or signature is last, then increment the number of entries. | 
 |   entry.Set<kTestResult>(test_result); | 
 |   switch (num_inputs()) { | 
 |     case 7: | 
 |       entry.Set<kDestinationType>(destination_type); | 
 |       FALL_THROUGH; | 
 |     case 6: | 
 |       entry.Set<kInstanceDelayedFunctionTypeArguments>( | 
 |           instance_delayed_type_arguments); | 
 |       FALL_THROUGH; | 
 |     case 5: | 
 |       entry.Set<kInstanceParentFunctionTypeArguments>( | 
 |           instance_parent_function_type_arguments); | 
 |       FALL_THROUGH; | 
 |     case 4: | 
 |       entry.Set<kFunctionTypeArguments>(function_type_arguments); | 
 |       FALL_THROUGH; | 
 |     case 3: | 
 |       entry.Set<kInstantiatorTypeArguments>(instantiator_type_arguments); | 
 |       FALL_THROUGH; | 
 |     case 2: | 
 |       entry.Set<kInstanceTypeArguments>(instance_type_arguments); | 
 |       FALL_THROUGH; | 
 |     case 1: | 
 |       // If this is a new backing array, we don't need store-release barriers, | 
 |       // as no reader has access to the array until it is set as the backing | 
 |       // store (which is done with a store-release barrier). | 
 |       // | 
 |       // Otherwise, the instance cid or signature must be set last with a | 
 |       // store-release barrier, so concurrent readers can depend on a non-null | 
 |       // value meaning the rest of the entry is safe to load without barriers. | 
 |       if (was_grown) { | 
 |         entry.Set<kInstanceCidOrSignature>(instance_class_id_or_signature); | 
 |       } else { | 
 |         entry.Set<kInstanceCidOrSignature, std::memory_order_release>( | 
 |             instance_class_id_or_signature); | 
 |       } | 
 |       break; | 
 |     default: | 
 |       UNREACHABLE(); | 
 |   } | 
 |   set_num_occupied(old_num + 1); | 
 |   if (was_grown) { | 
 |     set_cache(data); | 
 |   } | 
 |   return loc.entry; | 
 | } | 
 |  | 
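// Compares an entry against the given inputs, checking only the inputs that
// this cache actually uses: each case falls through from the highest-numbered
// used input down to the instance cid or signature.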
 | static inline bool SubtypeTestCacheEntryMatches( | 
 |     const SubtypeTestCacheTable::TupleView& t, | 
 |     intptr_t num_inputs, | 
 |     const Object& instance_class_id_or_signature, | 
 |     const AbstractType& destination_type, | 
 |     const TypeArguments& instance_type_arguments, | 
 |     const TypeArguments& instantiator_type_arguments, | 
 |     const TypeArguments& function_type_arguments, | 
 |     const TypeArguments& instance_parent_function_type_arguments, | 
 |     const TypeArguments& instance_delayed_type_arguments) { | 
 |   switch (num_inputs) { | 
 |     case 7: | 
 |       if (t.Get<SubtypeTestCache::kDestinationType>() != | 
 |           destination_type.ptr()) { | 
 |         return false; | 
 |       } | 
 |       FALL_THROUGH; | 
 |     case 6: | 
 |       if (t.Get<SubtypeTestCache::kInstanceDelayedFunctionTypeArguments>() != | 
 |           instance_delayed_type_arguments.ptr()) { | 
 |         return false; | 
 |       } | 
 |       FALL_THROUGH; | 
 |     case 5: | 
 |       if (t.Get<SubtypeTestCache::kInstanceParentFunctionTypeArguments>() != | 
 |           instance_parent_function_type_arguments.ptr()) { | 
 |         return false; | 
 |       } | 
 |       FALL_THROUGH; | 
 |     case 4: | 
 |       if (t.Get<SubtypeTestCache::kFunctionTypeArguments>() != | 
 |           function_type_arguments.ptr()) { | 
 |         return false; | 
 |       } | 
 |       FALL_THROUGH; | 
 |     case 3: | 
 |       if (t.Get<SubtypeTestCache::kInstantiatorTypeArguments>() != | 
 |           instantiator_type_arguments.ptr()) { | 
 |         return false; | 
 |       } | 
 |       FALL_THROUGH; | 
 |     case 2: | 
 |       if (t.Get<SubtypeTestCache::kInstanceTypeArguments>() != | 
 |           instance_type_arguments.ptr()) { | 
 |         return false; | 
 |       } | 
 |       FALL_THROUGH; | 
 |     case 1: | 
      // We don't need a load-acquire when re-reading the
      // kInstanceCidOrSignature field, as this comparison is performed only
      // for occupied entries, and occupied entries never change.
 |       return t.Get<SubtypeTestCache::kInstanceCidOrSignature>() == | 
 |              instance_class_id_or_signature.ptr(); | 
 |     default: | 
 |       UNREACHABLE(); | 
 |   } | 
 | } | 
 |  | 
 | SubtypeTestCache::KeyLocation SubtypeTestCache::FindKeyOrUnused( | 
 |     const Array& array, | 
 |     intptr_t num_inputs, | 
 |     const Object& instance_class_id_or_signature, | 
 |     const AbstractType& destination_type, | 
 |     const TypeArguments& instance_type_arguments, | 
 |     const TypeArguments& instantiator_type_arguments, | 
 |     const TypeArguments& function_type_arguments, | 
 |     const TypeArguments& instance_parent_function_type_arguments, | 
 |     const TypeArguments& instance_delayed_type_arguments) { | 
 |   // Fast case for empty STCs. | 
 |   if (array.ptr() == Object::empty_subtype_test_cache_array().ptr()) { | 
 |     return {0, false}; | 
 |   } | 
 |   const bool is_hash = IsHash(array); | 
 |   SubtypeTestCacheTable table(array); | 
 |   const intptr_t num_entries = table.Length(); | 
 |   // For a linear cache, start at the first entry and probe linearly. This can | 
 |   // be done because a linear cache always has at least one unoccupied entry | 
 |   // after all the occupied ones. | 
 |   intptr_t probe = 0; | 
 |   intptr_t probe_distance = 1; | 
 |   if (is_hash) { | 
 |     // For a hash-based cache, instead start at an entry determined by the hash | 
 |     // of the keys. | 
 |     // | 
 |     // If we have an instance cid, then just use that as our starting hash. | 
 |     uint32_t hash = | 
 |         instance_class_id_or_signature.IsFunctionType() | 
 |             ? FunctionType::Cast(instance_class_id_or_signature).Hash() | 
 |             : Smi::Cast(instance_class_id_or_signature).Value(); | 
 |     switch (num_inputs) { | 
 |       case 7: | 
 |         hash = CombineHashes(hash, destination_type.Hash()); | 
 |         FALL_THROUGH; | 
 |       case 6: | 
 |         hash = CombineHashes(hash, instance_delayed_type_arguments.Hash()); | 
 |         FALL_THROUGH; | 
 |       case 5: | 
 |         hash = | 
 |             CombineHashes(hash, instance_parent_function_type_arguments.Hash()); | 
 |         FALL_THROUGH; | 
 |       case 4: | 
 |         hash = CombineHashes(hash, function_type_arguments.Hash()); | 
 |         FALL_THROUGH; | 
 |       case 3: | 
 |         hash = CombineHashes(hash, instantiator_type_arguments.Hash()); | 
 |         FALL_THROUGH; | 
 |       case 2: | 
 |         hash = CombineHashes(hash, instance_type_arguments.Hash()); | 
 |         FALL_THROUGH; | 
 |       case 1: | 
 |         break; | 
 |       default: | 
 |         UNREACHABLE(); | 
 |     } | 
 |     hash = FinalizeHash(hash); | 
 |     probe = hash & (num_entries - 1); | 
 |   } | 
 |   while (true) { | 
 |     const auto& tuple = table.At(probe); | 
 |     if (tuple.Get<kInstanceCidOrSignature, std::memory_order_acquire>() == | 
 |         Object::null()) { | 
 |       break; | 
 |     } | 
 |     if (SubtypeTestCacheEntryMatches( | 
 |             tuple, num_inputs, instance_class_id_or_signature, destination_type, | 
 |             instance_type_arguments, instantiator_type_arguments, | 
 |             function_type_arguments, instance_parent_function_type_arguments, | 
 |             instance_delayed_type_arguments)) { | 
 |       return {probe, true}; | 
 |     } | 
 |     // Advance probe by the current probing distance. | 
 |     probe = probe + probe_distance; | 
 |     if (is_hash) { | 
 |       // Wrap around if the probe goes off the end of the entries array. | 
 |       probe = probe & (num_entries - 1); | 
      // We had a collision, so increase the probe distance. See the comment
      // in EnsureCapacity for an explanation of how this hits all slots.
 |       probe_distance++; | 
 |     } | 
 |   } | 
 |   return {probe, false}; | 
 | } | 
 |  | 
 | ArrayPtr SubtypeTestCache::EnsureCapacity(Zone* zone, | 
 |                                           const Array& array, | 
 |                                           intptr_t new_occupied, | 
 |                                           bool* was_grown) const { | 
 |   ASSERT(new_occupied > NumberOfChecks()); | 
 |   ASSERT(was_grown != nullptr); | 
 |   // How many entries are in the current array (including unoccupied entries). | 
 |   const intptr_t current_capacity = NumEntries(array); | 
 |  | 
 |   // Early returns for cases where no growth is needed. | 
 |   *was_grown = false; | 
 |   const bool is_linear = IsLinear(array); | 
 |   if (is_linear) { | 
 |     // We need at least one unoccupied entry in addition to the occupied ones. | 
 |     if (current_capacity > new_occupied) return array.ptr(); | 
 |   } else { | 
 |     if (LoadFactor(new_occupied, current_capacity) < kMaxLoadFactor) { | 
 |       return array.ptr(); | 
 |     } | 
 |   } | 
 |  | 
 |   // Every path from here should result in a new backing array. | 
 |   *was_grown = true; | 
 |   // Initially null for initializing unoccupied entries. | 
 |   auto& instance_cid_or_signature = Object::Handle(zone); | 
 |   if (new_occupied <= kMaxLinearCacheEntries) { | 
 |     ASSERT(is_linear); | 
    // Not enough room for both the new entry and at least one unoccupied
    // entry, so grow the linear cache's tuple capacity by about 50%. The
    // trailing + 1 guarantees room for at least one new tuple, while the
    // Utils::Minimum caps the number of occupied entries at
    // kMaxLinearCacheEntries.
 |     const intptr_t new_capacity = | 
 |         Utils::Minimum(current_capacity + (current_capacity >> 1), | 
 |                        kMaxLinearCacheEntries) + | 
 |         1; | 
 |     const intptr_t cache_size = new_capacity * kTestEntryLength; | 
 |     ASSERT(cache_size <= kMaxLinearCacheSize); | 
 |     const auto& new_data = | 
 |         Array::Handle(zone, Array::Grow(array, cache_size, Heap::kOld)); | 
 |     ASSERT(!new_data.IsNull()); | 
 |     // No need to adjust old entries, as they are copied over by Array::Grow. | 
 |     // Just mark any new entries as unoccupied. | 
 |     SubtypeTestCacheTable table(new_data); | 
 |     for (intptr_t i = current_capacity; i < new_capacity; i++) { | 
 |       const auto& tuple = table.At(i); | 
 |       tuple.Set<kInstanceCidOrSignature>(instance_cid_or_signature); | 
 |     } | 
 |     return new_data.ptr(); | 
 |   } | 
 |  | 
 |   // Either we're converting a linear cache into a hash-based cache, or the | 
 |   // load factor of the hash-based cache has increased to the point where we | 
 |   // need to grow it. | 
 |   const intptr_t new_capacity = | 
 |       is_linear ? kNumInitialHashCacheEntries : 2 * current_capacity; | 
 |   // Because we use quadratic (actually triangle number) probing it is | 
 |   // important that the size is a power of two (otherwise we could fail to | 
 |   // find an empty slot).  This is described in Knuth's The Art of Computer | 
 |   // Programming Volume 2, Chapter 6.4, exercise 20 (solution in the | 
 |   // appendix, 2nd edition). | 
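  //
  // For example, with capacity 8 and a starting slot of 0, the triangle-
  // number probe offsets 0, 1, 3, 6, 10, 15, 21, 28 reduce modulo 8 to
  // 0, 1, 3, 6, 2, 7, 5, 4, visiting each of the eight slots exactly once.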
 |   // | 
 |   // This is also important because when we do hash probing, we take the | 
 |   // calculated hash from the inputs and then calculate (hash % capacity) to get | 
 |   // the initial probe index. To ensure this is a fast calculation in the stubs, | 
 |   // we ensure the capacity is a power of 2, which allows (hash % capacity) to | 
 |   // be calculated as (hash & (capacity - 1)). | 
 |   ASSERT(Utils::IsPowerOfTwo(new_capacity)); | 
 |   ASSERT(LoadFactor(new_occupied, new_capacity) < kMaxLoadFactor); | 
 |   const intptr_t new_size = new_capacity * kTestEntryLength; | 
 |   const auto& new_data = | 
 |       Array::Handle(zone, Array::NewUninitialized(new_size, Heap::kOld)); | 
 |   ASSERT(!new_data.IsNull()); | 
 |   // Mark all the entries in new_data as unoccupied. | 
 |   SubtypeTestCacheTable to_table(new_data); | 
 |   for (const auto& tuple : to_table) { | 
 |     tuple.Set<kInstanceCidOrSignature>(instance_cid_or_signature); | 
 |   } | 
 |   // Finally, copy over the entries. | 
 |   auto& destination_type = AbstractType::Handle(zone); | 
 |   auto& instance_type_arguments = TypeArguments::Handle(zone); | 
 |   auto& instantiator_type_arguments = TypeArguments::Handle(zone); | 
 |   auto& function_type_arguments = TypeArguments::Handle(zone); | 
 |   auto& instance_parent_function_type_arguments = TypeArguments::Handle(zone); | 
 |   auto& instance_delayed_type_arguments = TypeArguments::Handle(zone); | 
 |   auto& test_result = Bool::Handle(zone); | 
 |   const SubtypeTestCacheTable from_table(array); | 
 |   const intptr_t used_inputs = num_inputs(); | 
 |   for (intptr_t i = 0; i < current_capacity; i++) { | 
 |     const auto& from_tuple = from_table.At(i); | 
 |     // Skip unoccupied entries. | 
 |     if (from_tuple.Get<kInstanceCidOrSignature>() == Object::null()) continue; | 
 |     GetCheckFromArray(array, used_inputs, i, &instance_cid_or_signature, | 
 |                       &destination_type, &instance_type_arguments, | 
 |                       &instantiator_type_arguments, &function_type_arguments, | 
 |                       &instance_parent_function_type_arguments, | 
 |                       &instance_delayed_type_arguments, &test_result); | 
 |     // Since new_data has a different total capacity, we can't use the old | 
 |     // entry indexes, but must recalculate them. | 
 |     auto loc = FindKeyOrUnused( | 
 |         new_data, used_inputs, instance_cid_or_signature, destination_type, | 
 |         instance_type_arguments, instantiator_type_arguments, | 
 |         function_type_arguments, instance_parent_function_type_arguments, | 
 |         instance_delayed_type_arguments); | 
 |     ASSERT(!loc.present); | 
 |     const auto& to_tuple = to_table.At(loc.entry); | 
 |     to_tuple.Set<kTestResult>(test_result); | 
 |     switch (used_inputs) { | 
 |       case 7: | 
 |         to_tuple.Set<kDestinationType>(destination_type); | 
 |         FALL_THROUGH; | 
 |       case 6: | 
 |         to_tuple.Set<kInstanceDelayedFunctionTypeArguments>( | 
 |             instance_delayed_type_arguments); | 
 |         FALL_THROUGH; | 
 |       case 5: | 
 |         to_tuple.Set<kInstanceParentFunctionTypeArguments>( | 
 |             instance_parent_function_type_arguments); | 
 |         FALL_THROUGH; | 
 |       case 4: | 
 |         to_tuple.Set<kFunctionTypeArguments>(function_type_arguments); | 
 |         FALL_THROUGH; | 
 |       case 3: | 
 |         to_tuple.Set<kInstantiatorTypeArguments>(instantiator_type_arguments); | 
 |         FALL_THROUGH; | 
 |       case 2: | 
 |         to_tuple.Set<kInstanceTypeArguments>(instance_type_arguments); | 
 |         FALL_THROUGH; | 
 |       case 1: | 
 |         to_tuple.Set<kInstanceCidOrSignature>(instance_cid_or_signature); | 
 |         break; | 
 |       default: | 
 |         UNREACHABLE(); | 
 |     } | 
 |   } | 
 |   return new_data.ptr(); | 
 | } | 
 |  | 
 | void SubtypeTestCache::GetCheck( | 
 |     intptr_t ix, | 
 |     Object* instance_class_id_or_signature, | 
 |     AbstractType* destination_type, | 
 |     TypeArguments* instance_type_arguments, | 
 |     TypeArguments* instantiator_type_arguments, | 
 |     TypeArguments* function_type_arguments, | 
 |     TypeArguments* instance_parent_function_type_arguments, | 
 |     TypeArguments* instance_delayed_type_arguments, | 
 |     Bool* test_result) const { | 
 |   ASSERT(Thread::Current() | 
 |              ->isolate_group() | 
 |              ->subtype_test_cache_mutex() | 
 |              ->IsOwnedByCurrentThread()); | 
 |   GetCurrentCheck(ix, instance_class_id_or_signature, destination_type, | 
 |                   instance_type_arguments, instantiator_type_arguments, | 
 |                   function_type_arguments, | 
 |                   instance_parent_function_type_arguments, | 
 |                   instance_delayed_type_arguments, test_result); | 
 | } | 
 |  | 
 | void SubtypeTestCache::GetCurrentCheck( | 
 |     intptr_t ix, | 
 |     Object* instance_class_id_or_signature, | 
 |     AbstractType* destination_type, | 
 |     TypeArguments* instance_type_arguments, | 
 |     TypeArguments* instantiator_type_arguments, | 
 |     TypeArguments* function_type_arguments, | 
 |     TypeArguments* instance_parent_function_type_arguments, | 
 |     TypeArguments* instance_delayed_type_arguments, | 
 |     Bool* test_result) const { | 
 |   const Array& array = Array::Handle(cache()); | 
 |   GetCheckFromArray(array, num_inputs(), ix, instance_class_id_or_signature, | 
 |                     destination_type, instance_type_arguments, | 
 |                     instantiator_type_arguments, function_type_arguments, | 
 |                     instance_parent_function_type_arguments, | 
 |                     instance_delayed_type_arguments, test_result); | 
 | } | 
 |  | 
 | void SubtypeTestCache::GetCheckFromArray( | 
 |     const Array& array, | 
 |     intptr_t num_inputs, | 
 |     intptr_t ix, | 
 |     Object* instance_class_id_or_signature, | 
 |     AbstractType* destination_type, | 
 |     TypeArguments* instance_type_arguments, | 
 |     TypeArguments* instantiator_type_arguments, | 
 |     TypeArguments* function_type_arguments, | 
 |     TypeArguments* instance_parent_function_type_arguments, | 
 |     TypeArguments* instance_delayed_type_arguments, | 
 |     Bool* test_result) { | 
 |   ASSERT(array.ptr() != Object::empty_subtype_test_cache_array().ptr()); | 
 |   SubtypeTestCacheTable entries(array); | 
 |   auto entry = entries[ix]; | 
 |   // First get the field that determines occupancy. We have to do this with | 
 |   // load-acquire because some callers may not have the subtype test cache lock. | 
 |   *instance_class_id_or_signature = | 
 |       entry.Get<kInstanceCidOrSignature, std::memory_order_acquire>(); | 
 |   // We should not be retrieving unoccupied entries. | 
 |   ASSERT(!instance_class_id_or_signature->IsNull()); | 
 |   switch (num_inputs) { | 
 |     case 7: | 
 |       *destination_type = entry.Get<kDestinationType>(); | 
 |       FALL_THROUGH; | 
 |     case 6: | 
 |       *instance_delayed_type_arguments = | 
 |           entry.Get<kInstanceDelayedFunctionTypeArguments>(); | 
 |       FALL_THROUGH; | 
 |     case 5: | 
 |       *instance_parent_function_type_arguments = | 
 |           entry.Get<kInstanceParentFunctionTypeArguments>(); | 
 |       FALL_THROUGH; | 
 |     case 4: | 
 |       *function_type_arguments = entry.Get<kFunctionTypeArguments>(); | 
 |       FALL_THROUGH; | 
 |     case 3: | 
 |       *instantiator_type_arguments = entry.Get<kInstantiatorTypeArguments>(); | 
 |       FALL_THROUGH; | 
 |     case 2: | 
 |       *instance_type_arguments = entry.Get<kInstanceTypeArguments>(); | 
 |       FALL_THROUGH; | 
 |     case 1: | 
 |       break; | 
 |     default: | 
 |       UNREACHABLE(); | 
 |   } | 
 |   *test_result = entry.Get<kTestResult>(); | 
 | } | 
 |  | 
 | bool SubtypeTestCache::GetNextCheck( | 
 |     intptr_t* ix, | 
 |     Object* instance_class_id_or_signature, | 
 |     AbstractType* destination_type, | 
 |     TypeArguments* instance_type_arguments, | 
 |     TypeArguments* instantiator_type_arguments, | 
 |     TypeArguments* function_type_arguments, | 
 |     TypeArguments* instance_parent_function_type_arguments, | 
 |     TypeArguments* instance_delayed_type_arguments, | 
 |     Bool* test_result) const { | 
 |   ASSERT(ix != nullptr); | 
 |   for (intptr_t i = *ix; i < NumEntries(); i++) { | 
 |     ASSERT(Thread::Current() | 
 |                ->isolate_group() | 
 |                ->subtype_test_cache_mutex() | 
 |                ->IsOwnedByCurrentThread()); | 
 |     if (IsOccupied(i)) { | 
 |       GetCurrentCheck(i, instance_class_id_or_signature, destination_type, | 
 |                       instance_type_arguments, instantiator_type_arguments, | 
 |                       function_type_arguments, | 
 |                       instance_parent_function_type_arguments, | 
 |                       instance_delayed_type_arguments, test_result); | 
 |       *ix = i + 1; | 
 |       return true; | 
 |     } | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | bool SubtypeTestCache::HasCheck( | 
 |     const Object& instance_class_id_or_signature, | 
 |     const AbstractType& destination_type, | 
 |     const TypeArguments& instance_type_arguments, | 
 |     const TypeArguments& instantiator_type_arguments, | 
 |     const TypeArguments& function_type_arguments, | 
 |     const TypeArguments& instance_parent_function_type_arguments, | 
 |     const TypeArguments& instance_delayed_type_arguments, | 
 |     intptr_t* index, | 
 |     Bool* result) const { | 
 |   const auto& data = Array::Handle(cache()); | 
 |   auto loc = FindKeyOrUnused( | 
 |       data, num_inputs(), instance_class_id_or_signature, destination_type, | 
 |       instance_type_arguments, instantiator_type_arguments, | 
 |       function_type_arguments, instance_parent_function_type_arguments, | 
 |       instance_delayed_type_arguments); | 
 |   if (loc.present) { | 
 |     if (index != nullptr) { | 
 |       *index = loc.entry; | 
 |     } | 
 |     if (result != nullptr) { | 
 |       SubtypeTestCacheTable entries(data); | 
 |       const auto& entry = entries[loc.entry]; | 
 |       // A positive result from FindKeyOrUnused means that load-acquire is not | 
 |       // needed, as an occupied entry never changes for a given backing array. | 
 |       *result = entry.Get<kTestResult>(); | 
 |       ASSERT(!result->IsNull()); | 
 |     } | 
 |   } | 
 |   return loc.present; | 
 | } | 
 |  | 
 | void SubtypeTestCache::WriteEntryToBuffer(Zone* zone, | 
 |                                           BaseTextBuffer* buffer, | 
 |                                           intptr_t index, | 
 |                                           const char* line_prefix) const { | 
 |   ASSERT(Thread::Current() | 
 |              ->isolate_group() | 
 |              ->subtype_test_cache_mutex() | 
 |              ->IsOwnedByCurrentThread()); | 
 |   WriteCurrentEntryToBuffer(zone, buffer, index, line_prefix); | 
 | } | 
 |  | 
 | void SubtypeTestCache::WriteToBuffer(Zone* zone, | 
 |                                      BaseTextBuffer* buffer, | 
 |                                      const char* line_prefix) const { | 
 |   ASSERT(Thread::Current() | 
 |              ->isolate_group() | 
 |              ->subtype_test_cache_mutex() | 
 |              ->IsOwnedByCurrentThread()); | 
 |   WriteToBufferUnlocked(zone, buffer, line_prefix); | 
 | } | 
 |  | 
 | void SubtypeTestCache::WriteCurrentEntryToBuffer( | 
 |     Zone* zone, | 
 |     BaseTextBuffer* buffer, | 
 |     intptr_t index, | 
 |     const char* line_prefix) const { | 
 |   const char* separator = | 
 |       line_prefix == nullptr ? ", " : OS::SCreate(zone, "\n%s", line_prefix); | 
 |   auto& instance_class_id_or_signature = Object::Handle(zone); | 
 |   auto& destination_type = AbstractType::Handle(zone); | 
 |   auto& instance_type_arguments = TypeArguments::Handle(zone); | 
 |   auto& instantiator_type_arguments = TypeArguments::Handle(zone); | 
 |   auto& function_type_arguments = TypeArguments::Handle(zone); | 
 |   auto& instance_parent_function_type_arguments = TypeArguments::Handle(zone); | 
 |   auto& instance_delayed_type_arguments = TypeArguments::Handle(zone); | 
 |   auto& result = Bool::Handle(zone); | 
 |   GetCurrentCheck(index, &instance_class_id_or_signature, &destination_type, | 
 |                   &instance_type_arguments, &instantiator_type_arguments, | 
 |                   &function_type_arguments, | 
 |                   &instance_parent_function_type_arguments, | 
 |                   &instance_delayed_type_arguments, &result); | 
 |   buffer->Printf( | 
 |       "%" Pd ": [ %#" Px ", %#" Px ", %#" Px ", %#" Px ", %#" Px ", %#" Px | 
 |       ", %#" Px ", %#" Px " ]", | 
 |       index, static_cast<uword>(instance_class_id_or_signature.ptr()), | 
 |       static_cast<uword>(instance_type_arguments.ptr()), | 
 |       static_cast<uword>(instantiator_type_arguments.ptr()), | 
 |       static_cast<uword>(function_type_arguments.ptr()), | 
 |       static_cast<uword>(instance_parent_function_type_arguments.ptr()), | 
 |       static_cast<uword>(instance_delayed_type_arguments.ptr()), | 
 |       static_cast<uword>(destination_type.ptr()), | 
 |       static_cast<uword>(result.ptr())); | 
 |   if (instance_class_id_or_signature.IsSmi()) { | 
 |     buffer->Printf("%sclass id: %" Pd "", separator, | 
 |                    Smi::Cast(instance_class_id_or_signature).Value()); | 
 |   } else { | 
 |     buffer->Printf( | 
 |         "%ssignature: %s", separator, | 
 |         FunctionType::Cast(instance_class_id_or_signature).ToCString()); | 
 |   } | 
 |   if (!instance_type_arguments.IsNull()) { | 
 |     if (instance_class_id_or_signature.IsSmi()) { | 
 |       buffer->Printf("%sinstance type arguments: %s", separator, | 
 |                      instance_type_arguments.ToCString()); | 
 |     } else { | 
 |       ASSERT(instance_class_id_or_signature.IsFunctionType()); | 
 |       buffer->Printf("%sclosure instantiator function type arguments: %s", | 
 |                      separator, instance_type_arguments.ToCString()); | 
 |     } | 
 |   } | 
 |   if (!instantiator_type_arguments.IsNull()) { | 
 |     buffer->Printf("%sinstantiator type arguments: %s", separator, | 
 |                    instantiator_type_arguments.ToCString()); | 
 |   } | 
 |   if (!function_type_arguments.IsNull()) { | 
 |     buffer->Printf("%sfunction type arguments: %s", separator, | 
 |                    function_type_arguments.ToCString()); | 
 |   } | 
 |   if (!instance_parent_function_type_arguments.IsNull()) { | 
 |     buffer->Printf("%sclosure parent function type arguments: %s", separator, | 
 |                    instance_parent_function_type_arguments.ToCString()); | 
 |   } | 
 |   if (!instance_delayed_type_arguments.IsNull()) { | 
 |     buffer->Printf("%sclosure delayed function type arguments: %s", separator, | 
 |                    instance_delayed_type_arguments.ToCString()); | 
 |   } | 
 |   if (!destination_type.IsNull()) { | 
 |     buffer->Printf("%sdestination type: %s", separator, | 
 |                    destination_type.ToCString()); | 
 |     if (!destination_type.IsInstantiated()) { | 
 |       AbstractType& test_type = AbstractType::Handle( | 
 |           zone, destination_type.InstantiateFrom(instantiator_type_arguments, | 
 |                                                  function_type_arguments, | 
 |                                                  kAllFree, Heap::kNew)); | 
 |       const auto type_class_id = test_type.type_class_id(); | 
 |       buffer->Printf("%sinstantiated type: %s", separator, | 
 |                      test_type.ToCString()); | 
 |       buffer->Printf("%sinstantiated type class id: %d", separator, | 
 |                      type_class_id); | 
 |     } | 
 |   } | 
 |   buffer->Printf("%sresult: %s", separator, result.ToCString()); | 
 | } | 
 |  | 
 | void SubtypeTestCache::WriteToBufferUnlocked(Zone* zone, | 
 |                                              BaseTextBuffer* buffer, | 
 |                                              const char* line_prefix) const { | 
 |   const char* separator = | 
 |       line_prefix == nullptr ? " " : OS::SCreate(zone, "\n%s", line_prefix); | 
 |   const char* internal_line_prefix = | 
 |       line_prefix == nullptr | 
 |           ? nullptr | 
 |           : OS::SCreate(zone, "%s%s", line_prefix, line_prefix); | 
 |   const intptr_t num_entries = NumEntries(); | 
 |   buffer->Printf("SubtypeTestCache(%" Pd ", %" Pd "", num_inputs(), | 
 |                  num_occupied()); | 
 |   for (intptr_t i = 0; i < num_entries; i++) { | 
 |     if (!IsOccupied(i)) continue; | 
 |     buffer->Printf(",%s{", separator); | 
 |     WriteCurrentEntryToBuffer(zone, buffer, i, internal_line_prefix); | 
 |     buffer->Printf(line_prefix != nullptr ? "}" : " }"); | 
 |   } | 
 |   buffer->AddString(line_prefix != nullptr && num_entries != 0 ? "\n)" : ")"); | 
 | } | 
 |  | 
 | void SubtypeTestCache::Reset() const { | 
 |   set_num_occupied(0); | 
 |   set_cache(Object::empty_subtype_test_cache_array()); | 
 | } | 
 |  | 
 | bool SubtypeTestCache::Equals(const SubtypeTestCache& other) const { | 
 |   ASSERT(Thread::Current() | 
 |              ->isolate_group() | 
 |              ->subtype_test_cache_mutex() | 
 |              ->IsOwnedByCurrentThread()); | 
 |   if (ptr() == other.ptr()) { | 
 |     return true; | 
 |   } | 
 |   if (num_inputs() != other.num_inputs()) return false; | 
 |   if (num_occupied() != other.num_occupied()) return false; | 
 |   return Array::Handle(cache()).Equals(Array::Handle(other.cache())); | 
 | } | 
 |  | 
 | SubtypeTestCachePtr SubtypeTestCache::Copy(Thread* thread) const { | 
 |   ASSERT(thread->isolate_group() | 
 |              ->subtype_test_cache_mutex() | 
 |              ->IsOwnedByCurrentThread()); | 
 |   if (IsNull()) { | 
 |     return SubtypeTestCache::null(); | 
 |   } | 
 |   Zone* const zone = thread->zone(); | 
  // The backing array is mutated in place while it still has unoccupied
  // entries and is only replaced when it grows, so the copy must duplicate
  // the array to stay independent of future insertions into this cache.
 |   const auto& result = | 
 |       SubtypeTestCache::Handle(zone, SubtypeTestCache::New(num_inputs())); | 
 |   auto& entry_cache = Array::Handle(zone, cache()); | 
 |   entry_cache = entry_cache.Copy(); | 
 |   result.set_cache(entry_cache); | 
 |   result.set_num_occupied(num_occupied()); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | bool SubtypeTestCache::IsOccupied(intptr_t index) const { | 
 |   ASSERT(!IsNull()); | 
 |   ASSERT(index < NumEntries()); | 
 |   const intptr_t cache_index = | 
 |       index * kTestEntryLength + kInstanceCidOrSignature; | 
 |   NoSafepointScope no_safepoint; | 
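  // This load-acquire pairs with the store-release publication in AddCheck:
  // observing a non-null cid or signature guarantees that the rest of the
  // entry's fields are visible to this thread.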
 |   return cache()->untag()->element<std::memory_order_acquire>(cache_index) != | 
 |          Object::null(); | 
 | } | 
 |  | 
 | intptr_t SubtypeTestCache::UsedInputsForType(const AbstractType& type) { | 
 |   if (type.IsType()) { | 
 |     if (type.IsInstantiated()) return 2; | 
 |     if (type.IsInstantiated(kFunctions)) return 3; | 
 |     return 4; | 
 |   } | 
 |   // Default to all inputs except for the destination type, which must be | 
 |   // statically known, otherwise this method wouldn't be called. | 
 |   static_assert(kDestinationType == kMaxInputs - 1, | 
 |                 "destination type is not last input"); | 
 |   return kMaxInputs - 1; | 
 | } | 
 |  | 
 | const char* SubtypeTestCache::ToCString() const { | 
 |   auto const zone = Thread::Current()->zone(); | 
 |   ZoneTextBuffer buffer(zone); | 
 |   WriteToBufferUnlocked(zone, &buffer); | 
 |   return buffer.buffer(); | 
 | } | 
 |  | 
 | LoadingUnitPtr LoadingUnit::New(intptr_t id, const LoadingUnit& parent) { | 
 |   ASSERT(Object::loadingunit_class() != Class::null()); | 
  // LoadingUnit objects are long-lived, so allocate them in the
  // old generation.
 |   auto result = Object::Allocate<LoadingUnit>(Heap::kOld); | 
 |   NoSafepointScope scope; | 
 |   ASSERT(Utils::IsInt(UntaggedLoadingUnit::IdBits::bitsize(), id)); | 
 |   result->untag()->packed_fields_.Update<UntaggedLoadingUnit::IdBits>(id); | 
 |   result->untag()->set_parent(parent.ptr()); | 
 |   return result; | 
 | } | 
 |  | 
 | void LoadingUnit::set_base_objects(const Array& value) const { | 
 |   untag()->set_base_objects(value.ptr()); | 
 | } | 
 |  | 
 | const char* LoadingUnit::ToCString() const { | 
 |   return "LoadingUnit"; | 
 | } | 
 |  | 
 | ObjectPtr LoadingUnit::IssueLoad() const { | 
 |   set_load_outstanding(); | 
 |   return Isolate::Current()->CallDeferredLoadHandler(id()); | 
 | } | 
 |  | 
 | ObjectPtr LoadingUnit::CompleteLoad(const String& error_message, | 
 |                                     bool transient_error) const { | 
 |   set_loaded(error_message.IsNull()); | 
 |  | 
 |   const Library& lib = Library::Handle(Library::CoreLibrary()); | 
 |   const String& sel = String::Handle(String::New("_completeLoads")); | 
 |   const Function& func = Function::Handle(lib.LookupFunctionAllowPrivate(sel)); | 
 |   ASSERT(!func.IsNull()); | 
 |   const Array& args = Array::Handle(Array::New(3)); | 
 |   args.SetAt(0, Smi::Handle(Smi::New(id()))); | 
 |   args.SetAt(1, error_message); | 
 |   args.SetAt(2, Bool::Get(transient_error)); | 
 |   return DartEntry::InvokeFunction(func, args); | 
 | } | 
 |  | 
 | // The assignment to loading units here must match that in | 
 | // AssignLoadingUnitsCodeVisitor, which runs after compilation is done. | 
 | intptr_t LoadingUnit::LoadingUnitOf(const Function& function) { | 
 |   Thread* thread = Thread::Current(); | 
 |   REUSABLE_CLASS_HANDLESCOPE(thread); | 
 |   REUSABLE_LIBRARY_HANDLESCOPE(thread); | 
 |   REUSABLE_LOADING_UNIT_HANDLESCOPE(thread); | 
 |  | 
 |   Class& cls = thread->ClassHandle(); | 
 |   Library& lib = thread->LibraryHandle(); | 
 |   LoadingUnit& unit = thread->LoadingUnitHandle(); | 
 |  | 
 |   cls = function.Owner(); | 
 |   lib = cls.library(); | 
 |   unit = lib.loading_unit(); | 
 |   if (unit.IsNull()) { | 
 |     FATAL("Unable to find loading unit of %s (class %s, library %s)", | 
 |           function.ToFullyQualifiedCString(), cls.ToCString(), lib.ToCString()); | 
 |   } | 
 |   return unit.id(); | 
 | } | 
 |  | 
 | intptr_t LoadingUnit::LoadingUnitOf(const Code& code) { | 
 |   if (code.IsStubCode() || code.IsTypeTestStubCode() || | 
 |       code.IsAllocationStubCode()) { | 
 |     return LoadingUnit::kRootId; | 
 |   } else { | 
 |     Thread* thread = Thread::Current(); | 
 |     REUSABLE_FUNCTION_HANDLESCOPE(thread); | 
 |     REUSABLE_CLASS_HANDLESCOPE(thread); | 
 |     REUSABLE_LIBRARY_HANDLESCOPE(thread); | 
 |     REUSABLE_LOADING_UNIT_HANDLESCOPE(thread); | 
 |  | 
 |     Class& cls = thread->ClassHandle(); | 
 |     Library& lib = thread->LibraryHandle(); | 
 |     LoadingUnit& unit = thread->LoadingUnitHandle(); | 
 |     Function& func = thread->FunctionHandle(); | 
 |  | 
 |     if (code.IsFunctionCode()) { | 
 |       func ^= code.function(); | 
 |       cls = func.Owner(); | 
 |       lib = cls.library(); | 
 |       unit = lib.loading_unit(); | 
 |       ASSERT(!unit.IsNull()); | 
 |       return unit.id(); | 
 |     } else { | 
 |       UNREACHABLE(); | 
 |       return LoadingUnit::kIllegalId; | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | const char* Error::ToErrorCString() const { | 
 |   if (IsNull()) { | 
 |     return "Error: null"; | 
 |   } | 
 |   UNREACHABLE(); | 
 |   return "Error"; | 
 | } | 
 |  | 
 | const char* Error::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "Error: null"; | 
 |   } | 
 |   // Error is an abstract class.  We should never reach here. | 
 |   UNREACHABLE(); | 
 |   return "Error"; | 
 | } | 
 |  | 
 | ApiErrorPtr ApiError::New() { | 
 |   ASSERT(Object::api_error_class() != Class::null()); | 
 |   return Object::Allocate<ApiError>(Heap::kOld); | 
 | } | 
 |  | 
 | ApiErrorPtr ApiError::New(const String& message, Heap::Space space) { | 
 | #ifndef PRODUCT | 
 |   if (FLAG_print_stacktrace_at_api_error) { | 
 |     OS::PrintErr("ApiError: %s\n", message.ToCString()); | 
 |     Profiler::DumpStackTrace(false /* for_crash */); | 
 |   } | 
 | #endif  // !PRODUCT | 
 |  | 
 |   ASSERT(Object::api_error_class() != Class::null()); | 
 |   const auto& result = ApiError::Handle(Object::Allocate<ApiError>(space)); | 
 |   result.set_message(message); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void ApiError::set_message(const String& message) const { | 
 |   untag()->set_message(message.ptr()); | 
 | } | 
 |  | 
 | const char* ApiError::ToErrorCString() const { | 
 |   const String& msg_str = String::Handle(message()); | 
 |   return msg_str.ToCString(); | 
 | } | 
 |  | 
 | const char* ApiError::ToCString() const { | 
 |   return "ApiError"; | 
 | } | 
 |  | 
 | LanguageErrorPtr LanguageError::New() { | 
 |   ASSERT(Object::language_error_class() != Class::null()); | 
 |   return Object::Allocate<LanguageError>(Heap::kOld); | 
 | } | 
 |  | 
 | LanguageErrorPtr LanguageError::NewFormattedV(const Error& prev_error, | 
 |                                               const Script& script, | 
 |                                               TokenPosition token_pos, | 
 |                                               bool report_after_token, | 
 |                                               Report::Kind kind, | 
 |                                               Heap::Space space, | 
 |                                               const char* format, | 
 |                                               va_list args) { | 
 |   ASSERT(Object::language_error_class() != Class::null()); | 
 |   const auto& result = | 
 |       LanguageError::Handle(Object::Allocate<LanguageError>(space)); | 
 |   result.set_previous_error(prev_error); | 
 |   result.set_script(script); | 
 |   result.set_token_pos(token_pos); | 
 |   result.set_report_after_token(report_after_token); | 
 |   result.set_kind(kind); | 
 |   result.set_message( | 
 |       String::Handle(String::NewFormattedV(format, args, space))); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | LanguageErrorPtr LanguageError::NewFormatted(const Error& prev_error, | 
 |                                              const Script& script, | 
 |                                              TokenPosition token_pos, | 
 |                                              bool report_after_token, | 
 |                                              Report::Kind kind, | 
 |                                              Heap::Space space, | 
 |                                              const char* format, | 
 |                                              ...) { | 
 |   va_list args; | 
 |   va_start(args, format); | 
 |   LanguageErrorPtr result = LanguageError::NewFormattedV( | 
 |       prev_error, script, token_pos, report_after_token, kind, space, format, | 
 |       args); | 
 |   NoSafepointScope no_safepoint; | 
 |   va_end(args); | 
 |   return result; | 
 | } | 
 |  | 
 | LanguageErrorPtr LanguageError::New(const String& formatted_message, | 
 |                                     Report::Kind kind, | 
 |                                     Heap::Space space) { | 
 |   ASSERT(Object::language_error_class() != Class::null()); | 
 |   const auto& result = | 
 |       LanguageError::Handle(Object::Allocate<LanguageError>(space)); | 
 |   result.set_formatted_message(formatted_message); | 
 |   result.set_kind(kind); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void LanguageError::set_previous_error(const Error& value) const { | 
 |   untag()->set_previous_error(value.ptr()); | 
 | } | 
 |  | 
 | void LanguageError::set_script(const Script& value) const { | 
 |   untag()->set_script(value.ptr()); | 
 | } | 
 |  | 
 | void LanguageError::set_token_pos(TokenPosition token_pos) const { | 
 |   ASSERT(!token_pos.IsClassifying()); | 
 |   StoreNonPointer(&untag()->token_pos_, token_pos); | 
 | } | 
 |  | 
 | void LanguageError::set_report_after_token(bool value) const { | 
 |   StoreNonPointer(&untag()->report_after_token_, value); | 
 | } | 
 |  | 
 | void LanguageError::set_kind(uint8_t value) const { | 
 |   StoreNonPointer(&untag()->kind_, value); | 
 | } | 
 |  | 
 | void LanguageError::set_message(const String& value) const { | 
 |   untag()->set_message(value.ptr()); | 
 | } | 
 |  | 
 | void LanguageError::set_formatted_message(const String& value) const { | 
 |   untag()->set_formatted_message(value.ptr()); | 
 | } | 
 |  | 
 | StringPtr LanguageError::FormatMessage() const { | 
 |   if (formatted_message() != String::null()) { | 
 |     return formatted_message(); | 
 |   } | 
 |   String& result = String::Handle( | 
 |       Report::PrependSnippet(kind(), Script::Handle(script()), token_pos(), | 
 |                              report_after_token(), String::Handle(message()))); | 
 |   // Prepend previous error message. | 
 |   const Error& prev_error = Error::Handle(previous_error()); | 
 |   if (!prev_error.IsNull()) { | 
 |     result = String::Concat( | 
 |         String::Handle(String::New(prev_error.ToErrorCString())), result); | 
 |   } | 
 |   set_formatted_message(result); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* LanguageError::ToErrorCString() const { | 
 |   const String& msg_str = String::Handle(FormatMessage()); | 
 |   return msg_str.ToCString(); | 
 | } | 
 |  | 
 | const char* LanguageError::ToCString() const { | 
 |   return "LanguageError"; | 
 | } | 
 |  | 
 | UnhandledExceptionPtr UnhandledException::New(const Instance& exception, | 
 |                                               const Instance& stacktrace, | 
 |                                               Heap::Space space) { | 
 |   ASSERT(Object::unhandled_exception_class() != Class::null()); | 
 |   const auto& result = | 
 |       UnhandledException::Handle(Object::Allocate<UnhandledException>(space)); | 
 |   result.set_exception(exception); | 
 |   result.set_stacktrace(stacktrace); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | UnhandledExceptionPtr UnhandledException::New(Heap::Space space) { | 
 |   ASSERT(Object::unhandled_exception_class() != Class::null()); | 
 |   return Object::Allocate<UnhandledException>(space); | 
 | } | 
 |  | 
 | void UnhandledException::set_exception(const Instance& exception) const { | 
 |   untag()->set_exception(exception.ptr()); | 
 | } | 
 |  | 
 | void UnhandledException::set_stacktrace(const Instance& stacktrace) const { | 
 |   untag()->set_stacktrace(stacktrace.ptr()); | 
 | } | 
 |  | 
 | const char* UnhandledException::ToErrorCString() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   NoReloadScope no_reload_scope(thread); | 
 |   HANDLESCOPE(thread); | 
 |   Object& strtmp = Object::Handle(); | 
 |   const char* exc_str; | 
 |   if (exception() == isolate_group->object_store()->out_of_memory()) { | 
 |     exc_str = "Out of Memory"; | 
 |   } else if (exception() == isolate_group->object_store()->stack_overflow()) { | 
 |     exc_str = "Stack Overflow"; | 
 |   } else { | 
 |     const Instance& exc = Instance::Handle(exception()); | 
 |     strtmp = DartLibraryCalls::ToString(exc); | 
 |     if (!strtmp.IsError()) { | 
 |       exc_str = strtmp.ToCString(); | 
 |     } else { | 
 |       exc_str = "<Received error while converting exception to string>"; | 
 |     } | 
 |   } | 
 |   const Instance& stack = Instance::Handle(stacktrace()); | 
 |   const char* stack_str; | 
 |   if (stack.IsNull()) { | 
 |     stack_str = "null"; | 
 |   } else if (stack.IsStackTrace()) { | 
 |     stack_str = StackTrace::Cast(stack).ToCString(); | 
 |   } else { | 
 |     strtmp = DartLibraryCalls::ToString(stack); | 
 |     if (!strtmp.IsError()) { | 
 |       stack_str = strtmp.ToCString(); | 
 |     } else { | 
 |       stack_str = "<Received error while converting stack trace to string>"; | 
 |     } | 
 |   } | 
 |   return OS::SCreate(thread->zone(), "Unhandled exception:\n%s\n%s", exc_str, | 
 |                      stack_str); | 
 | } | 
 |  | 
 | const char* UnhandledException::ToCString() const { | 
 |   return "UnhandledException"; | 
 | } | 
 |  | 
 | UnwindErrorPtr UnwindError::New(const String& message, Heap::Space space) { | 
 |   ASSERT(Object::unwind_error_class() != Class::null()); | 
 |   const auto& result = | 
 |       UnwindError::Handle(Object::Allocate<UnwindError>(space)); | 
 |   result.set_message(message); | 
 |   ASSERT_EQUAL(result.is_user_initiated(), false); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void UnwindError::set_message(const String& message) const { | 
 |   untag()->set_message(message.ptr()); | 
 | } | 
 |  | 
 | void UnwindError::set_is_user_initiated(bool value) const { | 
 |   StoreNonPointer(&untag()->is_user_initiated_, value); | 
 | } | 
 |  | 
 | const char* UnwindError::ToErrorCString() const { | 
 |   const String& msg_str = String::Handle(message()); | 
 |   return msg_str.ToCString(); | 
 | } | 
 |  | 
 | const char* UnwindError::ToCString() const { | 
 |   return "UnwindError"; | 
 | } | 
 |  | 
 | ObjectPtr Instance::InvokeGetter(const String& getter_name, | 
 |                                  bool respect_reflectable, | 
 |                                  bool check_is_entrypoint) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |  | 
 |   Class& klass = Class::Handle(zone, clazz()); | 
 |   CHECK_ERROR(klass.EnsureIsFinalized(thread)); | 
 |   const auto& inst_type_args = | 
 |       klass.NumTypeArguments() > 0 | 
 |           ? TypeArguments::Handle(zone, GetTypeArguments()) | 
 |           : Object::null_type_arguments(); | 
 |  | 
 |   const String& internal_getter_name = | 
 |       String::Handle(zone, Field::GetterName(getter_name)); | 
 |   Function& function = Function::Handle( | 
 |       zone, | 
 |       Resolver::ResolveDynamicAnyArgs(zone, klass, internal_getter_name, | 
 |                                       /*allow_add=*/!FLAG_precompiled_mode)); | 
 |  | 
 |   if (!function.IsNull() && check_is_entrypoint) { | 
    // The getter must correspond to either an entry-point field or a getter
    // method explicitly marked as an entry point.
 |     Field& field = Field::Handle(zone); | 
 |     if (function.kind() == UntaggedFunction::kImplicitGetter) { | 
 |       field = function.accessor_field(); | 
 |     } | 
 |     if (!field.IsNull()) { | 
 |       CHECK_ERROR(field.VerifyEntryPoint(EntryPointPragma::kGetterOnly)); | 
 |     } else { | 
 |       CHECK_ERROR(function.VerifyCallEntryPoint()); | 
 |     } | 
 |   } | 
 |  | 
 |   // Check for method extraction when method extractors are not lazily created. | 
 |   if (function.IsNull() && FLAG_precompiled_mode) { | 
 |     function = Resolver::ResolveDynamicAnyArgs(zone, klass, getter_name, | 
 |                                                /*allow_add=*/false); | 
 |  | 
 |     if (!function.IsNull() && check_is_entrypoint) { | 
 |       CHECK_ERROR(function.VerifyClosurizedEntryPoint()); | 
 |     } | 
 |  | 
 |     if (!function.IsNull() && function.SafeToClosurize()) { | 
 |       const Function& closure_function = | 
 |           Function::Handle(zone, function.ImplicitClosureFunction()); | 
 |       return closure_function.ImplicitInstanceClosure(*this); | 
 |     } | 
 |   } | 
 |  | 
 |   const int kTypeArgsLen = 0; | 
 |   const int kNumArgs = 1; | 
 |   const Array& args = Array::Handle(zone, Array::New(kNumArgs)); | 
 |   args.SetAt(0, *this); | 
 |   const Array& args_descriptor = Array::Handle( | 
 |       zone, | 
 |       ArgumentsDescriptor::NewBoxed(kTypeArgsLen, args.Length(), Heap::kNew)); | 
 |  | 
 |   return InvokeInstanceFunction(thread, *this, function, internal_getter_name, | 
 |                                 args, args_descriptor, respect_reflectable, | 
 |                                 inst_type_args); | 
 | } | 
 |  | 
 | ObjectPtr Instance::InvokeSetter(const String& setter_name, | 
 |                                  const Instance& value, | 
 |                                  bool respect_reflectable, | 
 |                                  bool check_is_entrypoint) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |  | 
 |   const Class& klass = Class::Handle(zone, clazz()); | 
 |   CHECK_ERROR(klass.EnsureIsFinalized(thread)); | 
 |   const auto& inst_type_args = | 
 |       klass.NumTypeArguments() > 0 | 
 |           ? TypeArguments::Handle(zone, GetTypeArguments()) | 
 |           : Object::null_type_arguments(); | 
 |  | 
 |   const String& internal_setter_name = | 
 |       String::Handle(zone, Field::SetterName(setter_name)); | 
 |   const Function& setter = Function::Handle( | 
 |       zone, | 
 |       Resolver::ResolveDynamicAnyArgs(zone, klass, internal_setter_name, | 
 |                                       /*allow_add=*/!FLAG_precompiled_mode)); | 
 |  | 
 |   if (check_is_entrypoint) { | 
    // The setter must correspond to either an entry-point field or a setter
    // method explicitly marked as an entry point.
 |     Field& field = Field::Handle(zone); | 
 |     if (setter.kind() == UntaggedFunction::kImplicitSetter) { | 
 |       field = setter.accessor_field(); | 
 |     } | 
 |     if (!field.IsNull()) { | 
 |       CHECK_ERROR(field.VerifyEntryPoint(EntryPointPragma::kSetterOnly)); | 
 |     } else if (!setter.IsNull()) { | 
 |       CHECK_ERROR(setter.VerifyCallEntryPoint()); | 
 |     } | 
 |   } | 
 |  | 
 |   const int kTypeArgsLen = 0; | 
 |   const int kNumArgs = 2; | 
 |   const Array& args = Array::Handle(zone, Array::New(kNumArgs)); | 
 |   args.SetAt(0, *this); | 
 |   args.SetAt(1, value); | 
 |   const Array& args_descriptor = Array::Handle( | 
 |       zone, | 
 |       ArgumentsDescriptor::NewBoxed(kTypeArgsLen, args.Length(), Heap::kNew)); | 
 |  | 
 |   return InvokeInstanceFunction(thread, *this, setter, internal_setter_name, | 
 |                                 args, args_descriptor, respect_reflectable, | 
 |                                 inst_type_args); | 
 | } | 
 |  | 
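// Dynamically resolves 'function_name' on this instance and invokes it. If no
// method is found, falls back to invoking a getter of the same name and then
// calling the result as a closure, matching Dart's semantics for invoking a
// getter or field that yields a function.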
 | ObjectPtr Instance::Invoke(const String& function_name, | 
 |                            const Array& args, | 
 |                            const Array& arg_names, | 
 |                            bool respect_reflectable, | 
 |                            bool check_is_entrypoint) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   Class& klass = Class::Handle(zone, clazz()); | 
 |   CHECK_ERROR(klass.EnsureIsFinalized(thread)); | 
 |  | 
 |   Function& function = Function::Handle( | 
 |       zone, | 
 |       Resolver::ResolveDynamicAnyArgs(zone, klass, function_name, | 
 |                                       /*allow_add=*/!FLAG_precompiled_mode)); | 
 |  | 
 |   if (!function.IsNull() && check_is_entrypoint) { | 
 |     CHECK_ERROR(function.VerifyCallEntryPoint()); | 
 |   } | 
 |  | 
  // We don't pass any explicit type arguments; lower layers interpret this as
  // using dynamic for any function type arguments.
 |   const int kTypeArgsLen = 0; | 
 |   const Array& args_descriptor = Array::Handle( | 
 |       zone, ArgumentsDescriptor::NewBoxed(kTypeArgsLen, args.Length(), | 
 |                                           arg_names, Heap::kNew)); | 
 |  | 
 |   const auto& inst_type_args = | 
 |       klass.NumTypeArguments() > 0 | 
 |           ? TypeArguments::Handle(zone, GetTypeArguments()) | 
 |           : Object::null_type_arguments(); | 
 |  | 
 |   if (function.IsNull()) { | 
 |     // Didn't find a method: try to find a getter and invoke call on its result. | 
 |     const String& getter_name = | 
 |         String::Handle(zone, Field::GetterName(function_name)); | 
 |     function = | 
 |         Resolver::ResolveDynamicAnyArgs(zone, klass, getter_name, | 
 |                                         /*allow_add=*/!FLAG_precompiled_mode); | 
 |     if (!function.IsNull()) { | 
 |       if (check_is_entrypoint) { | 
 |         CHECK_ERROR(EntryPointFieldInvocationError(function_name)); | 
 |       } | 
 |       ASSERT(function.kind() != UntaggedFunction::kMethodExtractor); | 
 |       // Invoke the getter. | 
 |       const int kNumArgs = 1; | 
 |       const Array& getter_args = Array::Handle(zone, Array::New(kNumArgs)); | 
 |       getter_args.SetAt(0, *this); | 
 |       const Array& getter_args_descriptor = Array::Handle( | 
 |           zone, ArgumentsDescriptor::NewBoxed( | 
 |                     kTypeArgsLen, getter_args.Length(), Heap::kNew)); | 
 |       const Object& getter_result = Object::Handle( | 
 |           zone, InvokeInstanceFunction(thread, *this, function, getter_name, | 
 |                                        getter_args, getter_args_descriptor, | 
 |                                        respect_reflectable, inst_type_args)); | 
 |       if (getter_result.IsError()) { | 
 |         return getter_result.ptr(); | 
 |       } | 
      // Install the getter result (the closure) as the receiver in the
      // arguments list.
 |       args.SetAt(0, getter_result); | 
 |       return DartEntry::InvokeClosure(thread, args, args_descriptor); | 
 |     } | 
 |   } | 
 |  | 
 |   // Found an ordinary method. | 
 |   return InvokeInstanceFunction(thread, *this, function, function_name, args, | 
 |                                 args_descriptor, respect_reflectable, | 
 |                                 inst_type_args); | 
 | } | 
 |  | 
 | ObjectPtr Instance::HashCode() const { | 
 |   // TODO(koda): Optimize for all builtin classes and all classes | 
 |   // that do not override hashCode. | 
 |   return DartLibraryCalls::HashCode(*this); | 
 | } | 
 |  | 
 | // Keep in sync with AsmIntrinsifier::Object_getHash. | 
 | IntegerPtr Instance::IdentityHashCode(Thread* thread) const { | 
 |   if (IsInteger()) return Integer::Cast(*this).ptr(); | 
 |  | 
 | #if defined(HASH_IN_OBJECT_HEADER) | 
 |   intptr_t hash = Object::GetCachedHash(ptr()); | 
 | #else | 
 |   intptr_t hash = thread->heap()->GetHash(ptr()); | 
 | #endif | 
 |   if (hash == 0) { | 
 |     if (IsNull()) { | 
 |       hash = kNullIdentityHash; | 
 |     } else if (IsBool()) { | 
 |       hash = Bool::Cast(*this).value() ? kTrueIdentityHash : kFalseIdentityHash; | 
 |     } else if (IsDouble()) { | 
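      // If the value is exactly representable as an int64, hash it as that
      // integer (an integer's identity hash is the integer itself); otherwise
      // fold the 64-bit pattern of the double into the Smi range.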
 |       double val = Double::Cast(*this).value(); | 
 |       if ((val >= kMinInt64RepresentableAsDouble) && | 
 |           (val <= kMaxInt64RepresentableAsDouble)) { | 
 |         int64_t ival = static_cast<int64_t>(val); | 
 |         if (static_cast<double>(ival) == val) { | 
 |           return Integer::New(ival); | 
 |         } | 
 |       } | 
 |  | 
 |       uint64_t uval = bit_cast<uint64_t>(val); | 
 |       hash = ((uval >> 32) ^ (uval)) & kSmiMax; | 
 |     } else { | 
 |       do { | 
 |         hash = thread->random()->NextUInt32() & 0x3FFFFFFF; | 
 |       } while (hash == 0); | 
 |     } | 
 |  | 
 | #if defined(HASH_IN_OBJECT_HEADER) | 
 |     hash = Object::SetCachedHashIfNotSet(ptr(), hash); | 
 | #else | 
 |     hash = thread->heap()->SetHashIfNotSet(ptr(), hash); | 
 | #endif | 
 |   } | 
 |   return Smi::New(hash); | 
 | } | 
 |  | 
 | bool Instance::CanonicalizeEquals(const Instance& other) const { | 
 |   if (this->ptr() == other.ptr()) { | 
 |     return true;  // "===". | 
 |   } | 
 |  | 
 |   if (other.IsNull() || (this->clazz() != other.clazz())) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   { | 
 |     NoSafepointScope no_safepoint; | 
 |     // Raw bits compare. | 
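    // Both sides are decompressed with the same heap base, so comparing the
    // decompressed slots is equivalent to comparing the raw field words.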
 |     const intptr_t instance_size = SizeFromClass(); | 
 |     ASSERT(instance_size != 0); | 
 |     const intptr_t other_instance_size = other.SizeFromClass(); | 
 |     ASSERT(other_instance_size != 0); | 
 |     if (instance_size != other_instance_size) { | 
 |       return false; | 
 |     } | 
 |     uword this_addr = reinterpret_cast<uword>(this->untag()); | 
 |     uword other_addr = reinterpret_cast<uword>(other.untag()); | 
 |     for (intptr_t offset = Instance::NextFieldOffset(); offset < instance_size; | 
 |          offset += kCompressedWordSize) { | 
 |       if ((reinterpret_cast<CompressedObjectPtr*>(this_addr + offset) | 
 |                ->Decompress(untag()->heap_base())) != | 
 |           (reinterpret_cast<CompressedObjectPtr*>(other_addr + offset) | 
 |                ->Decompress(untag()->heap_base()))) { | 
 |         return false; | 
 |       } | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | bool Symbol::IsSymbolCid(Thread* thread, classid_t class_id) { | 
 |   auto object_store = thread->isolate_group()->object_store(); | 
 |   return Class::GetClassId(object_store->symbol_class()) == class_id; | 
 | } | 
 |  | 
 | // Must be kept in sync with Symbol.hashCode in symbol_patch.dart | 
 | uint32_t Symbol::CanonicalizeHash(Thread* thread, const Instance& instance) { | 
 |   ASSERT(IsSymbolCid(thread, instance.GetClassId())); | 
 |  | 
 |   auto zone = thread->zone(); | 
 |   auto object_store = thread->isolate_group()->object_store(); | 
 |  | 
 |   const auto& symbol_name_field = | 
 |       Field::Handle(zone, object_store->symbol_name_field()); | 
 |   ASSERT(!symbol_name_field.IsNull()); | 
 |  | 
 |   // Keep in sync with sdk/lib/_internal/vm/lib/symbol_patch.dart. | 
 |   const auto& name = | 
 |       String::Cast(Object::Handle(zone, instance.GetField(symbol_name_field))); | 
 |   const uint32_t arbitrary_prime = 664597; | 
 |   return 0x1fffffff & (arbitrary_prime * name.CanonicalizeHash()); | 
 | } | 
 |  | 
 | uint32_t Instance::CanonicalizeHash() const { | 
 |   if (GetClassId() == kNullCid) { | 
 |     return kNullIdentityHash; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   uint32_t hash = thread->heap()->GetCanonicalHash(ptr()); | 
 |   if (hash != 0) { | 
 |     return hash; | 
 |   } | 
 |   Zone* zone = thread->zone(); | 
 |   const Class& cls = Class::Handle(zone, clazz()); | 
 |   const bool is_symbol = Symbol::IsSymbolCid(thread, cls.id()); | 
 |  | 
 |   NoSafepointScope no_safepoint(thread); | 
 |  | 
 |   if (is_symbol) { | 
 |     hash = Symbol::CanonicalizeHash(thread, *this); | 
 |   } else { | 
 |     const intptr_t class_id = cls.id(); | 
 |     ASSERT(class_id != 0); | 
 |     hash = class_id; | 
 |     uword this_addr = reinterpret_cast<uword>(this->untag()); | 
 |     Object& obj = Object::Handle(zone); | 
 |     Instance& instance = Instance::Handle(zone); | 
 |  | 
 |     const auto unboxed_fields_bitmap = | 
 |         thread->isolate_group()->class_table()->GetUnboxedFieldsMapAt( | 
 |             GetClassId()); | 
 |  | 
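    // Fold every field into the hash: unboxed fields contribute their raw
    // 32-bit words, uninitialized (sentinel) fields a fixed marker, and boxed
    // fields their recursively computed canonical hash.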
 |     for (intptr_t offset = Instance::NextFieldOffset(); | 
 |          offset < cls.host_next_field_offset(); offset += kCompressedWordSize) { | 
 |       if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) { | 
 |         if (kCompressedWordSize == 8) { | 
 |           hash = CombineHashes( | 
 |               hash, *reinterpret_cast<uint32_t*>(this_addr + offset)); | 
 |           hash = CombineHashes( | 
 |               hash, *reinterpret_cast<uint32_t*>(this_addr + offset + 4)); | 
 |         } else { | 
 |           hash = CombineHashes( | 
 |               hash, *reinterpret_cast<uint32_t*>(this_addr + offset)); | 
 |         } | 
 |       } else { | 
 |         obj = reinterpret_cast<CompressedObjectPtr*>(this_addr + offset) | 
 |                   ->Decompress(untag()->heap_base()); | 
 |         if (obj.IsSentinel()) { | 
 |           hash = CombineHashes(hash, 11); | 
 |         } else { | 
 |           instance ^= obj.ptr(); | 
 |           hash = CombineHashes(hash, instance.CanonicalizeHash()); | 
 |         } | 
 |       } | 
 |     } | 
 |     hash = FinalizeHash(hash, String::kHashBits); | 
 |   } | 
 |   thread->heap()->SetCanonicalHash(ptr(), hash); | 
 |   return hash; | 
 | } | 
 |  | 
 | #if defined(DEBUG) | 
 | class CheckForPointers : public ObjectPointerVisitor { | 
 |  public: | 
 |   explicit CheckForPointers(IsolateGroup* isolate_group) | 
 |       : ObjectPointerVisitor(isolate_group), has_pointers_(false) {} | 
 |  | 
 |   bool has_pointers() const { return has_pointers_; } | 
 |  | 
 |   void VisitPointers(ObjectPtr* first, ObjectPtr* last) override { | 
 |     if (last >= first) { | 
 |       has_pointers_ = true; | 
 |     } | 
 |   } | 
 |  | 
 | #if defined(DART_COMPRESSED_POINTERS) | 
 |   void VisitCompressedPointers(uword heap_base, | 
 |                                CompressedObjectPtr* first, | 
 |                                CompressedObjectPtr* last) override { | 
 |     if (last >= first) { | 
 |       has_pointers_ = true; | 
 |     } | 
 |   } | 
 | #endif | 
 |  | 
 |  private: | 
 |   bool has_pointers_; | 
 |  | 
 |   DISALLOW_COPY_AND_ASSIGN(CheckForPointers); | 
 | }; | 
 | #endif  // DEBUG | 
 |  | 
 | void Instance::CanonicalizeFieldsLocked(Thread* thread) const { | 
 |   const intptr_t class_id = GetClassId(); | 
 |   if (class_id >= kNumPredefinedCids) { | 
    // Iterate over all fields and canonicalize any instance values; the only
    // non-instance field values expected here are null and sentinel.
 |     Zone* zone = thread->zone(); | 
 |     Object& obj = Object::Handle(zone); | 
 |     const intptr_t instance_size = SizeFromClass(); | 
 |     ASSERT(instance_size != 0); | 
 |     const auto unboxed_fields_bitmap = | 
 |         thread->isolate_group()->class_table()->GetUnboxedFieldsMapAt(class_id); | 
 |     for (intptr_t offset = Instance::NextFieldOffset(); offset < instance_size; | 
 |          offset += kCompressedWordSize) { | 
 |       if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) { | 
 |         continue; | 
 |       } | 
 |       obj = this->FieldAddrAtOffset(offset)->Decompress(untag()->heap_base()); | 
 |       if (obj.IsInstance()) { | 
 |         obj = Instance::Cast(obj).CanonicalizeLocked(thread); | 
 |         this->SetFieldAtOffset(offset, obj); | 
 |       } else { | 
 |         ASSERT(obj.IsNull() || obj.IsSentinel()); | 
 |       } | 
 |     } | 
 |   } else { | 
 | #if defined(DEBUG) && !defined(DART_COMPRESSED_POINTERS) | 
 |     // Make sure that we are not missing any fields. | 
 |     IsolateGroup* group = IsolateGroup::Current(); | 
 |     CheckForPointers has_pointers(group); | 
 |     this->ptr()->untag()->VisitPointersPrecise(&has_pointers); | 
 |     ASSERT(!has_pointers.has_pointers()); | 
 | #endif  // DEBUG | 
 |   } | 
 | } | 
 |  | 
 | InstancePtr Instance::CopyShallowToOldSpace(Thread* thread) const { | 
 |   return Instance::RawCast(Object::Clone(*this, Heap::kOld)); | 
 | } | 
 |  | 
 | InstancePtr Instance::Canonicalize(Thread* thread) const { | 
 |   SafepointMutexLocker ml( | 
 |       thread->isolate_group()->constant_canonicalization_mutex()); | 
 |   return CanonicalizeLocked(thread); | 
 | } | 
 |  | 
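// Returns the canonical form of this instance: its fields are canonicalized
// first; if an equal canonical instance already exists it is returned,
// otherwise this instance (cloned into old space if it is in new space) is
// marked canonical and inserted into the class's table of constants.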
 | InstancePtr Instance::CanonicalizeLocked(Thread* thread) const { | 
 |   if (!this->ptr()->IsHeapObject() || this->IsCanonical()) { | 
 |     return this->ptr(); | 
 |   } | 
 |   ASSERT(!IsNull()); | 
 |   CanonicalizeFieldsLocked(thread); | 
 |   Zone* zone = thread->zone(); | 
 |   const Class& cls = Class::Handle(zone, this->clazz()); | 
 |   Instance& result = | 
 |       Instance::Handle(zone, cls.LookupCanonicalInstance(zone, *this)); | 
 |   if (!result.IsNull()) { | 
 |     return result.ptr(); | 
 |   } | 
 |   if (IsNew()) { | 
 |     ASSERT((thread->isolate() == Dart::vm_isolate()) || !InVMIsolateHeap()); | 
 |     // Create a canonical object in old space. | 
 |     result ^= Object::Clone(*this, Heap::kOld); | 
 |   } else { | 
 |     result = this->ptr(); | 
 |   } | 
 |   ASSERT(result.IsOld()); | 
 |   result.SetCanonical(); | 
 |   return cls.InsertCanonicalConstant(zone, result); | 
 | } | 
 |  | 
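// Reads the value of 'field', boxing unboxed fields according to the field's
// guarded cid (double, SIMD value, or int64 for anything else).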
 | ObjectPtr Instance::GetField(const Field& field) const { | 
 |   if (field.is_unboxed()) { | 
 |     switch (field.guarded_cid()) { | 
 |       case kDoubleCid: | 
 |         return Double::New(*reinterpret_cast<double_t*>(FieldAddr(field))); | 
 |       case kFloat32x4Cid: | 
 |         return Float32x4::New( | 
 |             *reinterpret_cast<simd128_value_t*>(FieldAddr(field))); | 
 |       case kFloat64x2Cid: | 
 |         return Float64x2::New( | 
 |             *reinterpret_cast<simd128_value_t*>(FieldAddr(field))); | 
 |       default: | 
 |         return Integer::New(*reinterpret_cast<int64_t*>(FieldAddr(field))); | 
 |     } | 
 |   } else { | 
 |     return FieldAddr(field)->Decompress(untag()->heap_base()); | 
 |   } | 
 | } | 
 |  | 
 | void Instance::SetField(const Field& field, const Object& value) const { | 
 |   if (field.is_unboxed()) { | 
 |     switch (field.guarded_cid()) { | 
 |       case kDoubleCid: | 
 |         StoreNonPointer(reinterpret_cast<double_t*>(FieldAddr(field)), | 
 |                         Double::Cast(value).value()); | 
 |         break; | 
 |       case kFloat32x4Cid: | 
 |         StoreNonPointer(reinterpret_cast<simd128_value_t*>(FieldAddr(field)), | 
 |                         Float32x4::Cast(value).value()); | 
 |         break; | 
 |       case kFloat64x2Cid: | 
 |         StoreNonPointer(reinterpret_cast<simd128_value_t*>(FieldAddr(field)), | 
 |                         Float64x2::Cast(value).value()); | 
 |         break; | 
 |       default: | 
 |         StoreNonPointer(reinterpret_cast<int64_t*>(FieldAddr(field)), | 
 |                         Integer::Cast(value).AsInt64Value()); | 
 |         break; | 
 |     } | 
 |   } else { | 
 |     field.RecordStore(value); | 
 |     StoreCompressedPointer(FieldAddr(field), value.ptr()); | 
 |   } | 
 | } | 
 |  | 
 | AbstractTypePtr Instance::GetType(Heap::Space space) const { | 
 |   if (IsNull()) { | 
 |     return Type::NullType(); | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const Class& cls = Class::Handle(zone, clazz()); | 
 |   if (!cls.is_finalized()) { | 
 |     // Various predefined classes can be instantiated by the VM or | 
 |     // Dart_NewString/Integer/TypedData/... before the class is finalized. | 
 |     ASSERT(cls.is_prefinalized()); | 
 |     cls.EnsureDeclarationLoaded(); | 
 |   } | 
 |   if (cls.IsClosureClass()) { | 
 |     FunctionType& signature = FunctionType::Handle( | 
 |         zone, Closure::Cast(*this).GetInstantiatedSignature(zone)); | 
 |     if (!signature.IsFinalized()) { | 
 |       signature.SetIsFinalized(); | 
 |     } | 
 |     signature ^= signature.Canonicalize(thread); | 
 |     return signature.ptr(); | 
 |   } | 
 |   if (IsRecord()) { | 
 |     ASSERT(cls.IsRecordClass()); | 
 |     auto& record_type = | 
 |         RecordType::Handle(zone, Record::Cast(*this).GetRecordType()); | 
 |     ASSERT(record_type.IsFinalized()); | 
 |     ASSERT(record_type.IsCanonical()); | 
 |     return record_type.ptr(); | 
 |   } | 
 |   Type& type = Type::Handle(zone); | 
 |   if (!cls.IsGeneric()) { | 
 |     type = cls.DeclarationType(); | 
 |   } | 
 |   if (type.IsNull()) { | 
 |     TypeArguments& type_arguments = TypeArguments::Handle(zone); | 
 |     const intptr_t num_type_arguments = cls.NumTypeArguments(); | 
 |     if (num_type_arguments > 0) { | 
 |       type_arguments = GetTypeArguments(); | 
 |       if (!type_arguments.IsNull()) { | 
 |         type_arguments = type_arguments.FromInstanceTypeArguments(thread, cls); | 
 |       } | 
 |     } | 
 |     type = Type::New(cls, type_arguments, Nullability::kNonNullable, space); | 
 |     type.SetIsFinalized(); | 
 |     type ^= type.Canonicalize(thread); | 
 |   } | 
 |   return type.ptr(); | 
 | } | 
 |  | 
 | TypeArgumentsPtr Instance::GetTypeArguments() const { | 
 |   ASSERT(!IsType()); | 
 |   const Class& cls = Class::Handle(clazz()); | 
 |   intptr_t field_offset = cls.host_type_arguments_field_offset(); | 
 |   ASSERT(field_offset != Class::kNoTypeArguments); | 
 |   TypeArguments& type_arguments = TypeArguments::Handle(); | 
 |   type_arguments ^= | 
 |       FieldAddrAtOffset(field_offset)->Decompress(untag()->heap_base()); | 
 |   return type_arguments.ptr(); | 
 | } | 
 |  | 
 | void Instance::SetTypeArguments(const TypeArguments& value) const { | 
 |   ASSERT(!IsType()); | 
 |   ASSERT(value.IsNull() || value.IsCanonical()); | 
 |   const Class& cls = Class::Handle(clazz()); | 
 |   intptr_t field_offset = cls.host_type_arguments_field_offset(); | 
 |   ASSERT(field_offset != Class::kNoTypeArguments); | 
 |   SetFieldAtOffset(field_offset, value); | 
 | } | 
 |  | 
 | /* | 
 | Specification of instance checks (e is T) and casts (e as T), where e evaluates | 
 | to a value v and v has runtime type S: | 
 |  | 
 | Instance checks (e is T) in weak checking mode in a legacy or opted-in library: | 
 |   If v == null and T is a legacy type | 
 |     return LEGACY_SUBTYPE(T, Null) || LEGACY_SUBTYPE(Object, T) | 
 |   If v == null and T is not a legacy type, return NNBD_SUBTYPE(Null, T) | 
 |   Otherwise return LEGACY_SUBTYPE(S, T) | 
 |  | 
 | Instance checks (e is T) in strong checking mode in a legacy or opted-in lib: | 
 |   If v == null and T is a legacy type | 
 |     return LEGACY_SUBTYPE(T, Null) || LEGACY_SUBTYPE(Object, T) | 
 |   Otherwise return NNBD_SUBTYPE(S, T) | 
 |  | 
 | Casts (e as T) in weak checking mode in a legacy or opted-in library: | 
 |   If LEGACY_SUBTYPE(S, T) then e as T evaluates to v. | 
 |   Otherwise a TypeError is thrown. | 
 |  | 
 | Casts (e as T) in strong checking mode in a legacy or opted-in library: | 
 |   If NNBD_SUBTYPE(S, T) then e as T evaluates to v. | 
 |   Otherwise a TypeError is thrown. | 
 | */ | 
 |  | 
 | bool Instance::IsInstanceOf( | 
 |     const AbstractType& other, | 
 |     const TypeArguments& other_instantiator_type_arguments, | 
 |     const TypeArguments& other_function_type_arguments) const { | 
 |   ASSERT(!other.IsDynamicType()); | 
 |   if (IsNull()) { | 
 |     return Instance::NullIsInstanceOf(other, other_instantiator_type_arguments, | 
 |                                       other_function_type_arguments); | 
 |   } | 
 |   // In strong mode, compute NNBD_SUBTYPE(runtimeType, other). | 
 |   // In weak mode, compute LEGACY_SUBTYPE(runtimeType, other). | 
 |   return RuntimeTypeIsSubtypeOf(other, other_instantiator_type_arguments, | 
 |                                 other_function_type_arguments); | 
 | } | 
 |  | 
 | bool Instance::IsAssignableTo( | 
 |     const AbstractType& other, | 
 |     const TypeArguments& other_instantiator_type_arguments, | 
 |     const TypeArguments& other_function_type_arguments) const { | 
 |   ASSERT(!other.IsDynamicType()); | 
 |   // In strong mode, compute NNBD_SUBTYPE(runtimeType, other). | 
 |   // In weak mode, compute LEGACY_SUBTYPE(runtimeType, other). | 
 |   return RuntimeTypeIsSubtypeOf(other, other_instantiator_type_arguments, | 
 |                                 other_function_type_arguments); | 
 | } | 
 |  | 
// If the 'other' type (once instantiated) is a legacy type:
//   return LEGACY_SUBTYPE(other, Null) || LEGACY_SUBTYPE(Object, other).
// Otherwise return NNBD_SUBTYPE(Null, other).
// The value of the strong flag is ignored.
 | bool Instance::NullIsInstanceOf( | 
 |     const AbstractType& other, | 
 |     const TypeArguments& other_instantiator_type_arguments, | 
 |     const TypeArguments& other_function_type_arguments) { | 
 |   ASSERT(other.IsFinalized()); | 
 |   if (other.IsNullable()) { | 
 |     // This case includes top types (void, dynamic, Object?). | 
 |     // The uninstantiated nullable type will remain nullable after | 
 |     // instantiation. | 
 |     return true; | 
 |   } | 
 |   if (other.IsFutureOrType()) { | 
 |     const auto& type = AbstractType::Handle(other.UnwrapFutureOr()); | 
 |     return NullIsInstanceOf(type, other_instantiator_type_arguments, | 
 |                             other_function_type_arguments); | 
 |   } | 
 |   // No need to instantiate type, unless it is a type parameter. | 
 |   // Note that a typeref cannot refer to a type parameter. | 
 |   if (other.IsTypeParameter()) { | 
 |     auto& type = AbstractType::Handle(other.InstantiateFrom( | 
 |         other_instantiator_type_arguments, other_function_type_arguments, | 
 |         kAllFree, Heap::kOld)); | 
 |     return Instance::NullIsInstanceOf(type, Object::null_type_arguments(), | 
 |                                       Object::null_type_arguments()); | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | // Must be kept in sync with GenerateNullIsAssignableToType in | 
 | // stub_code_compiler.cc if any changes are made. | 
 | bool Instance::NullIsAssignableTo(const AbstractType& other) { | 
 |   // "Left Null" rule: null is assignable when destination type is | 
 |   // nullable. Otherwise it is not assignable or we cannot tell | 
 |   // without instantiating type parameter. | 
 |   if (other.IsNullable()) { | 
 |     return true; | 
 |   } | 
 |   if (other.IsFutureOrType()) { | 
 |     return NullIsAssignableTo(AbstractType::Handle(other.UnwrapFutureOr())); | 
 |   } | 
  // Since the TAVs are not available, this returns a conservative
  // approximation of "not assignable" for non-nullable type parameters.
 |   return false; | 
 | } | 
 |  | 
 | // Must be kept in sync with GenerateNullIsAssignableToType in | 
 | // stub_code_compiler.cc if any changes are made. | 
 | bool Instance::NullIsAssignableTo( | 
 |     const AbstractType& other, | 
 |     const TypeArguments& other_instantiator_type_arguments, | 
 |     const TypeArguments& other_function_type_arguments) { | 
 |   // Do checks that don't require instantiation first. | 
 |   if (NullIsAssignableTo(other)) return true; | 
 |   if (!other.IsTypeParameter()) return false; | 
 |   const auto& type = AbstractType::Handle(other.InstantiateFrom( | 
 |       other_instantiator_type_arguments, other_function_type_arguments, | 
 |       kAllFree, Heap::kNew)); | 
 |   return NullIsAssignableTo(type); | 
 | } | 
 |  | 
 | bool Instance::RuntimeTypeIsSubtypeOf( | 
 |     const AbstractType& other, | 
 |     const TypeArguments& other_instantiator_type_arguments, | 
 |     const TypeArguments& other_function_type_arguments) const { | 
 |   ASSERT(other.IsFinalized()); | 
 |   ASSERT(ptr() != Object::sentinel().ptr()); | 
  // An instance's runtime type can never be dynamic, void, or Never.
 |   if (other.IsTopTypeForSubtyping()) { | 
 |     return true; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const Class& cls = Class::Handle(zone, clazz()); | 
 |   if (cls.IsClosureClass()) { | 
 |     if (other.IsDartFunctionType() || other.IsDartClosureType() || | 
 |         other.IsObjectType()) { | 
 |       return true; | 
 |     } | 
 |     AbstractType& instantiated_other = AbstractType::Handle(zone, other.ptr()); | 
 |     if (!other.IsInstantiated()) { | 
 |       instantiated_other = other.InstantiateFrom( | 
 |           other_instantiator_type_arguments, other_function_type_arguments, | 
 |           kAllFree, Heap::kOld); | 
 |       if (instantiated_other.IsTopTypeForSubtyping() || | 
 |           instantiated_other.IsObjectType() || | 
 |           instantiated_other.IsDartFunctionType()) { | 
 |         return true; | 
 |       } | 
 |     } | 
 |     if (RuntimeTypeIsSubtypeOfFutureOr(zone, instantiated_other)) { | 
 |       return true; | 
 |     } | 
 |     if (!instantiated_other.IsFunctionType()) { | 
 |       return false; | 
 |     } | 
 |     const FunctionType& sig = FunctionType::Handle( | 
 |         Closure::Cast(*this).GetInstantiatedSignature(zone)); | 
 |     return sig.IsSubtypeOf(FunctionType::Cast(instantiated_other), Heap::kOld); | 
 |   } | 
 |   if (cls.IsRecordClass()) { | 
 |     if (other.IsDartRecordType() || other.IsObjectType()) { | 
 |       return true; | 
 |     } | 
 |     AbstractType& instantiated_other = AbstractType::Handle(zone, other.ptr()); | 
 |     if (!other.IsInstantiated()) { | 
 |       instantiated_other = other.InstantiateFrom( | 
 |           other_instantiator_type_arguments, other_function_type_arguments, | 
 |           kAllFree, Heap::kOld); | 
 |       if (instantiated_other.IsTopTypeForSubtyping() || | 
 |           instantiated_other.IsObjectType() || | 
 |           instantiated_other.IsDartRecordType()) { | 
 |         return true; | 
 |       } | 
 |     } | 
 |     if (RuntimeTypeIsSubtypeOfFutureOr(zone, instantiated_other)) { | 
 |       return true; | 
 |     } | 
 |     if (!instantiated_other.IsRecordType()) { | 
 |       return false; | 
 |     } | 
 |     const Record& record = Record::Cast(*this); | 
 |     const RecordType& record_type = RecordType::Cast(instantiated_other); | 
 |     if (record.shape() != record_type.shape()) { | 
 |       return false; | 
 |     } | 
 |     Instance& field_value = Instance::Handle(zone); | 
 |     AbstractType& field_type = AbstractType::Handle(zone); | 
 |     const intptr_t num_fields = record.num_fields(); | 
 |     for (intptr_t i = 0; i < num_fields; ++i) { | 
 |       field_value ^= record.FieldAt(i); | 
 |       field_type = record_type.FieldTypeAt(i); | 
 |       if (!field_value.RuntimeTypeIsSubtypeOf(field_type, | 
 |                                               Object::null_type_arguments(), | 
 |                                               Object::null_type_arguments())) { | 
 |         return false; | 
 |       } | 
 |     } | 
 |     return true; | 
 |   } | 
 |   TypeArguments& type_arguments = TypeArguments::Handle(zone); | 
 |   const intptr_t num_type_arguments = cls.NumTypeArguments(); | 
 |   if (num_type_arguments > 0) { | 
 |     type_arguments = GetTypeArguments(); | 
 |     ASSERT(type_arguments.IsNull() || type_arguments.IsCanonical()); | 
    // The number of type arguments in the instance must be greater than or
    // equal to the number of type arguments expected by the instance class.
 |     // A discrepancy is allowed for closures, which borrow the type argument | 
 |     // vector of their instantiator, which may be of a subclass of the class | 
 |     // defining the closure. Truncating the vector to the correct length on | 
 |     // instantiation is unnecessary. The vector may therefore be longer. | 
 |     // Also, an optimization reuses the type argument vector of the instantiator | 
 |     // of generic instances when its layout is compatible. | 
 |     ASSERT(type_arguments.IsNull() || | 
 |            (type_arguments.Length() >= num_type_arguments)); | 
 |   } | 
 |   AbstractType& instantiated_other = AbstractType::Handle(zone, other.ptr()); | 
 |   if (!other.IsInstantiated()) { | 
 |     instantiated_other = other.InstantiateFrom( | 
 |         other_instantiator_type_arguments, other_function_type_arguments, | 
 |         kAllFree, Heap::kOld); | 
 |     if (instantiated_other.IsTopTypeForSubtyping()) { | 
 |       return true; | 
 |     } | 
 |   } | 
 |   if (IsNull()) { | 
 |     if (instantiated_other.IsNullType()) { | 
 |       return true; | 
 |     } | 
 |     if (RuntimeTypeIsSubtypeOfFutureOr(zone, instantiated_other)) { | 
 |       return true; | 
 |     } | 
 |     // At this point, instantiated_other can be a function type. | 
 |     return !instantiated_other.IsNonNullable(); | 
 |   } | 
 |   if (!instantiated_other.IsType()) { | 
 |     return false; | 
 |   } | 
  // The runtime type of a non-null instance is non-nullable, so there is no
  // need to check the nullability of the other type.
 |   return Class::IsSubtypeOf(cls, type_arguments, Nullability::kNonNullable, | 
 |                             instantiated_other, Heap::kOld); | 
 | } | 
 |  | 
 | bool Instance::RuntimeTypeIsSubtypeOfFutureOr(Zone* zone, | 
 |                                               const AbstractType& other) const { | 
 |   if (other.IsFutureOrType()) { | 
 |     const TypeArguments& other_type_arguments = | 
 |         TypeArguments::Handle(zone, other.arguments()); | 
 |     const AbstractType& other_type_arg = | 
 |         AbstractType::Handle(zone, other_type_arguments.TypeAtNullSafe(0)); | 
 |     if (other_type_arg.IsTopTypeForSubtyping()) { | 
 |       return true; | 
 |     } | 
 |     if (Class::Handle(zone, clazz()).IsFutureClass()) { | 
 |       const TypeArguments& type_arguments = | 
 |           TypeArguments::Handle(zone, GetTypeArguments()); | 
 |       const AbstractType& type_arg = | 
 |           AbstractType::Handle(zone, type_arguments.TypeAtNullSafe(0)); | 
 |       if (type_arg.IsSubtypeOf(other_type_arg, Heap::kOld)) { | 
 |         return true; | 
 |       } | 
 |     } | 
 |     // Retry RuntimeTypeIsSubtypeOf after unwrapping type arg of FutureOr. | 
 |     if (RuntimeTypeIsSubtypeOf(other_type_arg, Object::null_type_arguments(), | 
 |                                Object::null_type_arguments())) { | 
 |       return true; | 
 |     } | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | bool Instance::OperatorEquals(const Instance& other) const { | 
 |   // TODO(koda): Optimize for all builtin classes and all classes | 
 |   // that do not override operator==. | 
 |   return DartLibraryCalls::Equals(*this, other) == Object::bool_true().ptr(); | 
 | } | 
 |  | 
 | bool Instance::IsIdenticalTo(const Instance& other) const { | 
 |   if (ptr() == other.ptr()) return true; | 
 |   if (IsInteger() && other.IsInteger()) { | 
 |     return Integer::Cast(*this).Equals(other); | 
 |   } | 
 |   if (IsDouble() && other.IsDouble()) { | 
 |     double other_value = Double::Cast(other).value(); | 
 |     return Double::Cast(*this).BitwiseEqualsToDouble(other_value); | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | intptr_t* Instance::NativeFieldsDataAddr() const { | 
 |   ASSERT(Thread::Current()->no_safepoint_scope_depth() > 0); | 
 |   TypedDataPtr native_fields = static_cast<TypedDataPtr>( | 
 |       NativeFieldsAddr()->Decompress(untag()->heap_base())); | 
 |   if (native_fields == TypedData::null()) { | 
 |     return nullptr; | 
 |   } | 
 |   return reinterpret_cast<intptr_t*>(native_fields->untag()->data()); | 
 | } | 
 |  | 
 | void Instance::SetNativeField(int index, intptr_t value) const { | 
 |   ASSERT(IsValidNativeIndex(index)); | 
 |   Object& native_fields = | 
 |       Object::Handle(NativeFieldsAddr()->Decompress(untag()->heap_base())); | 
 |   if (native_fields.IsNull()) { | 
 |     // Allocate backing storage for the native fields. | 
 |     native_fields = TypedData::New(kIntPtrCid, NumNativeFields()); | 
 |     StoreCompressedPointer(NativeFieldsAddr(), native_fields.ptr()); | 
 |   } | 
 |   intptr_t byte_offset = index * sizeof(intptr_t); | 
 |   TypedData::Cast(native_fields).SetIntPtr(byte_offset, value); | 
 | } | 
 |  | 
 | void Instance::SetNativeFields(uint16_t num_native_fields, | 
 |                                const intptr_t* field_values) const { | 
 |   ASSERT(num_native_fields == NumNativeFields()); | 
 |   ASSERT(field_values != nullptr); | 
 |   Object& native_fields = | 
 |       Object::Handle(NativeFieldsAddr()->Decompress(untag()->heap_base())); | 
 |   if (native_fields.IsNull()) { | 
 |     // Allocate backing storage for the native fields. | 
 |     native_fields = TypedData::New(kIntPtrCid, NumNativeFields()); | 
 |     StoreCompressedPointer(NativeFieldsAddr(), native_fields.ptr()); | 
 |   } | 
 |   for (uint16_t i = 0; i < num_native_fields; i++) { | 
 |     intptr_t byte_offset = i * sizeof(intptr_t); | 
 |     TypedData::Cast(native_fields).SetIntPtr(byte_offset, field_values[i]); | 
 |   } | 
 | } | 
 |  | 
 | bool Instance::IsCallable(Function* function) const { | 
 |   Class& cls = Class::Handle(clazz()); | 
 |   if (cls.IsClosureClass()) { | 
 |     if (function != nullptr) { | 
 |       *function = Closure::Cast(*this).function(); | 
 |     } | 
 |     return true; | 
 |   } | 
 |   // Try to resolve a "call" method. | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   Function& call_function = Function::Handle( | 
 |       zone, Resolver::ResolveDynamicAnyArgs(zone, cls, Symbols::DynamicCall(), | 
 |                                             /*allow_add=*/false)); | 
 |   if (call_function.IsNull()) { | 
 |     return false; | 
 |   } | 
 |   if (function != nullptr) { | 
 |     *function = call_function.ptr(); | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | InstancePtr Instance::New(const Class& cls, Heap::Space space) { | 
 |   Thread* thread = Thread::Current(); | 
 |   if (cls.EnsureIsAllocateFinalized(thread) != Error::null()) { | 
 |     return Instance::null(); | 
 |   } | 
 |   return NewAlreadyFinalized(cls, space); | 
 | } | 
 |  | 
 | InstancePtr Instance::NewAlreadyFinalized(const Class& cls, Heap::Space space) { | 
 |   ASSERT(cls.is_allocate_finalized()); | 
 |   intptr_t instance_size = cls.host_instance_size(); | 
 |   ASSERT(instance_size > 0); | 
 |   // Initialize everything after the object header with Object::null(), since | 
 |   // this isn't a predefined class. | 
 |   const uword ptr_field_end_offset = | 
 |       instance_size - (Instance::ContainsCompressedPointers() | 
 |                            ? kCompressedWordSize | 
 |                            : kWordSize); | 
 |   return static_cast<InstancePtr>(Object::Allocate( | 
 |       cls.id(), instance_size, space, Instance::ContainsCompressedPointers(), | 
 |       from_offset<Instance>(), ptr_field_end_offset)); | 
 | } | 
 |  | 
 | bool Instance::IsValidFieldOffset(intptr_t offset) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   REUSABLE_CLASS_HANDLESCOPE(thread); | 
 |   Class& cls = thread->ClassHandle(); | 
 |   cls = clazz(); | 
 |   return (offset >= 0 && | 
 |           offset <= (cls.host_instance_size() - kCompressedWordSize)); | 
 | } | 
 |  | 
 | intptr_t Instance::ElementSizeFor(intptr_t cid) { | 
 |   if (IsExternalTypedDataClassId(cid) || IsTypedDataClassId(cid) || | 
 |       IsTypedDataViewClassId(cid) || IsUnmodifiableTypedDataViewClassId(cid)) { | 
 |     return TypedDataBase::ElementSizeInBytes(cid); | 
 |   } | 
 |   switch (cid) { | 
 |     case kArrayCid: | 
 |     case kImmutableArrayCid: | 
 |       return Array::kBytesPerElement; | 
 |     case kTypeArgumentsCid: | 
 |       return TypeArguments::ArrayTraits::kElementSize; | 
 |     case kOneByteStringCid: | 
 |       return OneByteString::kBytesPerElement; | 
 |     case kTwoByteStringCid: | 
 |       return TwoByteString::kBytesPerElement; | 
 |     default: | 
 |       UNIMPLEMENTED(); | 
 |       return 0; | 
 |   } | 
 | } | 
 |  | 
 | intptr_t Instance::DataOffsetFor(intptr_t cid) { | 
 |   if (IsExternalTypedDataClassId(cid)) { | 
 |     // Elements start at offset 0 of the external data. | 
 |     return 0; | 
 |   } | 
 |   if (IsTypedDataClassId(cid)) { | 
 |     return TypedData::payload_offset(); | 
 |   } | 
 |   switch (cid) { | 
 |     case kArrayCid: | 
 |     case kImmutableArrayCid: | 
 |       return Array::data_offset(); | 
 |     case kTypeArgumentsCid: | 
 |       return TypeArguments::types_offset(); | 
 |     case kOneByteStringCid: | 
 |       return OneByteString::data_offset(); | 
 |     case kTwoByteStringCid: | 
 |       return TwoByteString::data_offset(); | 
 |     case kRecordCid: | 
 |       return Record::field_offset(0); | 
 |     default: | 
 |       UNIMPLEMENTED(); | 
 |       return Array::data_offset(); | 
 |   } | 
 | } | 
 |  | 
 | const char* Instance::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "null"; | 
 |   } else if (Thread::Current()->no_safepoint_scope_depth() > 0) { | 
    // Can occur when running the disassembler.
 |     return "Instance"; | 
 |   } else { | 
 |     if (IsClosure()) { | 
 |       return Closure::Cast(*this).ToCString(); | 
 |     } | 
    // Background-compiler disassembly of instructions referring to pool
    // objects calls this function and requires allocating the Type in old
    // space.
 |     const AbstractType& type = AbstractType::Handle(GetType(Heap::kOld)); | 
 |     const String& type_name = String::Handle(type.UserVisibleName()); | 
 |     return OS::SCreate(Thread::Current()->zone(), "Instance of '%s'", | 
 |                        type_name.ToCString()); | 
 |   } | 
 | } | 
 |  | 
 | classid_t AbstractType::type_class_id() const { | 
  // All subclasses should implement this appropriately, so the only value
  // that should reach this implementation is the null value.
 |   ASSERT(IsNull()); | 
 |   // AbstractType is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return kIllegalCid; | 
 | } | 
 |  | 
 | ClassPtr AbstractType::type_class() const { | 
  // All subclasses should implement this appropriately, so the only value
  // that should reach this implementation is the null value.
 |   ASSERT(IsNull()); | 
 |   // AbstractType is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return Class::null(); | 
 | } | 
 |  | 
 | TypeArgumentsPtr AbstractType::arguments() const { | 
  // All subclasses should implement this appropriately, so the only value
  // that should reach this implementation is the null value.
 |   ASSERT(IsNull()); | 
 |   // AbstractType is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return nullptr; | 
 | } | 
 |  | 
 | bool AbstractType::IsStrictlyNonNullable() const { | 
 |   // Null can be assigned to legacy and nullable types. | 
 |   if (!IsNonNullable()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |  | 
 |   if (IsTypeParameter()) { | 
 |     const auto& bound = | 
 |         AbstractType::Handle(zone, TypeParameter::Cast(*this).bound()); | 
 |     ASSERT(!bound.IsNull()); | 
 |     return bound.IsStrictlyNonNullable(); | 
 |   } | 
 |   if (IsFutureOrType()) { | 
 |     return AbstractType::Handle(zone, UnwrapFutureOr()).IsStrictlyNonNullable(); | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | AbstractTypePtr AbstractType::SetInstantiatedNullability( | 
 |     const TypeParameter& type_param, | 
 |     Heap::Space space) const { | 
 |   Nullability result_nullability; | 
 |   const Nullability arg_nullability = nullability(); | 
 |   const Nullability var_nullability = type_param.nullability(); | 
 |   // Adjust nullability of result 'arg' instantiated from 'var'. | 
 |   // arg/var ! ? | 
 |   //  !      ! ? | 
 |   //  ?      ? ? | 
 |   if (var_nullability == Nullability::kNullable) { | 
 |     result_nullability = Nullability::kNullable; | 
 |   } else { | 
 |     // Keep arg nullability. | 
 |     return ptr(); | 
 |   } | 
 |   if (arg_nullability == result_nullability) { | 
 |     return ptr(); | 
 |   } | 
 |   if (IsType()) { | 
 |     return Type::Cast(*this).ToNullability(result_nullability, space); | 
 |   } | 
 |   if (IsFunctionType()) { | 
 |     return FunctionType::Cast(*this).ToNullability(result_nullability, space); | 
 |   } | 
 |   if (IsRecordType()) { | 
 |     return RecordType::Cast(*this).ToNullability(result_nullability, space); | 
 |   } | 
 |   if (IsTypeParameter()) { | 
 |     return TypeParameter::Cast(*this).ToNullability(result_nullability, space); | 
 |   } | 
 |   UNREACHABLE(); | 
 | } | 
 |  | 
 | AbstractTypePtr AbstractType::NormalizeFutureOrType(Heap::Space space) const { | 
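  // Normalization examples (illustrative, not exhaustive):
  //   FutureOr<dynamic> => dynamic
  //   FutureOr<Object?> => Object?
  //   FutureOr<Never>   => Future<Never>
  //   FutureOr<Null>    => Future<Null>?
  //   FutureOr<T?>?     => FutureOr<T?>  (redundant outer '?' dropped)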
 |   if (IsFutureOrType()) { | 
 |     Zone* zone = Thread::Current()->zone(); | 
 |     const AbstractType& unwrapped_type = | 
 |         AbstractType::Handle(zone, UnwrapFutureOr()); | 
 |     const classid_t cid = unwrapped_type.type_class_id(); | 
 |     if (cid == kDynamicCid || cid == kVoidCid) { | 
 |       return unwrapped_type.ptr(); | 
 |     } | 
 |     if (cid == kInstanceCid) { | 
 |       if (IsNonNullable()) { | 
 |         return unwrapped_type.ptr(); | 
 |       } | 
 |       ASSERT(IsNullable()); | 
 |       return Type::Cast(unwrapped_type) | 
 |           .ToNullability(Nullability::kNullable, space); | 
 |     } | 
 |     if (cid == kNeverCid && unwrapped_type.IsNonNullable()) { | 
 |       ObjectStore* object_store = IsolateGroup::Current()->object_store(); | 
 |       const Type& future_never_type = | 
 |           Type::Handle(zone, object_store->non_nullable_future_never_type()); | 
 |       ASSERT(!future_never_type.IsNull()); | 
 |       return future_never_type.ToNullability(nullability(), space); | 
 |     } | 
 |     if (cid == kNullCid) { | 
 |       ObjectStore* object_store = IsolateGroup::Current()->object_store(); | 
 |       ASSERT(object_store->nullable_future_null_type() != Type::null()); | 
 |       return object_store->nullable_future_null_type(); | 
 |     } | 
 |     if (IsNullable() && unwrapped_type.IsNullable()) { | 
 |       return Type::Cast(*this).ToNullability(Nullability::kNonNullable, space); | 
 |     } | 
 |   } | 
 |   return ptr(); | 
 | } | 
 |  | 
 | bool AbstractType::IsInstantiated(Genericity genericity, | 
 |                                   intptr_t num_free_fun_type_params) const { | 
 |   // All subclasses should implement this appropriately, so the only value that | 
  // should reach this implementation is the null value.
 |   ASSERT(IsNull()); | 
 |   // AbstractType is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return false; | 
 | } | 
 |  | 
 | void AbstractType::SetIsFinalized() const { | 
 |   ASSERT(!IsFinalized()); | 
 |   set_type_state(IsInstantiated() | 
 |                      ? UntaggedAbstractType::kFinalizedInstantiated | 
 |                      : UntaggedAbstractType::kFinalizedUninstantiated); | 
 | } | 
 |  | 
 | void AbstractType::set_flags(uint32_t value) const { | 
 |   untag()->set_flags(value); | 
 | } | 
 |  | 
 | void AbstractType::set_type_state(UntaggedAbstractType::TypeState value) const { | 
 |   ASSERT(!IsCanonical()); | 
 |   set_flags( | 
 |       UntaggedAbstractType::TypeStateBits::update(value, untag()->flags())); | 
 | } | 
 |  | 
 | void AbstractType::set_nullability(Nullability value) const { | 
 |   ASSERT(!IsCanonical()); | 
 |   set_flags(UntaggedAbstractType::NullabilityBit::update( | 
 |       static_cast<uint8_t>(value), untag()->flags())); | 
 | } | 
 |  | 
 | bool AbstractType::IsEquivalent( | 
 |     const Instance& other, | 
 |     TypeEquality kind, | 
 |     FunctionTypeMapping* function_type_equivalence) const { | 
 |   // All subclasses should implement this appropriately, so the only value that | 
  // should reach this implementation is the null value.
 |   ASSERT(IsNull()); | 
 |   // AbstractType is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return false; | 
 | } | 
 |  | 
 | bool AbstractType::IsNullabilityEquivalent(Thread* thread, | 
 |                                            const AbstractType& other_type, | 
 |                                            TypeEquality kind) const { | 
 |   Nullability this_type_nullability = nullability(); | 
 |   Nullability other_type_nullability = other_type.nullability(); | 
 |   if (kind == TypeEquality::kInSubtypeTest) { | 
 |     if (this_type_nullability == Nullability::kNullable && | 
 |         other_type_nullability == Nullability::kNonNullable) { | 
 |       return false; | 
 |     } | 
 |   } else { | 
 |     ASSERT((kind == TypeEquality::kSyntactical) || | 
 |            (kind == TypeEquality::kCanonical)); | 
 |     if (this_type_nullability != other_type_nullability) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | AbstractTypePtr AbstractType::InstantiateFrom( | 
 |     const TypeArguments& instantiator_type_arguments, | 
 |     const TypeArguments& function_type_arguments, | 
 |     intptr_t num_free_fun_type_params, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_mapping, | 
 |     intptr_t num_parent_type_args_adjustment) const { | 
 |   // All subclasses should implement this appropriately, so the only value that | 
  // should reach this implementation is the null value.
 |   ASSERT(IsNull()); | 
 |   // AbstractType is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return nullptr; | 
 | } | 
 |  | 
 | AbstractTypePtr AbstractType::UpdateFunctionTypes( | 
 |     intptr_t num_parent_type_args_adjustment, | 
 |     intptr_t num_free_fun_type_params, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_mapping) const { | 
 |   UNREACHABLE(); | 
 |   return nullptr; | 
 | } | 
 |  | 
 | AbstractTypePtr AbstractType::Canonicalize(Thread* thread) const { | 
 |   // All subclasses should implement this appropriately, so the only value that | 
  // should reach this implementation is the null value.
 |   ASSERT(IsNull()); | 
 |   // AbstractType is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return nullptr; | 
 | } | 
 |  | 
 | void AbstractType::EnumerateURIs(URIs* uris) const { | 
 |   // All subclasses should implement this appropriately, so the only value that | 
  // should reach this implementation is the null value.
 |   ASSERT(IsNull()); | 
 |   // AbstractType is an abstract class. | 
 |   UNREACHABLE(); | 
 | } | 
 |  | 
 | void AbstractType::AddURI(URIs* uris, const String& name, const String& uri) { | 
 |   ASSERT(uris != nullptr); | 
 |   const intptr_t len = uris->length(); | 
 |   ASSERT((len % 3) == 0); | 
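  // 'uris' is a flat list of (type name, library uri, marker) triples; the
  // marker is set to Symbols::print() when the uri must be shown.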
 |   bool print_uri = false; | 
 |   for (intptr_t i = 0; i < len; i += 3) { | 
 |     if (uris->At(i).Equals(name)) { | 
 |       if (uris->At(i + 1).Equals(uri)) { | 
 |         // Same name and same URI: no need to add this already listed URI. | 
 |         return;  // No state change is possible. | 
 |       } else { | 
 |         // Same name and different URI: the name is ambiguous, print both URIs. | 
 |         print_uri = true; | 
 |         uris->SetAt(i + 2, Symbols::print()); | 
 |       } | 
 |     } | 
 |   } | 
 |   uris->Add(name); | 
 |   uris->Add(uri); | 
 |   if (print_uri) { | 
 |     uris->Add(Symbols::print()); | 
 |   } else { | 
 |     uris->Add(Symbols::Empty()); | 
 |   } | 
 | } | 
 |  | 
 | StringPtr AbstractType::PrintURIs(URIs* uris) { | 
 |   ASSERT(uris != nullptr); | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const intptr_t len = uris->length(); | 
 |   ASSERT((len % 3) == 0); | 
 |   GrowableHandlePtrArray<const String> pieces(zone, 5 * (len / 3)); | 
 |   for (intptr_t i = 0; i < len; i += 3) { | 
 |     // Only print URIs that have been marked. | 
 |     if (uris->At(i + 2).ptr() == Symbols::print().ptr()) { | 
 |       pieces.Add(Symbols::TwoSpaces()); | 
 |       pieces.Add(uris->At(i)); | 
 |       pieces.Add(Symbols::SpaceIsFromSpace()); | 
 |       pieces.Add(uris->At(i + 1)); | 
 |       pieces.Add(Symbols::NewLine()); | 
 |     } | 
 |   } | 
 |   return Symbols::FromConcatAll(thread, pieces); | 
 | } | 
 |  | 
 | const char* AbstractType::NullabilitySuffix( | 
 |     NameVisibility name_visibility) const { | 
 |   if (IsDynamicType() || IsVoidType() || IsNullType()) { | 
 |     // Hide nullable suffix. | 
 |     return ""; | 
 |   } | 
 |   // Keep in sync with Nullability enum in runtime/vm/object.h. | 
 |   switch (nullability()) { | 
 |     case Nullability::kNullable: | 
 |       return "?"; | 
 |     case Nullability::kNonNullable: | 
 |       return ""; | 
 |     default: | 
 |       UNREACHABLE(); | 
 |   } | 
 | } | 
 |  | 
 | StringPtr AbstractType::Name() const { | 
 |   return Symbols::New(Thread::Current(), NameCString()); | 
 | } | 
 |  | 
 | const char* AbstractType::NameCString() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   PrintName(kInternalName, &printer); | 
 |   return printer.buffer(); | 
 | } | 
 |  | 
 | StringPtr AbstractType::UserVisibleName() const { | 
 |   return Symbols::New(Thread::Current(), UserVisibleNameCString()); | 
 | } | 
 |  | 
 | const char* AbstractType::UserVisibleNameCString() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   PrintName(kUserVisibleName, &printer); | 
 |   return printer.buffer(); | 
 | } | 
 |  | 
 | StringPtr AbstractType::ScrubbedName() const { | 
 |   return Symbols::New(Thread::Current(), ScrubbedNameCString()); | 
 | } | 
 |  | 
 | const char* AbstractType::ScrubbedNameCString() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   PrintName(kScrubbedName, &printer); | 
 |   return printer.buffer(); | 
 | } | 
 |  | 
 | void AbstractType::PrintName(NameVisibility name_visibility, | 
 |                              BaseTextBuffer* printer) const { | 
 |   // All subclasses should implement this appropriately, so the only value that | 
  // should reach this implementation is the null value.
 |   ASSERT(IsNull()); | 
 |   // AbstractType is an abstract class. | 
 |   UNREACHABLE(); | 
 | } | 
 |  | 
 | StringPtr AbstractType::ClassName() const { | 
 |   ASSERT(!IsFunctionType() && !IsRecordType()); | 
 |   return Class::Handle(type_class()).Name(); | 
 | } | 
 |  | 
 | bool AbstractType::IsNullType() const { | 
 |   return type_class_id() == kNullCid; | 
 | } | 
 |  | 
 | bool AbstractType::IsNeverType() const { | 
 |   return type_class_id() == kNeverCid; | 
 | } | 
 |  | 
 | bool AbstractType::IsSentinelType() const { | 
 |   return type_class_id() == kSentinelCid; | 
 | } | 
 |  | 
 | bool AbstractType::IsTopTypeForInstanceOf() const { | 
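  // For an 'is' test, a top type is one that every value, including null,
  // is an instance of: dynamic, void, Object? and FutureOr wrappers thereof.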
 |   const classid_t cid = type_class_id(); | 
 |   if (cid == kDynamicCid || cid == kVoidCid) { | 
 |     return true; | 
 |   } | 
 |   if (cid == kInstanceCid) {  // Object type. | 
 |     return IsNullable(); | 
 |   } | 
 |   if (cid == kFutureOrCid) { | 
 |     // FutureOr<T> where T is a top type behaves as a top type. | 
 |     return AbstractType::Handle(UnwrapFutureOr()).IsTopTypeForInstanceOf(); | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | // Must be kept in sync with GenerateTypeIsTopTypeForSubtyping in | 
 | // stub_code_compiler.cc if any changes are made. | 
 | bool AbstractType::IsTopTypeForSubtyping() const { | 
 |   const classid_t cid = type_class_id(); | 
 |   if (cid == kDynamicCid || cid == kVoidCid) { | 
 |     return true; | 
 |   } | 
 |   if (cid == kInstanceCid) {  // Object type. | 
 |     return !IsNonNullable(); | 
 |   } | 
 |   if (cid == kFutureOrCid) { | 
 |     // FutureOr<T> where T is a top type behaves as a top type. | 
 |     return AbstractType::Handle(UnwrapFutureOr()).IsTopTypeForSubtyping(); | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | bool AbstractType::IsIntType() const { | 
 |   return HasTypeClass() && | 
 |          (type_class() == Type::Handle(Type::IntType()).type_class()); | 
 | } | 
 |  | 
 | bool AbstractType::IsIntegerImplementationType() const { | 
 |   return HasTypeClass() && | 
 |          (type_class() == IsolateGroup::Current() | 
 |                               ->object_store() | 
 |                               ->integer_implementation_class()); | 
 | } | 
 |  | 
 | bool AbstractType::IsDoubleType() const { | 
 |   return HasTypeClass() && | 
 |          (type_class() == Type::Handle(Type::Double()).type_class()); | 
 | } | 
 |  | 
 | bool AbstractType::IsFloat32x4Type() const { | 
 |   // kFloat32x4Cid refers to the private class and cannot be used here. | 
 |   return HasTypeClass() && | 
 |          (type_class() == Type::Handle(Type::Float32x4()).type_class()); | 
 | } | 
 |  | 
 | bool AbstractType::IsFloat64x2Type() const { | 
 |   // kFloat64x2Cid refers to the private class and cannot be used here. | 
 |   return HasTypeClass() && | 
 |          (type_class() == Type::Handle(Type::Float64x2()).type_class()); | 
 | } | 
 |  | 
 | bool AbstractType::IsInt32x4Type() const { | 
 |   // kInt32x4Cid refers to the private class and cannot be used here. | 
 |   return HasTypeClass() && | 
 |          (type_class() == Type::Handle(Type::Int32x4()).type_class()); | 
 | } | 
 |  | 
 | bool AbstractType::IsStringType() const { | 
 |   return HasTypeClass() && | 
 |          (type_class() == Type::Handle(Type::StringType()).type_class()); | 
 | } | 
 |  | 
 | bool AbstractType::IsDartFunctionType() const { | 
 |   return HasTypeClass() && | 
 |          (type_class() == Type::Handle(Type::DartFunctionType()).type_class()); | 
 | } | 
 |  | 
 | bool AbstractType::IsDartClosureType() const { | 
 |   return (type_class_id() == kClosureCid); | 
 | } | 
 |  | 
 | bool AbstractType::IsDartRecordType() const { | 
 |   if (!HasTypeClass()) return false; | 
 |   const auto cid = type_class_id(); | 
 |   return ((cid == kRecordCid) || | 
 |           (cid == Class::Handle( | 
 |                       IsolateGroup::Current()->object_store()->record_class()) | 
 |                       .id())); | 
 | } | 
 |  | 
 | bool AbstractType::IsFfiPointerType() const { | 
 |   return HasTypeClass() && type_class_id() == kPointerCid; | 
 | } | 
 |  | 
 | bool AbstractType::IsTypeClassAllowedBySpawnUri() const { | 
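  // Only a fixed allowlist of core classes qualifies; most are matched via
  // the object store because their interface classes have no constant cids.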
 |   if (!HasTypeClass()) return false; | 
 |  | 
 |   intptr_t cid = type_class_id(); | 
 |  | 
 |   if (cid == kBoolCid) return true; | 
 |   if (cid == kDynamicCid) return true; | 
 |   if (cid == kInstanceCid) return true;  // Object. | 
 |   if (cid == kNeverCid) return true; | 
 |   if (cid == kNullCid) return true; | 
 |   if (cid == kVoidCid) return true; | 
 |  | 
  // These are not constant CID checks because e.g. kDoubleCid refers to the
  // internal _Double class, not to double itself.
 |   ObjectStore* object_store = IsolateGroup::Current()->object_store(); | 
 |   Type& candidate_type = Type::Handle(); | 
 |   candidate_type = object_store->int_type(); | 
 |   if (cid == candidate_type.type_class_id()) return true; | 
 |   candidate_type = object_store->double_type(); | 
 |   if (cid == candidate_type.type_class_id()) return true; | 
 |   candidate_type = object_store->number_type(); | 
 |   if (cid == candidate_type.type_class_id()) return true; | 
 |   candidate_type = object_store->string_type(); | 
 |   if (cid == candidate_type.type_class_id()) return true; | 
 |  | 
 |   Class& candidate_cls = Class::Handle(); | 
 |   candidate_cls = object_store->list_class(); | 
 |   if (cid == candidate_cls.id()) return true; | 
 |   candidate_cls = object_store->map_class(); | 
 |   if (cid == candidate_cls.id()) return true; | 
 |   candidate_cls = object_store->set_class(); | 
 |   if (cid == candidate_cls.id()) return true; | 
 |   candidate_cls = object_store->capability_class(); | 
 |   if (cid == candidate_cls.id()) return true; | 
 |   candidate_cls = object_store->send_port_class(); | 
 |   if (cid == candidate_cls.id()) return true; | 
 |   candidate_cls = object_store->transferable_class(); | 
 |   if (cid == candidate_cls.id()) return true; | 
 |  | 
 |   const auto& typed_data_lib = | 
 |       Library::Handle(object_store->typed_data_library()); | 
 |  | 
 | #define IS_CHECK(name)                                                         \ | 
 |   candidate_cls = typed_data_lib.LookupClass(Symbols::name##List());           \ | 
 |   if (cid == candidate_cls.id()) {                                             \ | 
 |     return true;                                                               \ | 
 |   } | 
 |   DART_CLASS_LIST_TYPED_DATA(IS_CHECK) | 
 | #undef IS_CHECK | 
 |  | 
 |   return false; | 
 | } | 
 |  | 
 | AbstractTypePtr AbstractType::UnwrapFutureOr() const { | 
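  // Recursively strips FutureOr: e.g. FutureOr<FutureOr<int>> unwraps to
  // int, and a raw FutureOr without type arguments unwraps to dynamic.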
 |   if (!IsFutureOrType()) { | 
 |     return ptr(); | 
 |   } | 
 |   if (arguments() == TypeArguments::null()) { | 
 |     return Type::dynamic_type().ptr(); | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   REUSABLE_TYPE_ARGUMENTS_HANDLESCOPE(thread); | 
 |   TypeArguments& type_args = thread->TypeArgumentsHandle(); | 
 |   type_args = arguments(); | 
 |   REUSABLE_ABSTRACT_TYPE_HANDLESCOPE(thread); | 
 |   AbstractType& type_arg = thread->AbstractTypeHandle(); | 
 |   type_arg = type_args.TypeAt(0); | 
 |   while (type_arg.IsFutureOrType()) { | 
 |     if (type_arg.arguments() == TypeArguments::null()) { | 
 |       return Type::dynamic_type().ptr(); | 
 |     } | 
 |     type_args = type_arg.arguments(); | 
 |     type_arg = type_args.TypeAt(0); | 
 |   } | 
 |   return type_arg.ptr(); | 
 | } | 
 |  | 
 | bool AbstractType::IsSubtypeOf( | 
 |     const AbstractType& other, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_equivalence) const { | 
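  // The checks below proceed from cheap universal rules (reflexivity, top
  // and bottom types, Null) to type parameters, function types and record
  // types, and finally to the general class-based subtype test.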
 |   TRACE_TYPE_CHECKS_VERBOSE("  AbstractType::IsSubtypeOf(%s, %s)\n", | 
 |                             ToCString(), other.ToCString()); | 
 |   ASSERT(IsFinalized()); | 
 |   ASSERT(other.IsFinalized()); | 
 |   // Reflexivity. | 
 |   if (ptr() == other.ptr()) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: true (same types)\n"); | 
 |     return true; | 
 |   } | 
 |   // Right top type. | 
 |   if (other.IsTopTypeForSubtyping()) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: true (right is top)\n"); | 
 |     return true; | 
 |   } | 
 |   // Left bottom type. | 
  // Any form of Never in weak mode maps to Null, and Null is a bottom type
  // in weak mode. In strong mode, Never and Never* are bottom types, so they
  // are bottom types regardless of weak/strong mode.
 |   // Note that we cannot encounter Never?, as it is normalized to Null. | 
 |   if (IsNeverType()) { | 
 |     ASSERT(!IsNullable()); | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: true (left is Never)\n"); | 
 |     return true; | 
 |   } | 
 |   // Left top type. | 
 |   if (IsDynamicType() || IsVoidType()) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: false (left is top)\n"); | 
 |     return false; | 
 |   } | 
 |   // Left Null type. | 
 |   if (IsNullType()) { | 
 |     const bool result = Instance::NullIsAssignableTo(other); | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: %s (left is Null)\n", | 
 |                               (result ? "true" : "false")); | 
 |     return result; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   // Type parameters cannot be handled by Class::IsSubtypeOf(). | 
 |   // When comparing two uninstantiated function types, one returning type | 
 |   // parameter K, the other returning type parameter V, we cannot assume that | 
 |   // K is a subtype of V, or vice versa. We only return true if K equals V, as | 
 |   // defined by TypeParameter::Equals. | 
 |   // The same rule applies when checking the upper bound of a still | 
 |   // uninstantiated type at compile time. Returning false will defer the test | 
 |   // to run time. | 
 |   // There are however some cases that can be decided at compile time. | 
 |   // For example, with class A<K, V extends K>, new A<T, T> called from within | 
 |   // a class B<T> will never require a run time bound check, even if T is | 
 |   // uninstantiated at compile time. | 
 |   if (IsTypeParameter()) { | 
 |     const TypeParameter& type_param = TypeParameter::Cast(*this); | 
 |     if (other.IsTypeParameter()) { | 
 |       const TypeParameter& other_type_param = TypeParameter::Cast(other); | 
 |       if (type_param.IsEquivalent(other_type_param, | 
 |                                   TypeEquality::kInSubtypeTest, | 
 |                                   function_type_equivalence)) { | 
 |         TRACE_TYPE_CHECKS_VERBOSE( | 
 |             "   - result: true (equivalent type parameters)\n"); | 
 |         return true; | 
 |       } | 
 |     } | 
 |     const AbstractType& bound = AbstractType::Handle(zone, type_param.bound()); | 
 |     ASSERT(bound.IsFinalized()); | 
 |     if (bound.IsSubtypeOf(other, space, function_type_equivalence)) { | 
 |       TRACE_TYPE_CHECKS_VERBOSE("   - result: true (bound is a subtype)\n"); | 
 |       return true; | 
 |     } | 
 |     // Apply additional subtyping rules if 'other' is 'FutureOr'. | 
 |     if (IsSubtypeOfFutureOr(zone, other, space, function_type_equivalence)) { | 
 |       TRACE_TYPE_CHECKS_VERBOSE( | 
 |           "   - result: true (type parameter is a subtype of FutureOr)\n"); | 
 |       return true; | 
 |     } | 
 |     TRACE_TYPE_CHECKS_VERBOSE( | 
 |         "   - result: false (left is a type parameter)\n"); | 
 |     return false; | 
 |   } | 
 |   if (other.IsTypeParameter()) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE( | 
 |         "   - result: false (right is a type parameter)\n"); | 
 |     return false; | 
 |   } | 
 |   // Function types cannot be handled by Class::IsSubtypeOf(). | 
 |   if (IsFunctionType()) { | 
 |     // Any type that can be the type of a closure is a subtype of Function or | 
 |     // non-nullable Object. | 
 |     if (other.IsObjectType() || other.IsDartFunctionType()) { | 
 |       const bool result = !IsNullable() || !other.IsNonNullable(); | 
 |       TRACE_TYPE_CHECKS_VERBOSE("   - result: %s (function vs non-function)\n", | 
 |                                 (result ? "true" : "false")); | 
 |       return result; | 
 |     } | 
 |     if (other.IsFunctionType()) { | 
 |       // Check for two function types. | 
 |       if (IsNullable() && other.IsNonNullable()) { | 
 |         TRACE_TYPE_CHECKS_VERBOSE( | 
 |             "   - result: false (function nullability)\n"); | 
 |         return false; | 
 |       } | 
 |       const bool result = FunctionType::Cast(*this).IsSubtypeOf( | 
 |           FunctionType::Cast(other), space, function_type_equivalence); | 
 |       TRACE_TYPE_CHECKS_VERBOSE("   - result: %s (function types)\n", | 
 |                                 (result ? "true" : "false")); | 
 |       return result; | 
 |     } | 
 |     // Apply additional subtyping rules if 'other' is 'FutureOr'. | 
 |     if (IsSubtypeOfFutureOr(zone, other, space, function_type_equivalence)) { | 
 |       TRACE_TYPE_CHECKS_VERBOSE( | 
 |           "   - result: true (function type is a subtype of FutureOr)\n"); | 
 |       return true; | 
 |     } | 
 |     // All possible supertypes for FunctionType have been checked. | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: false (function type)\n"); | 
 |     return false; | 
 |   } else if (other.IsFunctionType()) { | 
 |     // FunctionTypes can only be subtyped by other FunctionTypes, so don't | 
 |     // fall through to class-based type tests. | 
 |     TRACE_TYPE_CHECKS_VERBOSE( | 
 |         "   - result: false (right is a function type)\n"); | 
 |     return false; | 
 |   } | 
 |   // Record types cannot be handled by Class::IsSubtypeOf(). | 
 |   if (IsRecordType()) { | 
 |     if (other.IsObjectType() || other.IsDartRecordType()) { | 
 |       const bool result = !IsNullable() || !other.IsNonNullable(); | 
 |       TRACE_TYPE_CHECKS_VERBOSE("   - result: %s (record vs non-record)\n", | 
 |                                 (result ? "true" : "false")); | 
 |       return result; | 
 |     } | 
 |     if (other.IsRecordType()) { | 
 |       // Check for two record types. | 
 |       if (IsNullable() && other.IsNonNullable()) { | 
 |         TRACE_TYPE_CHECKS_VERBOSE("   - result: false (record nullability)\n"); | 
 |         return false; | 
 |       } | 
 |       const bool result = RecordType::Cast(*this).IsSubtypeOf( | 
 |           RecordType::Cast(other), space, function_type_equivalence); | 
 |       TRACE_TYPE_CHECKS_VERBOSE("   - result: %s (record types)\n", | 
 |                                 (result ? "true" : "false")); | 
 |       return result; | 
 |     } | 
 |     // Apply additional subtyping rules if 'other' is 'FutureOr'. | 
 |     if (IsSubtypeOfFutureOr(zone, other, space, function_type_equivalence)) { | 
 |       TRACE_TYPE_CHECKS_VERBOSE( | 
 |           "   - result: true (record type is a subtype of FutureOr)\n"); | 
 |       return true; | 
 |     } | 
 |     // All possible supertypes for record type have been checked. | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: false (record type)\n"); | 
 |     return false; | 
 |   } else if (other.IsRecordType()) { | 
 |     // RecordTypes can only be subtyped by other RecordTypes, so don't | 
 |     // fall through to class-based type tests. | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: false (right is a record type)\n"); | 
 |     return false; | 
 |   } | 
 |   ASSERT(IsType()); | 
 |   const Class& type_cls = Class::Handle(zone, type_class()); | 
 |   const bool result = Class::IsSubtypeOf( | 
 |       type_cls, | 
 |       TypeArguments::Handle(zone, Type::Cast(*this).GetInstanceTypeArguments( | 
 |                                       thread, /*canonicalize=*/false)), | 
 |       nullability(), other, space, function_type_equivalence); | 
 |   TRACE_TYPE_CHECKS_VERBOSE("   - result: %s (class type check)\n", | 
 |                             (result ? "true" : "false")); | 
 |   return result; | 
 | } | 
 |  | 
 | bool AbstractType::IsSubtypeOfFutureOr( | 
 |     Zone* zone, | 
 |     const AbstractType& other, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_equivalence) const { | 
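  // Spec rule: T <: FutureOr<S> iff T <: Future<S> or T <: S. The receiver
  // can never be a Future here (asserted below), so only the top-type
  // shortcut and the T <: S branch need checking.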
 |   if (other.IsFutureOrType()) { | 
 |     // This function is only called with a receiver that is either a function | 
 |     // type, record type, or an uninstantiated type parameter. | 
 |     // Therefore, it cannot be of class Future and we can spare the check. | 
 |     ASSERT(IsFunctionType() || IsRecordType() || IsTypeParameter()); | 
 |     const TypeArguments& other_type_arguments = | 
 |         TypeArguments::Handle(zone, other.arguments()); | 
 |     const AbstractType& other_type_arg = | 
 |         AbstractType::Handle(zone, other_type_arguments.TypeAtNullSafe(0)); | 
 |     if (other_type_arg.IsTopTypeForSubtyping()) { | 
 |       return true; | 
 |     } | 
 |     // Retry the IsSubtypeOf check after unwrapping type arg of FutureOr. | 
 |     if (IsSubtypeOf(other_type_arg, space, function_type_equivalence)) { | 
 |       return true; | 
 |     } | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | uword AbstractType::ComputeHash() const { | 
 |   // AbstractType is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return 0; | 
 | } | 
 |  | 
 | const char* AbstractType::ToCString() const { | 
 |   // All subclasses should implement this appropriately, so the only value that | 
  // should reach this implementation is the null value.
 |   ASSERT(IsNull()); | 
 |   return "AbstractType: null"; | 
 | } | 
 |  | 
 | void AbstractType::SetTypeTestingStub(const Code& stub) const { | 
 |   if (stub.IsNull()) { | 
 |     InitializeTypeTestingStubNonAtomic(stub); | 
 |     return; | 
 |   } | 
 |  | 
 |   auto& old = Code::Handle(Thread::Current()->zone()); | 
 |   while (true) { | 
    // We load the old TTS and its entry point.
 |     old = untag()->type_test_stub<std::memory_order_acquire>(); | 
 |     uword old_entry_point = old.IsNull() ? 0 : old.EntryPoint(); | 
 |  | 
    // If we successfully update the entry point of the TTS, we also
    // unconditionally set its [Code].
    //
    // Any competing writer does the same: it loses the compare-exchange,
    // loops around, reloads the old [Code] TTS, and keeps losing the race
    // until we have finally also updated the [Code] TTS.
 |     if (untag()->type_test_stub_entry_point_.compare_exchange_strong( | 
 |             old_entry_point, stub.EntryPoint())) { | 
 |       untag()->set_type_test_stub<std::memory_order_release>(stub.ptr()); | 
 |       return; | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | void AbstractType::InitializeTypeTestingStubNonAtomic(const Code& stub) const { | 
 |   if (stub.IsNull()) { | 
 |     // This only happens during bootstrapping when creating Type objects before | 
 |     // we have the instructions. | 
 |     ASSERT(type_class_id() == kDynamicCid || type_class_id() == kVoidCid); | 
 |     StoreNonPointer(&untag()->type_test_stub_entry_point_, 0); | 
 |     untag()->set_type_test_stub(stub.ptr()); | 
 |     return; | 
 |   } | 
 |  | 
 |   StoreNonPointer(&untag()->type_test_stub_entry_point_, stub.EntryPoint()); | 
 |   untag()->set_type_test_stub(stub.ptr()); | 
 | } | 
 |  | 
 | TypePtr Type::NullType() { | 
 |   return IsolateGroup::Current()->object_store()->null_type(); | 
 | } | 
 |  | 
 | TypePtr Type::DynamicType() { | 
 |   return Object::dynamic_type().ptr(); | 
 | } | 
 |  | 
 | TypePtr Type::VoidType() { | 
 |   return Object::void_type().ptr(); | 
 | } | 
 |  | 
 | TypePtr Type::NeverType() { | 
 |   return IsolateGroup::Current()->object_store()->never_type(); | 
 | } | 
 |  | 
 | TypePtr Type::ObjectType() { | 
 |   return IsolateGroup::Current()->object_store()->object_type(); | 
 | } | 
 |  | 
 | TypePtr Type::BoolType() { | 
 |   return IsolateGroup::Current()->object_store()->bool_type(); | 
 | } | 
 |  | 
 | TypePtr Type::IntType() { | 
 |   return IsolateGroup::Current()->object_store()->int_type(); | 
 | } | 
 |  | 
 | TypePtr Type::NullableIntType() { | 
 |   return IsolateGroup::Current()->object_store()->nullable_int_type(); | 
 | } | 
 |  | 
 | TypePtr Type::SmiType() { | 
 |   return IsolateGroup::Current()->object_store()->smi_type(); | 
 | } | 
 |  | 
 | TypePtr Type::MintType() { | 
 |   return IsolateGroup::Current()->object_store()->mint_type(); | 
 | } | 
 |  | 
 | TypePtr Type::Double() { | 
 |   return IsolateGroup::Current()->object_store()->double_type(); | 
 | } | 
 |  | 
 | TypePtr Type::NullableDouble() { | 
 |   return IsolateGroup::Current()->object_store()->nullable_double_type(); | 
 | } | 
 |  | 
 | TypePtr Type::Float32x4() { | 
 |   return IsolateGroup::Current()->object_store()->float32x4_type(); | 
 | } | 
 |  | 
 | TypePtr Type::Float64x2() { | 
 |   return IsolateGroup::Current()->object_store()->float64x2_type(); | 
 | } | 
 |  | 
 | TypePtr Type::Int32x4() { | 
 |   return IsolateGroup::Current()->object_store()->int32x4_type(); | 
 | } | 
 |  | 
 | TypePtr Type::Number() { | 
 |   return IsolateGroup::Current()->object_store()->number_type(); | 
 | } | 
 |  | 
 | TypePtr Type::NullableNumber() { | 
 |   return IsolateGroup::Current()->object_store()->nullable_number_type(); | 
 | } | 
 |  | 
 | TypePtr Type::StringType() { | 
 |   return IsolateGroup::Current()->object_store()->string_type(); | 
 | } | 
 |  | 
 | TypePtr Type::ArrayType() { | 
 |   return IsolateGroup::Current()->object_store()->array_type(); | 
 | } | 
 |  | 
 | TypePtr Type::DartFunctionType() { | 
 |   return IsolateGroup::Current()->object_store()->function_type(); | 
 | } | 
 |  | 
 | TypePtr Type::DartTypeType() { | 
 |   return IsolateGroup::Current()->object_store()->type_type(); | 
 | } | 
 |  | 
 | TypePtr Type::NewNonParameterizedType(const Class& type_class) { | 
 |   ASSERT(type_class.NumTypeArguments() == 0); | 
 |   if (type_class.IsNullClass()) { | 
 |     return Type::NullType(); | 
 |   } | 
 |   if (type_class.IsDynamicClass()) { | 
 |     return Type::DynamicType(); | 
 |   } | 
 |   if (type_class.IsVoidClass()) { | 
 |     return Type::VoidType(); | 
 |   } | 
 |   // It is too early to use the class finalizer, as type_class may not be named | 
 |   // yet, so do not call DeclarationType(). | 
 |   Type& type = Type::Handle(type_class.declaration_type()); | 
 |   if (type.IsNull()) { | 
 |     type = Type::New(Class::Handle(type_class.ptr()), | 
 |                      Object::null_type_arguments(), Nullability::kNonNullable); | 
 |     type.SetIsFinalized(); | 
 |     type ^= type.Canonicalize(Thread::Current()); | 
 |     type_class.set_declaration_type(type); | 
 |   } | 
 |   ASSERT(type.IsFinalized()); | 
 |   return type.ptr(); | 
 | } | 
 |  | 
 | TypePtr Type::ToNullability(Nullability value, Heap::Space space) const { | 
 |   if (nullability() == value) { | 
 |     return ptr(); | 
 |   } | 
 |   // Type parameter instantiation may request a nullability change, which should | 
 |   // be ignored for types dynamic and void. Type Null cannot be the result of | 
 |   // instantiating a non-nullable type parameter (TypeError thrown). | 
 |   const classid_t cid = type_class_id(); | 
 |   if (cid == kDynamicCid || cid == kVoidCid || cid == kNullCid) { | 
 |     return ptr(); | 
 |   } | 
 |   if (cid == kNeverCid && value == Nullability::kNullable) { | 
 |     // Normalize Never? to Null. | 
 |     return Type::NullType(); | 
 |   } | 
 |   // Clone type and set new nullability. | 
 |   Type& type = Type::Handle(); | 
  // Cloning unconditionally in old space (and removing the space parameter)
  // would not satisfy existing requests for type instantiation in new space.
 |   // Load with relaxed atomics to prevent data race with updating type | 
 |   // testing stub. | 
 |   type ^= Object::Clone(*this, space, /*load_with_relaxed_atomics=*/true); | 
 |   type.set_nullability(value); | 
 |   type.SetHash(0); | 
 |   type.InitializeTypeTestingStubNonAtomic( | 
 |       Code::Handle(TypeTestingStubGenerator::DefaultCodeForType(type))); | 
 |   if (IsCanonical()) { | 
 |     // Object::Clone does not clone canonical bit. | 
 |     ASSERT(!type.IsCanonical()); | 
 |     type ^= type.Canonicalize(Thread::Current()); | 
 |   } | 
 |   return type.ptr(); | 
 | } | 
 |  | 
 | FunctionTypePtr FunctionType::ToNullability(Nullability value, | 
 |                                             Heap::Space space) const { | 
 |   if (nullability() == value) { | 
 |     return ptr(); | 
 |   } | 
 |   // Clone function type and set new nullability. | 
 |   FunctionType& type = FunctionType::Handle(FunctionType::Clone(*this, space)); | 
 |   type.set_nullability(value); | 
 |   type.SetHash(0); | 
 |   type.InitializeTypeTestingStubNonAtomic( | 
 |       Code::Handle(TypeTestingStubGenerator::DefaultCodeForType(type))); | 
 |   if (IsCanonical()) { | 
 |     // Object::Clone does not clone canonical bit. | 
 |     ASSERT(!type.IsCanonical()); | 
 |     type ^= type.Canonicalize(Thread::Current()); | 
 |   } | 
 |   return type.ptr(); | 
 | } | 
 |  | 
 | classid_t Type::type_class_id() const { | 
 |   return untag()->type_class_id(); | 
 | } | 
 |  | 
 | ClassPtr Type::type_class() const { | 
 |   return IsolateGroup::Current()->class_table()->At(type_class_id()); | 
 | } | 
 |  | 
 | bool Type::IsInstantiated(Genericity genericity, | 
 |                           intptr_t num_free_fun_type_params) const { | 
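  // The finalized type state answers the common cases directly; otherwise
  // the type arguments are scanned below.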
 |   if (type_state() == UntaggedType::kFinalizedInstantiated) { | 
 |     return true; | 
 |   } | 
 |   if ((genericity == kAny) && (num_free_fun_type_params == kAllFree) && | 
 |       (type_state() == UntaggedType::kFinalizedUninstantiated)) { | 
 |     return false; | 
 |   } | 
 |   if (arguments() == TypeArguments::null()) { | 
 |     return true; | 
 |   } | 
 |   const TypeArguments& args = TypeArguments::Handle(arguments()); | 
 |   return args.IsSubvectorInstantiated(0, args.Length(), genericity, | 
 |                                       num_free_fun_type_params); | 
 | } | 
 |  | 
 | AbstractTypePtr Type::InstantiateFrom( | 
 |     const TypeArguments& instantiator_type_arguments, | 
 |     const TypeArguments& function_type_arguments, | 
 |     intptr_t num_free_fun_type_params, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_mapping, | 
 |     intptr_t num_parent_type_args_adjustment) const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   ASSERT(IsFinalized()); | 
 |   ASSERT(!IsInstantiated()); | 
 |   // Note that the type class has to be resolved at this time, but not | 
 |   // necessarily finalized yet. We may be checking bounds at compile time or | 
 |   // finalizing the type argument vector of a recursive type. | 
 |   const Class& cls = Class::Handle(zone, type_class()); | 
 |   TypeArguments& type_arguments = TypeArguments::Handle(zone, arguments()); | 
 |   ASSERT(type_arguments.Length() == cls.NumTypeParameters()); | 
 |   type_arguments = type_arguments.InstantiateFrom( | 
 |       instantiator_type_arguments, function_type_arguments, | 
 |       num_free_fun_type_params, space, function_type_mapping, | 
 |       num_parent_type_args_adjustment); | 
 |   // A returned empty_type_arguments indicates a failed instantiation in dead | 
 |   // code that must be propagated up to the caller, the optimizing compiler. | 
 |   if (type_arguments.ptr() == Object::empty_type_arguments().ptr()) { | 
 |     return Type::null(); | 
 |   } | 
 |   // This uninstantiated type is not modified, as it can be instantiated | 
 |   // with different instantiators. Allocate a new instantiated version of it. | 
 |   const Type& instantiated_type = | 
 |       Type::Handle(zone, Type::New(cls, type_arguments, nullability(), space)); | 
 |   instantiated_type.SetIsFinalized(); | 
 |   // Canonicalization is not part of instantiation. | 
 |   return instantiated_type.NormalizeFutureOrType(space); | 
 | } | 
 |  | 
 | AbstractTypePtr Type::UpdateFunctionTypes( | 
 |     intptr_t num_parent_type_args_adjustment, | 
 |     intptr_t num_free_fun_type_params, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_mapping) const { | 
 |   ASSERT(IsFinalized()); | 
 |   ASSERT(num_parent_type_args_adjustment >= 0); | 
 |   if (arguments() == Object::null()) { | 
 |     return ptr(); | 
 |   } | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const auto& type_args = TypeArguments::Handle(zone, arguments()); | 
 |   const auto& updated_type_args = TypeArguments::Handle( | 
 |       zone, type_args.UpdateFunctionTypes(num_parent_type_args_adjustment, | 
 |                                           num_free_fun_type_params, space, | 
 |                                           function_type_mapping)); | 
 |   if (type_args.ptr() == updated_type_args.ptr()) { | 
 |     return ptr(); | 
 |   } | 
 |   const Class& cls = Class::Handle(zone, type_class()); | 
 |   const Type& new_type = Type::Handle( | 
 |       zone, Type::New(cls, updated_type_args, nullability(), space)); | 
 |   new_type.SetIsFinalized(); | 
 |   return new_type.ptr(); | 
 | } | 
 |  | 
 | // Certain built-in classes are treated as syntactically equivalent. | 
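// For example, the _Smi and _Mint implementation classes normalize to the
// class id of int, and the string implementation classes to that of String.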
 | static classid_t NormalizeClassIdForSyntacticalTypeEquality(classid_t cid) { | 
 |   if (IsIntegerClassId(cid)) { | 
 |     return Type::Handle(Type::IntType()).type_class_id(); | 
 |   } else if (IsStringClassId(cid)) { | 
 |     return Type::Handle(Type::StringType()).type_class_id(); | 
 |   } else if (cid == kDoubleCid) { | 
 |     return Type::Handle(Type::Double()).type_class_id(); | 
 |   } else if (IsTypeClassId(cid)) { | 
 |     return Type::Handle(Type::DartTypeType()).type_class_id(); | 
 |   } else if (IsArrayClassId(cid)) { | 
 |     return Class::Handle(IsolateGroup::Current()->object_store()->list_class()) | 
 |         .id(); | 
 |   } | 
 |   return cid; | 
 | } | 
 |  | 
 | bool Type::IsEquivalent(const Instance& other, | 
 |                         TypeEquality kind, | 
 |                         FunctionTypeMapping* function_type_equivalence) const { | 
 |   ASSERT(!IsNull()); | 
 |   if (ptr() == other.ptr()) { | 
 |     return true; | 
 |   } | 
 |   if (!other.IsType()) { | 
 |     return false; | 
 |   } | 
 |   const Type& other_type = Type::Cast(other); | 
 |   const classid_t type_cid = type_class_id(); | 
 |   const classid_t other_type_cid = other_type.type_class_id(); | 
 |   if (type_cid != other_type_cid) { | 
 |     if ((kind != TypeEquality::kSyntactical) || | 
 |         (NormalizeClassIdForSyntacticalTypeEquality(type_cid) != | 
 |          NormalizeClassIdForSyntacticalTypeEquality(other_type_cid))) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   ASSERT( | 
 |       Class::Handle(zone, type_class()).NumTypeParameters(thread) == | 
 |       Class::Handle(zone, other_type.type_class()).NumTypeParameters(thread)); | 
 |  | 
 |   if (!IsNullabilityEquivalent(thread, other_type, kind)) { | 
 |     return false; | 
 |   } | 
 |   if (!IsFinalized() || !other_type.IsFinalized()) { | 
 |     ASSERT(kind != TypeEquality::kCanonical); | 
 |     return false;  // Too early to decide if equal. | 
 |   } | 
 |   if (arguments() == other_type.arguments()) { | 
 |     return true; | 
 |   } | 
 |   const TypeArguments& type_args = | 
 |       TypeArguments::Handle(zone, this->arguments()); | 
 |   const TypeArguments& other_type_args = | 
 |       TypeArguments::Handle(zone, other_type.arguments()); | 
 |   return type_args.IsEquivalent(other_type_args, kind, | 
 |                                 function_type_equivalence); | 
 | } | 
 |  | 
 | bool FunctionType::IsEquivalent( | 
 |     const Instance& other, | 
 |     TypeEquality kind, | 
 |     FunctionTypeMapping* function_type_equivalence) const { | 
 |   ASSERT(!IsNull()); | 
 |   if (ptr() == other.ptr()) { | 
 |     return true; | 
 |   } | 
 |   if (!other.IsFunctionType()) { | 
 |     return false; | 
 |   } | 
 |   const FunctionType& other_type = FunctionType::Cast(other); | 
 |   if ((packed_parameter_counts() != other_type.packed_parameter_counts()) || | 
 |       (packed_type_parameter_counts() != | 
 |        other_type.packed_type_parameter_counts())) { | 
 |     // Different number of type parameters or parameters. | 
 |     return false; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   if (!IsNullabilityEquivalent(thread, other_type, kind)) { | 
 |     return false; | 
 |   } | 
 |   if (!IsFinalized() || !other_type.IsFinalized()) { | 
 |     ASSERT(kind != TypeEquality::kCanonical); | 
 |     return false;  // Too early to decide if equal. | 
 |   } | 
 |   FunctionTypeMapping scope(zone, &function_type_equivalence, *this, | 
 |                             other_type); | 
 |  | 
 |   // Equal function types must have equal signature types and equal optional | 
 |   // named arguments. | 
  // First check the type parameters and bounds of generic function types.
 |   if (!HasSameTypeParametersAndBounds(other_type, kind, | 
 |                                       function_type_equivalence)) { | 
 |     return false; | 
 |   } | 
 |   AbstractType& param_type = Type::Handle(zone); | 
 |   AbstractType& other_param_type = Type::Handle(zone); | 
 |   // Check the result type. | 
 |   param_type = result_type(); | 
 |   other_param_type = other_type.result_type(); | 
 |   if (!param_type.IsEquivalent(other_param_type, kind, | 
 |                                function_type_equivalence)) { | 
 |     return false; | 
 |   } | 
 |   // Check the types of all parameters. | 
 |   const intptr_t num_params = NumParameters(); | 
 |   ASSERT(other_type.NumParameters() == num_params); | 
 |   for (intptr_t i = 0; i < num_params; i++) { | 
 |     param_type = ParameterTypeAt(i); | 
 |     other_param_type = other_type.ParameterTypeAt(i); | 
 |     // Use contravariant order in case we test for subtyping. | 
 |     if (!other_param_type.IsEquivalent(param_type, kind, | 
 |                                        function_type_equivalence)) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   if (HasOptionalNamedParameters()) { | 
 |     ASSERT(other_type.HasOptionalNamedParameters());  // Same packed counts. | 
 |     for (intptr_t i = num_fixed_parameters(); i < num_params; i++) { | 
 |       if (ParameterNameAt(i) != other_type.ParameterNameAt(i)) { | 
 |         return false; | 
 |       } | 
 |       if (IsRequiredAt(i) != other_type.IsRequiredAt(i)) { | 
 |         return false; | 
 |       } | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | bool Type::IsDeclarationTypeOf(const Class& cls) const { | 
 |   ASSERT(type_class() == cls.ptr()); | 
 |   if (cls.IsNullClass()) { | 
 |     return true; | 
 |   } | 
 |   if (cls.IsGeneric() || cls.IsClosureClass()) { | 
 |     return false; | 
 |   } | 
 |   return nullability() == Nullability::kNonNullable; | 
 | } | 
 |  | 
 | // Keep in sync with TypeSerializationCluster::IsInCanonicalSet. | 
 | AbstractTypePtr Type::Canonicalize(Thread* thread) const { | 
 |   Zone* zone = thread->zone(); | 
 |   ASSERT(IsFinalized()); | 
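  // Canonicalization probes the canonical table under the
  // type_canonicalization_mutex, canonicalizes the type arguments outside
  // the lock, and re-checks the table before inserting. Declaration types
  // of simple classes are additionally cached per class below.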
 |   if (IsCanonical()) { | 
 | #ifdef DEBUG | 
 |     TypeArguments& type_args = TypeArguments::Handle(zone, arguments()); | 
 |     ASSERT(type_args.IsCanonical()); | 
 |     ASSERT(type_args.IsOld()); | 
 | #endif | 
 |     return this->ptr(); | 
 |   } | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   const classid_t cid = type_class_id(); | 
 |   if (cid == kDynamicCid) { | 
 |     ASSERT(Object::dynamic_type().IsCanonical()); | 
 |     return Object::dynamic_type().ptr(); | 
 |   } | 
 |  | 
 |   if (cid == kVoidCid) { | 
 |     ASSERT(Object::void_type().IsCanonical()); | 
 |     return Object::void_type().ptr(); | 
 |   } | 
 |  | 
 |   const Class& cls = Class::Handle(zone, type_class()); | 
 |  | 
 |   // Fast canonical lookup/registry for simple types. | 
 |   if (IsDeclarationTypeOf(cls)) { | 
 |     ASSERT(!cls.IsNullClass() || IsNullable()); | 
 |     Type& type = Type::Handle(zone, cls.declaration_type()); | 
 |     if (type.IsNull()) { | 
 |       ASSERT(!cls.ptr()->untag()->InVMIsolateHeap() || | 
 |              (isolate_group == Dart::vm_isolate_group())); | 
 |       // Canonicalize the type arguments of the supertype, if any. | 
 |       TypeArguments& type_args = TypeArguments::Handle(zone, arguments()); | 
 |       type_args = type_args.Canonicalize(thread); | 
 |       set_arguments(type_args); | 
 |       type = cls.declaration_type(); | 
 |       // May be set while canonicalizing type args. | 
 |       if (type.IsNull()) { | 
 |         SafepointMutexLocker ml(isolate_group->type_canonicalization_mutex()); | 
 |         // Recheck if type exists. | 
 |         type = cls.declaration_type(); | 
 |         if (type.IsNull()) { | 
 |           if (this->IsNew()) { | 
 |             type ^= Object::Clone(*this, Heap::kOld); | 
 |           } else { | 
 |             type = this->ptr(); | 
 |           } | 
 |           ASSERT(type.IsOld()); | 
 |           type.ComputeHash(); | 
 |           type.SetCanonical(); | 
 |           cls.set_declaration_type(type); | 
 |           return type.ptr(); | 
 |         } | 
 |       } | 
 |     } | 
 |     ASSERT(this->Equals(type)); | 
 |     ASSERT(type.IsOld()); | 
 |     if (type.IsCanonical()) { | 
 |       return type.ptr(); | 
 |     } | 
 |   } | 
 |  | 
 |   Type& type = Type::Handle(zone); | 
 |   ObjectStore* object_store = isolate_group->object_store(); | 
 |   { | 
 |     SafepointMutexLocker ml(isolate_group->type_canonicalization_mutex()); | 
 |     CanonicalTypeSet table(zone, object_store->canonical_types()); | 
 |     type ^= table.GetOrNull(CanonicalTypeKey(*this)); | 
 |     ASSERT(object_store->canonical_types() == table.Release().ptr()); | 
 |   } | 
 |   if (type.IsNull()) { | 
 |     // The type was not found in the table. It is not canonical yet. | 
 |  | 
 |     // Canonicalize the type arguments. | 
 |     TypeArguments& type_args = TypeArguments::Handle(zone, arguments()); | 
 |     ASSERT(type_args.IsNull() || | 
 |            (type_args.Length() == cls.NumTypeParameters())); | 
 |     type_args = type_args.Canonicalize(thread); | 
 |     set_arguments(type_args); | 
 |     ASSERT(type_args.IsNull() || type_args.IsOld()); | 
 |  | 
 |     // Check to see if the type got added to canonical table as part of the | 
 |     // type arguments canonicalization. | 
 |     SafepointMutexLocker ml(isolate_group->type_canonicalization_mutex()); | 
 |     CanonicalTypeSet table(zone, object_store->canonical_types()); | 
 |     type ^= table.GetOrNull(CanonicalTypeKey(*this)); | 
 |     if (type.IsNull()) { | 
 |       // Add this type into the canonical table of types. | 
 |       if (this->IsNew()) { | 
 |         type ^= Object::Clone(*this, Heap::kOld); | 
 |       } else { | 
 |         type = this->ptr(); | 
 |       } | 
 |       ASSERT(type.IsOld()); | 
 |       type.SetCanonical();  // Mark object as being canonical. | 
 |       bool present = table.Insert(type); | 
 |       ASSERT(!present); | 
 |     } | 
 |     object_store->set_canonical_types(table.Release()); | 
 |   } | 
 |   return type.ptr(); | 
 | } | 
 |  | 
 | void Type::EnumerateURIs(URIs* uris) const { | 
 |   if (IsDynamicType() || IsVoidType() || IsNeverType()) { | 
 |     return; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const Class& cls = Class::Handle(zone, type_class()); | 
 |   const String& name = String::Handle(zone, cls.UserVisibleName()); | 
 |   const Library& library = Library::Handle(zone, cls.library()); | 
 |   const String& uri = String::Handle(zone, library.url()); | 
 |   AddURI(uris, name, uri); | 
 |   const TypeArguments& type_args = TypeArguments::Handle(zone, arguments()); | 
 |   type_args.EnumerateURIs(uris); | 
 | } | 
 |  | 
 | void Type::PrintName(NameVisibility name_visibility, | 
 |                      BaseTextBuffer* printer) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const Class& cls = Class::Handle(zone, type_class()); | 
 |   const TypeParameters& params = | 
 |       TypeParameters::Handle(zone, cls.type_parameters()); | 
 |   printer->AddString(cls.NameCString(name_visibility)); | 
 |   const TypeArguments& args = TypeArguments::Handle(zone, arguments()); | 
 |   intptr_t num_type_params = 0; | 
 |   if (cls.is_declaration_loaded()) { | 
 |     num_type_params = cls.NumTypeParameters(thread); | 
  } else if (!args.IsNull() && args.ptr() != params.defaults()) {
 |     num_type_params = args.Length(); | 
 |   } | 
  if (num_type_params > 0) {
    args.PrintSubvectorName(0, num_type_params, name_visibility, printer);
  }
 |   printer->AddString(NullabilitySuffix(name_visibility)); | 
 |   // The name is only used for type checking and debugging purposes. | 
 |   // Unless profiling data shows otherwise, it is not worth caching the name in | 
 |   // the type. | 
 | } | 
 |  | 
 | uword Type::ComputeHash() const { | 
 |   ASSERT(IsFinalized()); | 
 |   uint32_t result = type_class_id(); | 
 |   result = CombineHashes(result, static_cast<uint32_t>(nullability())); | 
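  // A null type argument vector denotes all-dynamic arguments; use the
  // canonical constant rather than dereferencing the null vector.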
 |   uint32_t type_args_hash = TypeArguments::kAllDynamicHash; | 
 |   if (arguments() != TypeArguments::null()) { | 
 |     const TypeArguments& args = TypeArguments::Handle(arguments()); | 
 |     type_args_hash = args.Hash(); | 
 |   } | 
 |   result = CombineHashes(result, type_args_hash); | 
 |   result = FinalizeHash(result, kHashBits); | 
 |   SetHash(result); | 
 |   return result; | 
 | } | 
 |  | 
 | uword FunctionType::ComputeHash() const { | 
 |   ASSERT(IsFinalized()); | 
 |   uint32_t result = | 
 |       CombineHashes(packed_parameter_counts(), packed_type_parameter_counts()); | 
 |   result = CombineHashes(result, static_cast<uint32_t>(nullability())); | 
 |   AbstractType& type = AbstractType::Handle(); | 
 |   const intptr_t num_type_params = NumTypeParameters(); | 
 |   if (num_type_params > 0) { | 
 |     const TypeParameters& type_params = | 
 |         TypeParameters::Handle(type_parameters()); | 
 |     const TypeArguments& bounds = TypeArguments::Handle(type_params.bounds()); | 
 |     result = CombineHashes(result, bounds.Hash()); | 
 |     // Since the default arguments are ignored when comparing two generic | 
 |     // function types for type equality, the hash does not depend on them. | 
 |   } | 
 |   type = result_type(); | 
 |   result = CombineHashes(result, type.Hash()); | 
 |   const intptr_t num_params = NumParameters(); | 
 |   for (intptr_t i = 0; i < num_params; i++) { | 
 |     type = ParameterTypeAt(i); | 
 |     result = CombineHashes(result, type.Hash()); | 
 |   } | 
 |   if (HasOptionalNamedParameters()) { | 
 |     String& param_name = String::Handle(); | 
 |     for (intptr_t i = num_fixed_parameters(); i < num_params; i++) { | 
 |       param_name = ParameterNameAt(i); | 
 |       result = CombineHashes(result, param_name.Hash()); | 
 |     } | 
    // The 'required' flag is not hashed: equal function types agree on it,
    // so omitting it from the hash is safe and merely allows collisions.
 |   } | 
 |   result = FinalizeHash(result, kHashBits); | 
 |   SetHash(result); | 
 |   return result; | 
 | } | 
 |  | 
 | void Type::set_type_class(const Class& value) const { | 
 |   ASSERT(!value.IsNull()); | 
 |   set_type_class_id(value.id()); | 
 | } | 
 |  | 
 | void Type::set_arguments(const TypeArguments& value) const { | 
 |   ASSERT(!IsCanonical()); | 
 |   ASSERT(value.IsNull() || | 
 |          // Do not attempt to query number of type parameters | 
 |          // before class declaration is fully loaded. | 
 |          !Class::Handle(type_class()).is_declaration_loaded() || | 
 |          // Relax assertion in order to support invalid generic types | 
 |          // created in ClosureMirror_function. | 
 |          (type_class_id() == kInstanceCid) || | 
 |          value.Length() == Class::Handle(type_class()).NumTypeParameters()); | 
 |   untag()->set_arguments(value.ptr()); | 
 | } | 
 |  | 
 | TypeArgumentsPtr Type::GetInstanceTypeArguments(Thread* thread, | 
 |                                                 bool canonicalize) const { | 
 |   Zone* zone = thread->zone(); | 
 |   const auto& cls = Class::Handle(zone, type_class()); | 
 |   const auto& args = TypeArguments::Handle(zone, arguments()); | 
 |   return cls.GetInstanceTypeArguments(thread, args, canonicalize); | 
 | } | 
 |  | 
 | TypePtr Type::New(Heap::Space space) { | 
 |   return Object::Allocate<Type>(space); | 
 | } | 
 |  | 
 | TypePtr Type::New(const Class& clazz, | 
 |                   const TypeArguments& arguments, | 
 |                   Nullability nullability, | 
 |                   Heap::Space space) { | 
 |   Zone* Z = Thread::Current()->zone(); | 
 |   const Type& result = Type::Handle(Z, Type::New(space)); | 
 |   result.SetHash(0); | 
 |   result.set_flags(0); | 
 |   result.set_nullability(nullability); | 
 |   result.set_type_state(UntaggedAbstractType::kAllocated); | 
 |   result.set_type_class(clazz); | 
 |   result.set_arguments(arguments); | 
 |  | 
 |   result.InitializeTypeTestingStubNonAtomic( | 
 |       Code::Handle(Z, TypeTestingStubGenerator::DefaultCodeForType(result))); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void Type::set_type_class_id(intptr_t id) const { | 
 |   ASSERT(Utils::IsUint(UntaggedObject::kClassIdTagSize, id)); | 
 |   // We should never need a Type object for a top-level class. | 
 |   ASSERT(!ClassTable::IsTopLevelCid(id)); | 
 |   ASSERT(id != kIllegalCid); | 
 |   ASSERT(!IsInternalOnlyClassId(id)); | 
 |   untag()->set_type_class_id(id); | 
 | } | 
 |  | 
 | const char* Type::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "Type: null"; | 
 |   } | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   ZoneTextBuffer args(zone); | 
 |   const TypeArguments& type_args = TypeArguments::Handle(zone, arguments()); | 
 |   const char* args_cstr = ""; | 
 |   if (!type_args.IsNull()) { | 
 |     type_args.PrintSubvectorName(0, type_args.Length(), kInternalName, &args); | 
 |     args_cstr = args.buffer(); | 
 |   } | 
 |   const Class& cls = Class::Handle(zone, type_class()); | 
  const String& name = String::Handle(zone, cls.Name());
  const char* class_name = name.IsNull() ? "<null>" : name.ToCString();
 |   const char* suffix = NullabilitySuffix(kInternalName); | 
 |   return OS::SCreate(zone, "Type: %s%s%s", class_name, args_cstr, suffix); | 
 | } | 
 |  | 
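// Canonicalization below uses a double-checked pattern: a first lookup in the
// canonical table is done under the type canonicalization mutex; on a miss,
// the component types of the signature are canonicalized outside the lock,
// and a second lookup precedes the insertion, since canonicalizing the
// components may itself have added an equivalent signature to the table.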
 | AbstractTypePtr FunctionType::Canonicalize(Thread* thread) const { | 
 |   ASSERT(IsFinalized()); | 
 |   Zone* zone = thread->zone(); | 
 |   if (IsCanonical()) { | 
 | #ifdef DEBUG | 
 |     // Verify that all fields are allocated in old space and are canonical. | 
 |     if (IsGeneric()) { | 
 |       const TypeParameters& type_params = | 
 |           TypeParameters::Handle(zone, type_parameters()); | 
 |       ASSERT(type_params.IsOld()); | 
 |       TypeArguments& type_args = TypeArguments::Handle(zone); | 
 |       type_args = type_params.bounds(); | 
 |       ASSERT(type_args.IsOld()); | 
 |       ASSERT(type_args.IsCanonical()); | 
 |       type_args = type_params.defaults(); | 
 |       ASSERT(type_args.IsOld()); | 
 |       ASSERT(type_args.IsCanonical()); | 
 |     } | 
 |     AbstractType& type = AbstractType::Handle(zone); | 
 |     type = result_type(); | 
 |     ASSERT(type.IsOld()); | 
 |     ASSERT(type.IsCanonical()); | 
 |     ASSERT(Array::Handle(zone, parameter_types()).IsOld()); | 
 |     ASSERT(Array::Handle(zone, named_parameter_names()).IsOld()); | 
 |     const intptr_t num_params = NumParameters(); | 
 |     for (intptr_t i = 0; i < num_params; i++) { | 
 |       type = ParameterTypeAt(i); | 
 |       ASSERT(type.IsOld()); | 
 |       ASSERT(type.IsCanonical()); | 
 |     } | 
 | #endif | 
 |     return ptr(); | 
 |   } | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   ObjectStore* object_store = isolate_group->object_store(); | 
 |   FunctionType& sig = FunctionType::Handle(zone); | 
 |   { | 
 |     SafepointMutexLocker ml(isolate_group->type_canonicalization_mutex()); | 
 |     CanonicalFunctionTypeSet table(zone, | 
 |                                    object_store->canonical_function_types()); | 
 |     sig ^= table.GetOrNull(CanonicalFunctionTypeKey(*this)); | 
 |     ASSERT(object_store->canonical_function_types() == table.Release().ptr()); | 
 |   } | 
 |   if (sig.IsNull()) { | 
 |     // The function type was not found in the table. It is not canonical yet. | 
 |     // Canonicalize its type parameters and types. | 
 |  | 
 |     // Clone this function type to the old heap and update | 
 |     // owners of type parameters. | 
 |     FunctionType& new_sig = FunctionType::Handle(zone); | 
 |     if (this->IsNew()) { | 
 |       new_sig ^= FunctionType::Clone(*this, Heap::kOld); | 
 |     } else { | 
 |       new_sig ^= this->ptr(); | 
 |     } | 
 |     ASSERT(new_sig.IsOld()); | 
 |  | 
 |     if (new_sig.IsGeneric()) { | 
 |       const TypeParameters& type_params = | 
 |           TypeParameters::Handle(zone, new_sig.type_parameters()); | 
 |       ASSERT(type_params.IsOld()); | 
 |       TypeArguments& type_args = TypeArguments::Handle(zone); | 
 |       type_args = type_params.bounds(); | 
 |       if (!type_args.IsCanonical()) { | 
 |         type_args = type_args.Canonicalize(thread); | 
 |         type_params.set_bounds(type_args); | 
 |       } | 
 |       type_args = type_params.defaults(); | 
 |       if (!type_args.IsCanonical()) { | 
 |         type_args = type_args.Canonicalize(thread); | 
 |         type_params.set_defaults(type_args); | 
 |       } | 
 |     } | 
 |     AbstractType& type = AbstractType::Handle(zone); | 
 |     type = new_sig.result_type(); | 
 |     if (!type.IsCanonical()) { | 
 |       type = type.Canonicalize(thread); | 
 |       new_sig.set_result_type(type); | 
 |     } | 
 |     ASSERT(Array::Handle(zone, new_sig.parameter_types()).IsOld()); | 
 |     ASSERT(Array::Handle(zone, new_sig.named_parameter_names()).IsOld()); | 
 |     const intptr_t num_params = new_sig.NumParameters(); | 
 |     for (intptr_t i = 0; i < num_params; i++) { | 
 |       type = new_sig.ParameterTypeAt(i); | 
 |       if (!type.IsCanonical()) { | 
 |         type = type.Canonicalize(thread); | 
 |         new_sig.SetParameterTypeAt(i, type); | 
 |       } | 
 |     } | 
    // Check to see if the function type got added to the canonical table
    // during canonicalization of its signature types.
 |     SafepointMutexLocker ml(isolate_group->type_canonicalization_mutex()); | 
 |     CanonicalFunctionTypeSet table(zone, | 
 |                                    object_store->canonical_function_types()); | 
 |     sig ^= table.GetOrNull(CanonicalFunctionTypeKey(new_sig)); | 
 |     if (sig.IsNull()) { | 
 |       // Add this function type into the canonical table of function types. | 
 |       sig = new_sig.ptr(); | 
 |       ASSERT(sig.IsOld()); | 
 |       sig.SetCanonical();  // Mark object as being canonical. | 
 |       bool present = table.Insert(sig); | 
 |       ASSERT(!present); | 
 |     } | 
 |     object_store->set_canonical_function_types(table.Release()); | 
 |   } | 
 |   return sig.ptr(); | 
 | } | 
 |  | 
 | void FunctionType::EnumerateURIs(URIs* uris) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   AbstractType& type = AbstractType::Handle(zone); | 
 |   const intptr_t num_params = NumParameters(); | 
 |   for (intptr_t i = 0; i < num_params; i++) { | 
 |     type = ParameterTypeAt(i); | 
 |     type.EnumerateURIs(uris); | 
 |   } | 
 |   // Handle result type last, since it appears last in the user visible name. | 
 |   type = result_type(); | 
 |   type.EnumerateURIs(uris); | 
 | } | 
 |  | 
 | void FunctionType::PrintName(NameVisibility name_visibility, | 
 |                              BaseTextBuffer* printer) const { | 
 |   const char* suffix = NullabilitySuffix(name_visibility); | 
 |   if (suffix[0] != '\0') { | 
 |     printer->AddString("("); | 
 |   } | 
  Print(name_visibility, printer);
 |   if (suffix[0] != '\0') { | 
 |     printer->AddString(")"); | 
 |     printer->AddString(suffix); | 
 |   } | 
 | } | 
 |  | 
 | TypeParameterPtr TypeParameter::ToNullability(Nullability value, | 
 |                                               Heap::Space space) const { | 
 |   if (nullability() == value) { | 
 |     return ptr(); | 
 |   } | 
 |   // Clone type parameter and set new nullability. | 
 |   TypeParameter& type_parameter = TypeParameter::Handle(); | 
 |   type_parameter ^= Object::Clone(*this, space); | 
 |   type_parameter.set_nullability(value); | 
 |   type_parameter.SetHash(0); | 
 |   type_parameter.InitializeTypeTestingStubNonAtomic(Code::Handle( | 
 |       TypeTestingStubGenerator::DefaultCodeForType(type_parameter))); | 
 |   if (IsCanonical()) { | 
 |     // Object::Clone does not clone canonical bit. | 
 |     ASSERT(!type_parameter.IsCanonical()); | 
 |     ASSERT(IsFinalized()); | 
 |     ASSERT(type_parameter.IsFinalized()); | 
 |     type_parameter ^= type_parameter.Canonicalize(Thread::Current()); | 
 |   } | 
 |   return type_parameter.ptr(); | 
 | } | 
 |  | 
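// A type parameter is considered instantiated with respect to the given
// genericity if it is not a free variable of that kind: e.g. a class type
// parameter counts as instantiated when only function type parameters
// (genericity kFunctions) are considered free.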
 | bool TypeParameter::IsInstantiated(Genericity genericity, | 
 |                                    intptr_t num_free_fun_type_params) const { | 
 |   // Bounds of class type parameters are ignored in the VM. | 
 |   if (IsClassTypeParameter()) { | 
 |     return genericity == kFunctions; | 
 |   } | 
 |   ASSERT(IsFunctionTypeParameter()); | 
 |   return (genericity == kCurrentClass) || (index() >= num_free_fun_type_params); | 
 | } | 
 |  | 
 | bool TypeParameter::IsEquivalent( | 
 |     const Instance& other, | 
 |     TypeEquality kind, | 
 |     FunctionTypeMapping* function_type_equivalence) const { | 
 |   TRACE_TYPE_CHECKS_VERBOSE("  TypeParameter::IsEquivalent(%s, %s, kind %d)\n", | 
 |                             ToCString(), other.ToCString(), | 
 |                             static_cast<int>(kind)); | 
 |   if (ptr() == other.ptr()) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: true (same types)\n"); | 
 |     return true; | 
 |   } | 
 |   if (!other.IsTypeParameter()) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE( | 
 |         "   - result: false (other is not a type parameter)\n"); | 
 |     return false; | 
 |   } | 
 |   const TypeParameter& other_type_param = TypeParameter::Cast(other); | 
 |   ASSERT(IsFinalized() && other_type_param.IsFinalized()); | 
 |   // Compare index, base and owner. | 
 |   if (IsFunctionTypeParameter()) { | 
 |     if (!other_type_param.IsFunctionTypeParameter()) { | 
 |       TRACE_TYPE_CHECKS_VERBOSE( | 
 |           "   - result: false (other is not a function type parameter)\n"); | 
 |       return false; | 
 |     } | 
 |     if ((parameterized_function_type() != | 
 |          other_type_param.parameterized_function_type()) && | 
 |         ((function_type_equivalence == nullptr) || | 
 |          !function_type_equivalence->ContainsOwnersOfTypeParameters( | 
 |              *this, other_type_param))) { | 
 |       TRACE_TYPE_CHECKS_VERBOSE( | 
 |           "   - result: false (owners are not equivalent)\n"); | 
 |       return false; | 
 |     } | 
 |   } else { | 
 |     if (!other_type_param.IsClassTypeParameter()) { | 
 |       TRACE_TYPE_CHECKS_VERBOSE( | 
 |           "   - result: false (other is not a class type parameter)\n"); | 
 |       return false; | 
 |     } | 
 |     if (parameterized_class_id() != other_type_param.parameterized_class_id()) { | 
 |       TRACE_TYPE_CHECKS_VERBOSE( | 
 |           "   - result: false (parameterized class id)\n"); | 
 |       return false; | 
 |     } | 
 |   } | 
 |   if (base() != other_type_param.base() || | 
 |       index() != other_type_param.index()) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: false (mismatch base/index)\n"); | 
 |     return false; | 
 |   } | 
 |   if (!IsNullabilityEquivalent(Thread::Current(), other_type_param, kind)) { | 
 |     TRACE_TYPE_CHECKS_VERBOSE("   - result: false (mismatch nullability)\n"); | 
 |     return false; | 
 |   } | 
 |   TRACE_TYPE_CHECKS_VERBOSE("   - result: true\n"); | 
 |   return true; | 
 | } | 
 |  | 
 | void TypeParameter::set_owner(const Object& value) const { | 
 |   ASSERT((IsFunctionTypeParameter() && value.IsFunctionType()) || | 
 |          (IsClassTypeParameter() && value.IsSmi())); | 
 |   untag()->set_owner(value.ptr()); | 
 | } | 
 |  | 
 | classid_t TypeParameter::parameterized_class_id() const { | 
 |   if (IsClassTypeParameter()) { | 
 |     return Smi::Value(Smi::RawCast(untag()->owner())); | 
 |   } else { | 
 |     return kFunctionCid; | 
 |   } | 
 | } | 
 | void TypeParameter::set_parameterized_class_id(classid_t value) const { | 
 |   ASSERT(IsClassTypeParameter()); | 
 |   untag()->set_owner(Smi::New(value)); | 
 | } | 
 |  | 
 | ClassPtr TypeParameter::parameterized_class() const { | 
 |   if (IsClassTypeParameter()) { | 
 |     const classid_t cid = parameterized_class_id(); | 
 |     if (cid != kIllegalCid) { | 
 |       return IsolateGroup::Current()->class_table()->At(cid); | 
 |     } | 
 |   } | 
 |   return Class::null(); | 
 | } | 
 |  | 
 | FunctionTypePtr TypeParameter::parameterized_function_type() const { | 
 |   ASSERT(IsFunctionTypeParameter()); | 
 |   return FunctionType::RawCast(untag()->owner()); | 
 | } | 
 |  | 
 | void TypeParameter::set_base(intptr_t value) const { | 
 |   ASSERT(value >= 0); | 
 |   ASSERT(Utils::IsUint(16, value)); | 
 |   StoreNonPointer(&untag()->base_, value); | 
 | } | 
 |  | 
 | void TypeParameter::set_index(intptr_t value) const { | 
 |   ASSERT(value >= 0); | 
 |   ASSERT(Utils::IsUint(16, value)); | 
 |   StoreNonPointer(&untag()->index_, value); | 
 | } | 
 |  | 
 | AbstractTypePtr TypeParameter::bound() const { | 
 |   if (IsFunctionTypeParameter()) { | 
 |     const auto& owner = FunctionType::Handle(parameterized_function_type()); | 
 |     const auto& type_parameters = | 
 |         TypeParameters::Handle(owner.type_parameters()); | 
 |     return type_parameters.BoundAt(index() - base()); | 
 |   } else { | 
 |     const auto& owner = Class::Handle(parameterized_class()); | 
 |     if (owner.IsNull()) { | 
 |       return IsolateGroup::Current()->object_store()->nullable_object_type(); | 
 |     } | 
 |     const auto& type_parameters = | 
 |         TypeParameters::Handle(owner.type_parameters()); | 
 |     return type_parameters.BoundAt(index() - base()); | 
 |   } | 
 | } | 
 |  | 
 | AbstractTypePtr TypeParameter::GetFromTypeArguments( | 
 |     const TypeArguments& instantiator_type_arguments, | 
 |     const TypeArguments& function_type_arguments) const { | 
 |   ASSERT(IsFinalized()); | 
 |   const TypeArguments& type_args = IsFunctionTypeParameter() | 
 |                                        ? function_type_arguments | 
 |                                        : instantiator_type_arguments; | 
 |   return type_args.TypeAtNullSafe(index()); | 
 | } | 
 |  | 
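// Instantiation substitutes a type parameter with the type at its index in
// the corresponding type arguments vector: function type parameters are
// looked up in function_type_arguments, class type parameters in
// instantiator_type_arguments. Function type parameters with an index at or
// above num_free_fun_type_params are bound by an enclosing generic function
// type that is being cloned rather than instantiated, so they are remapped
// via function_type_mapping instead of being substituted.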
 | AbstractTypePtr TypeParameter::InstantiateFrom( | 
 |     const TypeArguments& instantiator_type_arguments, | 
 |     const TypeArguments& function_type_arguments, | 
 |     intptr_t num_free_fun_type_params, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_mapping, | 
 |     intptr_t num_parent_type_args_adjustment) const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   AbstractType& result = AbstractType::Handle(zone); | 
 |   bool substituted = false; | 
 |   if (IsFunctionTypeParameter()) { | 
 |     ASSERT(IsFinalized()); | 
 |     if (index() >= num_free_fun_type_params) { | 
 |       // Do not instantiate the function type parameter. | 
 |       // Get a replacement from the updated function type. | 
 |       ASSERT(function_type_mapping != nullptr); | 
 |       result = function_type_mapping->MapTypeParameter(*this); | 
 |       ASSERT(TypeParameter::Cast(result).index() == | 
 |              index() - num_free_fun_type_params); | 
 |       ASSERT(TypeParameter::Cast(result).base() == | 
 |              base() - num_free_fun_type_params); | 
 |       ASSERT(TypeParameter::Cast(result).nullability() == nullability()); | 
 |       AbstractType& upper_bound = AbstractType::Handle(zone, bound()); | 
 |       if (!upper_bound.IsInstantiated()) { | 
 |         upper_bound = upper_bound.InstantiateFrom( | 
 |             instantiator_type_arguments, function_type_arguments, | 
 |             num_free_fun_type_params, space, function_type_mapping, | 
 |             num_parent_type_args_adjustment); | 
 |       } | 
 |       if (upper_bound.ptr() == Type::NeverType()) { | 
 |         // Normalize 'X extends Never' to 'Never'. | 
 |         result = Type::NeverType(); | 
 |       } | 
 |     } else if (function_type_arguments.IsNull()) { | 
 |       return Type::DynamicType(); | 
 |     } else { | 
 |       result = function_type_arguments.TypeAt(index()); | 
 |       substituted = true; | 
 |     } | 
 |   } else { | 
 |     ASSERT(IsClassTypeParameter()); | 
 |     ASSERT(IsFinalized()); | 
 |     if (instantiator_type_arguments.IsNull()) { | 
 |       return Type::DynamicType(); | 
 |     } | 
 |     if (instantiator_type_arguments.Length() <= index()) { | 
      // InstantiateFrom can be invoked from the compilation pipeline with
      // a mismatching type arguments vector. This can only happen for
      // dynamically unreachable code which the compiler was unable to
      // remove statically.
      // To prevent crashes we return AbstractType::null(), which is
      // understood by the caller (see AssertAssignableInstr::Canonicalize).
 |       return AbstractType::null(); | 
 |     } | 
 |     result = instantiator_type_arguments.TypeAt(index()); | 
 |     substituted = true; | 
 |     // Instantiating a class type parameter cannot result in a | 
 |     // function type parameter. | 
 |     // Bounds of class type parameters are ignored in the VM. | 
 |   } | 
 |   result = result.SetInstantiatedNullability(*this, space); | 
 |   if (substituted && (num_parent_type_args_adjustment != 0)) { | 
 |     // This type parameter is used inside a generic function type. | 
 |     // A type being substituted can have nested function types, | 
 |     // whose number of parent function type arguments should be adjusted | 
 |     // after the substitution. | 
 |     result = result.UpdateFunctionTypes(num_parent_type_args_adjustment, | 
 |                                         kAllFree, space, function_type_mapping); | 
 |   } | 
 |   // Canonicalization is not part of instantiation. | 
 |   return result.NormalizeFutureOrType(space); | 
 | } | 
 |  | 
 | AbstractTypePtr TypeParameter::UpdateFunctionTypes( | 
 |     intptr_t num_parent_type_args_adjustment, | 
 |     intptr_t num_free_fun_type_params, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_mapping) const { | 
 |   ASSERT(IsFinalized()); | 
 |   ASSERT(num_parent_type_args_adjustment >= 0); | 
 |   if (IsFunctionTypeParameter() && (index() >= num_free_fun_type_params)) { | 
 |     Zone* zone = Thread::Current()->zone(); | 
 |     ASSERT(function_type_mapping != nullptr); | 
 |     const auto& new_tp = TypeParameter::Handle( | 
 |         zone, function_type_mapping->MapTypeParameter(*this)); | 
 |     ASSERT(new_tp.base() == base() + num_parent_type_args_adjustment); | 
 |     ASSERT(new_tp.index() == index() + num_parent_type_args_adjustment); | 
 |     ASSERT(new_tp.nullability() == nullability()); | 
 |     ASSERT(new_tp.IsFinalized()); | 
 |     return new_tp.ptr(); | 
 |   } else { | 
 |     return ptr(); | 
 |   } | 
 | } | 
 |  | 
 | AbstractTypePtr TypeParameter::Canonicalize(Thread* thread) const { | 
 |   ASSERT(IsFinalized()); | 
 |   Zone* zone = thread->zone(); | 
 |   if (IsCanonical()) { | 
 | #ifdef DEBUG | 
 |     if (IsFunctionTypeParameter()) { | 
 |       ASSERT(FunctionType::Handle(zone, parameterized_function_type()).IsOld()); | 
 |     } | 
 | #endif | 
 |     return this->ptr(); | 
 |   } | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   ObjectStore* object_store = isolate_group->object_store(); | 
 |   TypeParameter& type_parameter = TypeParameter::Handle(zone); | 
 |   { | 
 |     SafepointMutexLocker ml(isolate_group->type_canonicalization_mutex()); | 
 |     CanonicalTypeParameterSet table(zone, | 
 |                                     object_store->canonical_type_parameters()); | 
 |     type_parameter ^= table.GetOrNull(CanonicalTypeParameterKey(*this)); | 
 |     if (type_parameter.IsNull()) { | 
 |       // Add this type parameter into the canonical table of type parameters. | 
 |       if (this->IsNew()) { | 
 |         type_parameter ^= Object::Clone(*this, Heap::kOld); | 
 |       } else { | 
 |         type_parameter = this->ptr(); | 
 |       } | 
 |       ASSERT(type_parameter.IsOld()); | 
 |       type_parameter.SetCanonical();  // Mark object as being canonical. | 
 |       bool present = table.Insert(type_parameter); | 
 |       ASSERT(!present); | 
 |     } | 
 |     object_store->set_canonical_type_parameters(table.Release()); | 
 |   } | 
 |   return type_parameter.ptr(); | 
 | } | 
 |  | 
 | void TypeParameter::PrintName(NameVisibility name_visibility, | 
 |                               BaseTextBuffer* printer) const { | 
  // Type parameter names are meaningless after canonicalization.
  printer->AddString(CanonicalNameCString());
  printer->AddString(NullabilitySuffix(name_visibility));
 | } | 
 |  | 
 | uword TypeParameter::ComputeHash() const { | 
 |   ASSERT(IsFinalized()); | 
 |   uint32_t result = parameterized_class_id(); | 
 |   result = CombineHashes(result, base()); | 
 |   result = CombineHashes(result, index()); | 
 |   result = CombineHashes(result, static_cast<uint32_t>(nullability())); | 
 |   result = FinalizeHash(result, kHashBits); | 
 |   SetHash(result); | 
 |   return result; | 
 | } | 
 |  | 
 | TypeParameterPtr TypeParameter::New() { | 
 |   return Object::Allocate<TypeParameter>(Heap::kOld); | 
 | } | 
 |  | 
 | TypeParameterPtr TypeParameter::New(const Object& owner, | 
 |                                     intptr_t base, | 
 |                                     intptr_t index, | 
 |                                     Nullability nullability) { | 
 |   ASSERT(owner.IsNull() || owner.IsClass() || owner.IsFunctionType()); | 
 |   const bool is_function_type_parameter = owner.IsFunctionType(); | 
 |   const uint32_t flags = UntaggedTypeParameter::IsFunctionTypeParameter::encode( | 
 |       is_function_type_parameter); | 
 |   Zone* Z = Thread::Current()->zone(); | 
 |   const TypeParameter& result = TypeParameter::Handle(Z, TypeParameter::New()); | 
 |   result.set_flags(flags); | 
 |   if (is_function_type_parameter) { | 
 |     result.set_owner(owner); | 
 |   } else { | 
 |     result.set_parameterized_class_id(owner.IsNull() ? kIllegalCid | 
 |                                                      : Class::Cast(owner).id()); | 
 |   } | 
 |   result.set_base(base); | 
 |   result.set_index(index); | 
 |   result.SetHash(0); | 
 |   result.set_nullability(nullability); | 
 |   result.set_type_state(UntaggedAbstractType::kAllocated); | 
 |  | 
 |   result.InitializeTypeTestingStubNonAtomic( | 
 |       Code::Handle(Z, TypeTestingStubGenerator::DefaultCodeForType(result))); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* TypeParameter::CanonicalNameCString(bool is_class_type_parameter, | 
 |                                                 intptr_t base, | 
 |                                                 intptr_t index) { | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   const char* base_fmt = is_class_type_parameter ? "C%" Pd : "F%" Pd; | 
 |   const char* index_fmt = is_class_type_parameter ? "X%" Pd : "Y%" Pd; | 
 |   if (base != 0) { | 
 |     printer.Printf(base_fmt, base); | 
 |   } | 
 |   printer.Printf(index_fmt, index - base); | 
 |   return printer.buffer(); | 
 | } | 
 |  | 
 | const char* TypeParameter::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "TypeParameter: null"; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   ZoneTextBuffer printer(thread->zone()); | 
 |   printer.Printf("TypeParameter: "); | 
 |   printer.AddString(CanonicalNameCString()); | 
 |   printer.AddString(NullabilitySuffix(kInternalName)); | 
 |   return printer.buffer(); | 
 | } | 
 |  | 
 | const char* Number::ToCString() const { | 
 |   // Number is an interface. No instances of Number should exist. | 
 |   UNREACHABLE(); | 
 |   return "Number"; | 
 | } | 
 |  | 
 | const char* Integer::ToCString() const { | 
 |   // Integer is an interface. No instances of Integer should exist except null. | 
 |   ASSERT(IsNull()); | 
  return "null Integer";
 | } | 
 |  | 
IntegerPtr Integer::New(const String& str, Heap::Space space) {
  if (str.IsNull() || (str.Length() == 0)) {
    return Integer::null();
  }
  // We are not supposed to have integers represented as two-byte strings.
  ASSERT(str.IsOneByteString());
 |   int64_t value = 0; | 
 |   const char* cstr = str.ToCString(); | 
 |   if (!OS::StringToInt64(cstr, &value)) { | 
 |     // Out of range. | 
 |     return Integer::null(); | 
 |   } | 
 |   return Integer::New(value, space); | 
 | } | 
 |  | 
 | IntegerPtr Integer::NewCanonical(const String& str) { | 
  // We are not supposed to have integers represented as two-byte strings.
 |   ASSERT(str.IsOneByteString()); | 
 |   int64_t value = 0; | 
 |   const char* cstr = str.ToCString(); | 
 |   if (!OS::StringToInt64(cstr, &value)) { | 
 |     // Out of range. | 
 |     return Integer::null(); | 
 |   } | 
 |   return NewCanonical(value); | 
 | } | 
 |  | 
 | IntegerPtr Integer::NewCanonical(int64_t value) { | 
 |   if (Smi::IsValid(value)) { | 
 |     return Smi::New(static_cast<intptr_t>(value)); | 
 |   } | 
 |   return Mint::NewCanonical(value); | 
 | } | 
 |  | 
 | IntegerPtr Integer::New(int64_t value, Heap::Space space) { | 
 |   const bool is_smi = Smi::IsValid(value); | 
 |   if (is_smi) { | 
 |     return Smi::New(static_cast<intptr_t>(value)); | 
 |   } | 
 |   return Mint::New(value, space); | 
 | } | 
 |  | 
 | IntegerPtr Integer::NewFromUint64(uint64_t value, Heap::Space space) { | 
 |   return Integer::New(static_cast<int64_t>(value), space); | 
 | } | 
 |  | 
 | bool Integer::IsValueInRange(uint64_t value) { | 
 |   return (value <= static_cast<uint64_t>(Mint::kMaxValue)); | 
 | } | 
 |  | 
 | bool Integer::Equals(const Instance& other) const { | 
 |   // Integer is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return false; | 
 | } | 
 |  | 
 | bool Integer::IsZero() const { | 
 |   // Integer is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return false; | 
 | } | 
 |  | 
 | bool Integer::IsNegative() const { | 
 |   // Integer is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return false; | 
 | } | 
 |  | 
 | double Integer::AsDoubleValue() const { | 
 |   // Integer is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return 0.0; | 
 | } | 
 |  | 
 | int64_t Integer::AsInt64Value() const { | 
 |   // Integer is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return 0; | 
 | } | 
 |  | 
 | uint32_t Integer::AsTruncatedUint32Value() const { | 
 |   // Integer is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return 0; | 
 | } | 
 |  | 
 | bool Integer::FitsIntoSmi() const { | 
 |   // Integer is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return false; | 
 | } | 
 |  | 
 | int Integer::CompareWith(const Integer& other) const { | 
 |   // Integer is an abstract class. | 
 |   UNREACHABLE(); | 
 |   return 0; | 
 | } | 
 |  | 
 | uint32_t Integer::CanonicalizeHash() const { | 
 |   return Multiply64Hash(AsInt64Value()); | 
 | } | 
 |  | 
 | IntegerPtr Integer::AsValidInteger() const { | 
 |   if (IsSmi()) return ptr(); | 
 |   if (IsMint()) { | 
 |     Mint& mint = Mint::Handle(); | 
 |     mint ^= ptr(); | 
 |     if (Smi::IsValid(mint.value())) { | 
 |       return Smi::New(static_cast<intptr_t>(mint.value())); | 
 |     } else { | 
 |       return ptr(); | 
 |     } | 
 |   } | 
 |   return ptr(); | 
 | } | 
 |  | 
 | const char* Integer::ToHexCString(Zone* zone) const { | 
 |   ASSERT(IsSmi() || IsMint()); | 
 |   int64_t value = AsInt64Value(); | 
 |   if (value < 0) { | 
 |     return OS::SCreate(zone, "-0x%" PX64, -static_cast<uint64_t>(value)); | 
 |   } else { | 
 |     return OS::SCreate(zone, "0x%" PX64, static_cast<uint64_t>(value)); | 
 |   } | 
 | } | 
 |  | 
 | IntegerPtr Integer::ArithmeticOp(Token::Kind operation, | 
 |                                  const Integer& other, | 
 |                                  Heap::Space space) const { | 
 |   // In 32-bit mode, the result of any operation between two Smis will fit in a | 
 |   // 32-bit signed result, except the product of two Smis, which will be 64-bit. | 
 |   // In 64-bit mode, the result of any operation between two Smis will fit in a | 
 |   // 64-bit signed result, except the product of two Smis (see below). | 
 |   if (IsSmi() && other.IsSmi()) { | 
 |     const intptr_t left_value = Smi::Value(Smi::RawCast(ptr())); | 
 |     const intptr_t right_value = Smi::Value(Smi::RawCast(other.ptr())); | 
 |     switch (operation) { | 
 |       case Token::kADD: | 
 |         return Integer::New(left_value + right_value, space); | 
 |       case Token::kSUB: | 
 |         return Integer::New(left_value - right_value, space); | 
 |       case Token::kMUL: | 
 |         return Integer::New( | 
 |             Utils::MulWithWrapAround(static_cast<int64_t>(left_value), | 
 |                                      static_cast<int64_t>(right_value)), | 
 |             space); | 
 |       case Token::kTRUNCDIV: | 
 |         return Integer::New(left_value / right_value, space); | 
 |       case Token::kMOD: { | 
 |         const intptr_t remainder = left_value % right_value; | 
 |         if (remainder < 0) { | 
 |           if (right_value < 0) { | 
 |             return Integer::New(remainder - right_value, space); | 
 |           } else { | 
 |             return Integer::New(remainder + right_value, space); | 
 |           } | 
 |         } | 
 |         return Integer::New(remainder, space); | 
 |       } | 
 |       default: | 
 |         UNIMPLEMENTED(); | 
 |     } | 
 |   } | 
 |   const int64_t left_value = AsInt64Value(); | 
 |   const int64_t right_value = other.AsInt64Value(); | 
 |   switch (operation) { | 
 |     case Token::kADD: | 
 |       return Integer::New(Utils::AddWithWrapAround(left_value, right_value), | 
 |                           space); | 
 |  | 
 |     case Token::kSUB: | 
 |       return Integer::New(Utils::SubWithWrapAround(left_value, right_value), | 
 |                           space); | 
 |  | 
 |     case Token::kMUL: | 
 |       return Integer::New(Utils::MulWithWrapAround(left_value, right_value), | 
 |                           space); | 
 |  | 
 |     case Token::kTRUNCDIV: | 
 |       if ((left_value == Mint::kMinValue) && (right_value == -1)) { | 
 |         // Division special case: overflow in int64_t. | 
        // MIN_VALUE / -1 = (MAX_VALUE + 1), which wraps around to MIN_VALUE.
 |         return Integer::New(Mint::kMinValue, space); | 
 |       } | 
 |       return Integer::New(left_value / right_value, space); | 
 |  | 
 |     case Token::kMOD: { | 
 |       if ((left_value == Mint::kMinValue) && (right_value == -1)) { | 
 |         // Modulo special case: overflow in int64_t. | 
        // MIN_VALUE % -1 = 0, for the reason given above.
 |         return Integer::New(0, space); | 
 |       } | 
 |       const int64_t remainder = left_value % right_value; | 
 |       if (remainder < 0) { | 
 |         if (right_value < 0) { | 
 |           return Integer::New(remainder - right_value, space); | 
 |         } else { | 
 |           return Integer::New(remainder + right_value, space); | 
 |         } | 
 |       } | 
 |       return Integer::New(remainder, space); | 
 |     } | 
 |     default: | 
 |       UNIMPLEMENTED(); | 
 |       return Integer::null(); | 
 |   } | 
 | } | 
 |  | 
 | IntegerPtr Integer::BitOp(Token::Kind kind, | 
 |                           const Integer& other, | 
 |                           Heap::Space space) const { | 
 |   if (IsSmi() && other.IsSmi()) { | 
 |     intptr_t op1_value = Smi::Value(Smi::RawCast(ptr())); | 
 |     intptr_t op2_value = Smi::Value(Smi::RawCast(other.ptr())); | 
 |     intptr_t result = 0; | 
 |     switch (kind) { | 
 |       case Token::kBIT_AND: | 
 |         result = op1_value & op2_value; | 
 |         break; | 
 |       case Token::kBIT_OR: | 
 |         result = op1_value | op2_value; | 
 |         break; | 
 |       case Token::kBIT_XOR: | 
 |         result = op1_value ^ op2_value; | 
 |         break; | 
 |       default: | 
 |         UNIMPLEMENTED(); | 
 |     } | 
 |     ASSERT(Smi::IsValid(result)); | 
 |     return Smi::New(result); | 
 |   } else { | 
 |     int64_t a = AsInt64Value(); | 
 |     int64_t b = other.AsInt64Value(); | 
 |     switch (kind) { | 
 |       case Token::kBIT_AND: | 
 |         return Integer::New(a & b, space); | 
 |       case Token::kBIT_OR: | 
 |         return Integer::New(a | b, space); | 
 |       case Token::kBIT_XOR: | 
 |         return Integer::New(a ^ b, space); | 
 |       default: | 
 |         UNIMPLEMENTED(); | 
 |         return Integer::null(); | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | IntegerPtr Integer::ShiftOp(Token::Kind kind, | 
 |                             const Integer& other, | 
 |                             Heap::Space space) const { | 
 |   int64_t a = AsInt64Value(); | 
 |   int64_t b = other.AsInt64Value(); | 
 |   ASSERT(b >= 0); | 
 |   switch (kind) { | 
 |     case Token::kSHL: | 
 |       return Integer::New(Utils::ShiftLeftWithTruncation(a, b), space); | 
 |     case Token::kSHR: | 
 |       return Integer::New(a >> Utils::Minimum<int64_t>(b, Mint::kBits), space); | 
 |     case Token::kUSHR: | 
 |       return Integer::New( | 
 |           (b >= kBitsPerInt64) ? 0 : static_cast<uint64_t>(a) >> b, space); | 
 |     default: | 
 |       UNIMPLEMENTED(); | 
 |       return Integer::null(); | 
 |   } | 
 | } | 
 |  | 
 | bool Smi::Equals(const Instance& other) const { | 
 |   if (other.IsNull() || !other.IsSmi()) { | 
 |     return false; | 
 |   } | 
 |   return (this->Value() == Smi::Cast(other).Value()); | 
 | } | 
 |  | 
 | double Smi::AsDoubleValue() const { | 
 |   return static_cast<double>(this->Value()); | 
 | } | 
 |  | 
 | int64_t Smi::AsInt64Value() const { | 
 |   return this->Value(); | 
 | } | 
 |  | 
 | uint32_t Smi::AsTruncatedUint32Value() const { | 
 |   return this->Value() & 0xFFFFFFFF; | 
 | } | 
 |  | 
 | int Smi::CompareWith(const Integer& other) const { | 
 |   if (other.IsSmi()) { | 
 |     const Smi& other_smi = Smi::Cast(other); | 
 |     if (this->Value() < other_smi.Value()) { | 
 |       return -1; | 
 |     } else if (this->Value() > other_smi.Value()) { | 
 |       return 1; | 
 |     } else { | 
 |       return 0; | 
 |     } | 
 |   } | 
 |   ASSERT(!other.FitsIntoSmi()); | 
 |   if (other.IsMint()) { | 
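    // The Mint cannot fit in a Smi, so its magnitude exceeds that of any Smi;
    // comparing the signs is sufficient.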
 |     if (this->IsNegative() == other.IsNegative()) { | 
 |       return this->IsNegative() ? 1 : -1; | 
 |     } | 
 |     return this->IsNegative() ? -1 : 1; | 
 |   } | 
 |   UNREACHABLE(); | 
 |   return 0; | 
 | } | 
 |  | 
 | const char* Smi::ToCString() const { | 
 |   return OS::SCreate(Thread::Current()->zone(), "%" Pd "", Value()); | 
 | } | 
 |  | 
 | ClassPtr Smi::Class() { | 
 |   return IsolateGroup::Current()->object_store()->smi_class(); | 
 | } | 
 |  | 
 | void Mint::set_value(int64_t value) const { | 
 |   StoreNonPointer(&untag()->value_, value); | 
 | } | 
 |  | 
 | MintPtr Mint::New(int64_t val, Heap::Space space) { | 
  // Do not allocate a Mint if a Smi would do.
 |   ASSERT(!Smi::IsValid(val)); | 
 |   ASSERT(IsolateGroup::Current()->object_store()->mint_class() != | 
 |          Class::null()); | 
 |   const auto& result = Mint::Handle(Object::Allocate<Mint>(space)); | 
 |   result.set_value(val); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | MintPtr Mint::NewCanonical(int64_t value) { | 
 |   Thread* thread = Thread::Current(); | 
 |   Mint& mint = Mint::Handle(thread->zone(), Mint::New(value, Heap::kOld)); | 
 |   mint ^= mint.Canonicalize(thread); | 
 |   return mint.ptr(); | 
 | } | 
 |  | 
 | bool Mint::Equals(const Instance& other) const { | 
 |   if (this->ptr() == other.ptr()) { | 
 |     // Both handles point to the same raw instance. | 
 |     return true; | 
 |   } | 
 |   if (!other.IsMint() || other.IsNull()) { | 
 |     return false; | 
 |   } | 
 |   return value() == Mint::Cast(other).value(); | 
 | } | 
 |  | 
 | double Mint::AsDoubleValue() const { | 
 |   return static_cast<double>(this->value()); | 
 | } | 
 |  | 
 | int64_t Mint::AsInt64Value() const { | 
 |   return this->value(); | 
 | } | 
 |  | 
 | uint32_t Mint::AsTruncatedUint32Value() const { | 
 |   return this->value() & 0xFFFFFFFF; | 
 | } | 
 |  | 
 | bool Mint::FitsIntoSmi() const { | 
 |   return Smi::IsValid(AsInt64Value()); | 
 | } | 
 |  | 
 | int Mint::CompareWith(const Integer& other) const { | 
 |   ASSERT(!FitsIntoSmi()); | 
 |   ASSERT(other.IsMint() || other.IsSmi()); | 
 |   int64_t a = AsInt64Value(); | 
 |   int64_t b = other.AsInt64Value(); | 
 |   if (a < b) { | 
 |     return -1; | 
 |   } else if (a > b) { | 
 |     return 1; | 
 |   } else { | 
 |     return 0; | 
 |   } | 
 | } | 
 |  | 
 | const char* Mint::ToCString() const { | 
 |   return OS::SCreate(Thread::Current()->zone(), "%" Pd64 "", value()); | 
 | } | 
 |  | 
 | void Double::set_value(double value) const { | 
 |   StoreNonPointer(&untag()->value_, value); | 
 | } | 
 |  | 
 | bool Double::BitwiseEqualsToDouble(double value) const { | 
 |   intptr_t value_offset = Double::value_offset(); | 
 |   void* this_addr = reinterpret_cast<void*>( | 
 |       reinterpret_cast<uword>(this->untag()) + value_offset); | 
 |   void* other_addr = reinterpret_cast<void*>(&value); | 
 |   return (memcmp(this_addr, other_addr, sizeof(value)) == 0); | 
 | } | 
 |  | 
 | bool Double::OperatorEquals(const Instance& other) const { | 
 |   if (this->IsNull() || other.IsNull()) { | 
 |     return (this->IsNull() && other.IsNull()); | 
 |   } | 
 |   if (!other.IsDouble()) { | 
 |     return false; | 
 |   } | 
 |   return this->value() == Double::Cast(other).value(); | 
 | } | 
 |  | 
 | bool Double::CanonicalizeEquals(const Instance& other) const { | 
 |   if (this->ptr() == other.ptr()) { | 
 |     return true;  // "===". | 
 |   } | 
 |   if (other.IsNull() || !other.IsDouble()) { | 
 |     return false; | 
 |   } | 
 |   return BitwiseEqualsToDouble(Double::Cast(other).value()); | 
 | } | 
 |  | 
 | uint32_t Double::CanonicalizeHash() const { | 
 |   return Hash64To32(bit_cast<uint64_t>(value())); | 
 | } | 
 |  | 
 | DoublePtr Double::New(double d, Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->double_class() != | 
 |          Class::null()); | 
 |   const auto& result = Double::Handle(Object::Allocate<Double>(space)); | 
 |   result.set_value(d); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | DoublePtr Double::New(const String& str, Heap::Space space) { | 
 |   double double_value; | 
 |   if (!CStringToDouble(str.ToCString(), str.Length(), &double_value)) { | 
 |     return Double::Handle().ptr(); | 
 |   } | 
 |   return New(double_value, space); | 
 | } | 
 |  | 
 | DoublePtr Double::NewCanonical(double value) { | 
 |   Thread* thread = Thread::Current(); | 
 |   Double& dbl = Double::Handle(thread->zone(), Double::New(value, Heap::kOld)); | 
 |   dbl ^= dbl.Canonicalize(thread); | 
 |   return dbl.ptr(); | 
 | } | 
 |  | 
 | DoublePtr Double::NewCanonical(const String& str) { | 
 |   double double_value; | 
 |   if (!CStringToDouble(str.ToCString(), str.Length(), &double_value)) { | 
 |     return Double::Handle().ptr(); | 
 |   } | 
 |   return NewCanonical(double_value); | 
 | } | 
 |  | 
 | StringPtr Number::ToString(Heap::Space space) const { | 
  // A refactoring could avoid the Zone::Alloc and strlen, but the gains
  // would be insignificant.
 |   const char* cstr = ToCString(); | 
 |   intptr_t len = strlen(cstr); | 
 | // Resulting string is ASCII ... | 
 | #ifdef DEBUG | 
 |   for (intptr_t i = 0; i < len; ++i) { | 
 |     ASSERT(static_cast<uint8_t>(cstr[i]) < 128); | 
 |   } | 
 | #endif  // DEBUG | 
 |   // ... which is a subset of Latin-1. | 
 |   return String::FromLatin1(reinterpret_cast<const uint8_t*>(cstr), len, space); | 
 | } | 
 |  | 
 | const char* Double::ToCString() const { | 
 |   if (isnan(value())) { | 
 |     return "NaN"; | 
 |   } | 
 |   if (isinf(value())) { | 
 |     return value() < 0 ? "-Infinity" : "Infinity"; | 
 |   } | 
 |   const int kBufferSize = 128; | 
 |   char* buffer = Thread::Current()->zone()->Alloc<char>(kBufferSize); | 
 |   buffer[kBufferSize - 1] = '\0'; | 
 |   DoubleToCString(value(), buffer, kBufferSize); | 
 |   return buffer; | 
 | } | 
 |  | 
 | void StringHasher::Add(const String& str, intptr_t begin_index, intptr_t len) { | 
 |   ASSERT(begin_index >= 0); | 
 |   ASSERT(len >= 0); | 
 |   ASSERT((begin_index + len) <= str.Length()); | 
 |   if (len == 0) { | 
 |     return; | 
 |   } | 
 |   if (str.IsOneByteString()) { | 
 |     NoSafepointScope no_safepoint; | 
 |     Add(OneByteString::CharAddr(str, begin_index), len); | 
 |   } else if (str.IsTwoByteString()) { | 
 |     NoSafepointScope no_safepoint; | 
 |     Add(TwoByteString::CharAddr(str, begin_index), len); | 
 |   } else { | 
 |     UNREACHABLE(); | 
 |   } | 
 | } | 
 |  | 
 | uword String::Hash(const String& str, intptr_t begin_index, intptr_t len) { | 
 |   StringHasher hasher; | 
 |   hasher.Add(str, begin_index, len); | 
 |   return hasher.Finalize(); | 
 | } | 
 |  | 
 | uword String::HashConcat(const String& str1, const String& str2) { | 
 |   StringHasher hasher; | 
 |   hasher.Add(str1, 0, str1.Length()); | 
 |   hasher.Add(str2, 0, str2.Length()); | 
 |   return hasher.Finalize(); | 
 | } | 
 |  | 
 | uword String::Hash(StringPtr raw) { | 
 |   StringHasher hasher; | 
  const intptr_t length = Smi::Value(raw->untag()->length());
 |   if (raw->IsOneByteString()) { | 
 |     const uint8_t* data = static_cast<OneByteStringPtr>(raw)->untag()->data(); | 
 |     return String::Hash(data, length); | 
 |   } else { | 
 |     const uint16_t* data = static_cast<TwoByteStringPtr>(raw)->untag()->data(); | 
 |     return String::Hash(data, length); | 
 |   } | 
 | } | 
 |  | 
 | uword String::Hash(const char* characters, intptr_t len) { | 
 |   StringHasher hasher; | 
 |   hasher.Add(reinterpret_cast<const uint8_t*>(characters), len); | 
 |   return hasher.Finalize(); | 
 | } | 
 |  | 
 | uword String::Hash(const uint8_t* characters, intptr_t len) { | 
 |   StringHasher hasher; | 
 |   hasher.Add(characters, len); | 
 |   return hasher.Finalize(); | 
 | } | 
 |  | 
 | uword String::Hash(const uint16_t* characters, intptr_t len) { | 
 |   StringHasher hasher; | 
 |   hasher.Add(characters, len); | 
 |   return hasher.Finalize(); | 
 | } | 
 |  | 
 | intptr_t String::CharSize() const { | 
 |   intptr_t class_id = ptr()->GetClassId(); | 
 |   if (class_id == kOneByteStringCid) { | 
 |     return kOneByteChar; | 
 |   } | 
 |   ASSERT(class_id == kTwoByteStringCid); | 
 |   return kTwoByteChar; | 
 | } | 
 |  | 
 | bool String::Equals(const Instance& other) const { | 
 |   if (this->ptr() == other.ptr()) { | 
 |     // Both handles point to the same raw instance. | 
 |     return true; | 
 |   } | 
 |  | 
 |   if (!other.IsString()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   const String& other_string = String::Cast(other); | 
 |   return Equals(other_string); | 
 | } | 
 |  | 
 | bool String::Equals(const String& str, | 
 |                     intptr_t begin_index, | 
 |                     intptr_t len) const { | 
 |   ASSERT(begin_index >= 0); | 
 |   ASSERT((begin_index == 0) || (begin_index < str.Length())); | 
 |   ASSERT(len >= 0); | 
 |   ASSERT(len <= str.Length()); | 
 |   if (len != this->Length()) { | 
 |     return false;  // Lengths don't match. | 
 |   } | 
 |  | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     if (CharAt(i) != str.CharAt(begin_index + i)) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |  | 
 |   return true; | 
 | } | 
 |  | 
 | bool String::Equals(const char* cstr) const { | 
 |   ASSERT(cstr != nullptr); | 
 |   CodePointIterator it(*this); | 
 |   intptr_t len = strlen(cstr); | 
 |   while (it.Next()) { | 
 |     if (*cstr == '\0') { | 
 |       // Lengths don't match. | 
 |       return false; | 
 |     } | 
 |     int32_t ch; | 
 |     intptr_t consumed = | 
 |         Utf8::Decode(reinterpret_cast<const uint8_t*>(cstr), len, &ch); | 
 |     if (consumed == 0 || it.Current() != ch) { | 
 |       return false; | 
 |     } | 
 |     cstr += consumed; | 
 |     len -= consumed; | 
 |   } | 
 |   return *cstr == '\0'; | 
 | } | 
 |  | 
 | bool String::Equals(const uint8_t* latin1_array, intptr_t len) const { | 
 |   if (len != this->Length()) { | 
 |     // Lengths don't match. | 
 |     return false; | 
 |   } | 
 |  | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     if (this->CharAt(i) != latin1_array[i]) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | bool String::Equals(const uint16_t* utf16_array, intptr_t len) const { | 
 |   if (len != this->Length()) { | 
 |     // Lengths don't match. | 
 |     return false; | 
 |   } | 
 |  | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     if (this->CharAt(i) != LoadUnaligned(&utf16_array[i])) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | bool String::Equals(const int32_t* utf32_array, intptr_t len) const { | 
 |   if (len < 0) return false; | 
 |   intptr_t j = 0; | 
 |   for (intptr_t i = 0; i < len; ++i) { | 
 |     if (Utf::IsSupplementary(utf32_array[i])) { | 
 |       uint16_t encoded[2]; | 
 |       Utf16::Encode(utf32_array[i], &encoded[0]); | 
 |       if (j + 1 >= Length()) return false; | 
 |       if (CharAt(j++) != encoded[0]) return false; | 
 |       if (CharAt(j++) != encoded[1]) return false; | 
 |     } else { | 
 |       if (j >= Length()) return false; | 
 |       if (CharAt(j++) != utf32_array[i]) return false; | 
 |     } | 
 |   } | 
 |   return j == Length(); | 
 | } | 
 |  | 
 | bool String::EqualsConcat(const String& str1, const String& str2) const { | 
 |   return (Length() == str1.Length() + str2.Length()) && | 
 |          str1.Equals(*this, 0, str1.Length()) && | 
 |          str2.Equals(*this, str1.Length(), str2.Length()); | 
 | } | 
 |  | 
 | intptr_t String::CompareTo(const String& other) const { | 
 |   const intptr_t this_len = this->Length(); | 
 |   const intptr_t other_len = other.IsNull() ? 0 : other.Length(); | 
 |   const intptr_t len = (this_len < other_len) ? this_len : other_len; | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     uint16_t this_code_unit = this->CharAt(i); | 
 |     uint16_t other_code_unit = other.CharAt(i); | 
 |     if (this_code_unit < other_code_unit) { | 
 |       return -1; | 
 |     } | 
 |     if (this_code_unit > other_code_unit) { | 
 |       return 1; | 
 |     } | 
 |   } | 
 |   if (this_len < other_len) return -1; | 
 |   if (this_len > other_len) return 1; | 
 |   return 0; | 
 | } | 
 |  | 
 | bool String::StartsWith(StringPtr str, StringPtr prefix) { | 
 |   if (prefix == String::null()) return false; | 
 |  | 
 |   const intptr_t length = String::LengthOf(str); | 
 |   const intptr_t prefix_length = String::LengthOf(prefix); | 
 |   if (prefix_length > length) return false; | 
 |  | 
 |   for (intptr_t i = 0; i < prefix_length; i++) { | 
 |     if (String::CharAt(str, i) != String::CharAt(prefix, i)) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | bool String::EndsWith(const String& other) const { | 
 |   if (other.IsNull()) { | 
 |     return false; | 
 |   } | 
 |   const intptr_t len = this->Length(); | 
 |   const intptr_t other_len = other.Length(); | 
 |   const intptr_t offset = len - other_len; | 
 |  | 
 |   if ((other_len == 0) || (other_len > len)) { | 
 |     return false; | 
 |   } | 
  for (intptr_t i = offset; i < len; i++) {
 |     if (this->CharAt(i) != other.CharAt(i - offset)) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | InstancePtr String::CanonicalizeLocked(Thread* thread) const { | 
 |   if (IsCanonical()) { | 
 |     return this->ptr(); | 
 |   } | 
  return Symbols::New(thread, *this);
 | } | 
 |  | 
 | StringPtr String::New(const char* cstr, Heap::Space space) { | 
 |   ASSERT(cstr != nullptr); | 
 |   intptr_t array_len = strlen(cstr); | 
 |   const uint8_t* utf8_array = reinterpret_cast<const uint8_t*>(cstr); | 
 |   return String::FromUTF8(utf8_array, array_len, space); | 
 | } | 
 |  | 
 | StringPtr String::FromUTF8(const uint8_t* utf8_array, | 
 |                            intptr_t array_len, | 
 |                            Heap::Space space) { | 
 |   Utf8::Type type; | 
 |   intptr_t len = Utf8::CodeUnitCount(utf8_array, array_len, &type); | 
 |   if (type == Utf8::kLatin1) { | 
 |     const String& strobj = String::Handle(OneByteString::New(len, space)); | 
 |     if (len > 0) { | 
 |       NoSafepointScope no_safepoint; | 
 |       if (!Utf8::DecodeToLatin1(utf8_array, array_len, | 
 |                                 OneByteString::DataStart(strobj), len)) { | 
 |         Utf8::ReportInvalidByte(utf8_array, array_len, len); | 
 |         return String::null(); | 
 |       } | 
 |     } | 
 |     return strobj.ptr(); | 
 |   } | 
 |   ASSERT((type == Utf8::kBMP) || (type == Utf8::kSupplementary)); | 
 |   const String& strobj = String::Handle(TwoByteString::New(len, space)); | 
 |   NoSafepointScope no_safepoint; | 
 |   if (!Utf8::DecodeToUTF16(utf8_array, array_len, | 
 |                            TwoByteString::DataStart(strobj), len)) { | 
 |     Utf8::ReportInvalidByte(utf8_array, array_len, len); | 
 |     return String::null(); | 
 |   } | 
 |   return strobj.ptr(); | 
 | } | 
 |  | 
 | StringPtr String::FromLatin1(const uint8_t* latin1_array, | 
 |                              intptr_t array_len, | 
 |                              Heap::Space space) { | 
 |   return OneByteString::New(latin1_array, array_len, space); | 
 | } | 
 |  | 
 | StringPtr String::FromUTF16(const uint16_t* utf16_array, | 
 |                             intptr_t array_len, | 
 |                             Heap::Space space) { | 
 |   bool is_one_byte_string = true; | 
 |   for (intptr_t i = 0; i < array_len; ++i) { | 
 |     if (!Utf::IsLatin1(LoadUnaligned(&utf16_array[i]))) { | 
 |       is_one_byte_string = false; | 
 |       break; | 
 |     } | 
 |   } | 
 |   if (is_one_byte_string) { | 
 |     return OneByteString::New(utf16_array, array_len, space); | 
 |   } | 
 |   return TwoByteString::New(utf16_array, array_len, space); | 
 | } | 
 |  | 
 | StringPtr String::FromUTF32(const int32_t* utf32_array, | 
 |                             intptr_t array_len, | 
 |                             Heap::Space space) { | 
 |   bool is_one_byte_string = true; | 
 |   intptr_t utf16_len = array_len; | 
 |   for (intptr_t i = 0; i < array_len; ++i) { | 
 |     if (!Utf::IsLatin1(utf32_array[i])) { | 
 |       is_one_byte_string = false; | 
 |       if (Utf::IsSupplementary(utf32_array[i])) { | 
 |         utf16_len += 1; | 
 |       } | 
 |     } | 
 |   } | 
 |   if (is_one_byte_string) { | 
 |     return OneByteString::New(utf32_array, array_len, space); | 
 |   } | 
 |   return TwoByteString::New(utf16_len, utf32_array, array_len, space); | 
 | } | 
 |  | 
 | StringPtr String::New(const String& str, Heap::Space space) { | 
 |   // Currently this just creates a copy of the string in the correct space. | 
 |   // Once we have external string support, this will also create a heap copy of | 
 |   // the string if necessary. Some optimizations are possible, such as not | 
 |   // copying internal strings into the same space. | 
 |   intptr_t len = str.Length(); | 
 |   String& result = String::Handle(); | 
 |   intptr_t char_size = str.CharSize(); | 
 |   if (char_size == kOneByteChar) { | 
 |     result = OneByteString::New(len, space); | 
 |   } else { | 
 |     ASSERT(char_size == kTwoByteChar); | 
 |     result = TwoByteString::New(len, space); | 
 |   } | 
 |   String::Copy(result, 0, str, 0, len); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void String::Copy(const String& dst, | 
 |                   intptr_t dst_offset, | 
 |                   const uint8_t* characters, | 
 |                   intptr_t len) { | 
 |   ASSERT(dst_offset >= 0); | 
 |   ASSERT(len >= 0); | 
 |   ASSERT(len <= (dst.Length() - dst_offset)); | 
 |   if (dst.IsOneByteString()) { | 
 |     NoSafepointScope no_safepoint; | 
 |     if (len > 0) { | 
 |       memmove(OneByteString::CharAddr(dst, dst_offset), characters, len); | 
 |     } | 
 |   } else if (dst.IsTwoByteString()) { | 
 |     for (intptr_t i = 0; i < len; ++i) { | 
 |       *TwoByteString::CharAddr(dst, i + dst_offset) = characters[i]; | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | void String::Copy(const String& dst, | 
 |                   intptr_t dst_offset, | 
 |                   const uint16_t* utf16_array, | 
 |                   intptr_t array_len) { | 
 |   ASSERT(dst_offset >= 0); | 
 |   ASSERT(array_len >= 0); | 
 |   ASSERT(array_len <= (dst.Length() - dst_offset)); | 
 |   if (dst.IsOneByteString()) { | 
 |     NoSafepointScope no_safepoint; | 
 |     for (intptr_t i = 0; i < array_len; ++i) { | 
 |       ASSERT(Utf::IsLatin1(LoadUnaligned(&utf16_array[i]))); | 
 |       *OneByteString::CharAddr(dst, i + dst_offset) = utf16_array[i]; | 
 |     } | 
 |   } else { | 
 |     ASSERT(dst.IsTwoByteString()); | 
 |     NoSafepointScope no_safepoint; | 
 |     if (array_len > 0) { | 
 |       memmove(TwoByteString::CharAddr(dst, dst_offset), utf16_array, | 
 |               array_len * 2); | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | void String::Copy(const String& dst, | 
 |                   intptr_t dst_offset, | 
 |                   const String& src, | 
 |                   intptr_t src_offset, | 
 |                   intptr_t len) { | 
 |   ASSERT(dst_offset >= 0); | 
 |   ASSERT(src_offset >= 0); | 
 |   ASSERT(len >= 0); | 
 |   ASSERT(len <= (dst.Length() - dst_offset)); | 
 |   ASSERT(len <= (src.Length() - src_offset)); | 
 |   if (len > 0) { | 
 |     intptr_t char_size = src.CharSize(); | 
 |     if (char_size == kOneByteChar) { | 
 |       ASSERT(src.IsOneByteString()); | 
 |       NoSafepointScope no_safepoint; | 
 |       String::Copy(dst, dst_offset, OneByteString::CharAddr(src, src_offset), | 
 |                    len); | 
 |     } else { | 
 |       ASSERT(char_size == kTwoByteChar); | 
 |       ASSERT(src.IsTwoByteString()); | 
 |       NoSafepointScope no_safepoint; | 
 |       String::Copy(dst, dst_offset, TwoByteString::CharAddr(src, src_offset), | 
 |                    len); | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | StringPtr String::EscapeSpecialCharacters(const String& str) { | 
 |   if (str.IsOneByteString()) { | 
 |     return OneByteString::EscapeSpecialCharacters(str); | 
 |   } | 
 |   ASSERT(str.IsTwoByteString()); | 
 |   return TwoByteString::EscapeSpecialCharacters(str); | 
 | } | 
 |  | 
 | static bool IsPercent(int32_t c) { | 
 |   return c == '%'; | 
 | } | 
 |  | 
 | static bool IsHexCharacter(int32_t c) { | 
 |   if (c >= '0' && c <= '9') { | 
 |     return true; | 
 |   } | 
 |   if (c >= 'A' && c <= 'F') { | 
 |     return true; | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | static bool IsURISafeCharacter(int32_t c) { | 
 |   if ((c >= '0') && (c <= '9')) { | 
 |     return true; | 
 |   } | 
 |   if ((c >= 'a') && (c <= 'z')) { | 
 |     return true; | 
 |   } | 
 |   if ((c >= 'A') && (c <= 'Z')) { | 
 |     return true; | 
 |   } | 
 |   return (c == '-') || (c == '_') || (c == '.') || (c == '~'); | 
 | } | 
 |  | 
 | static int32_t GetHexCharacter(int32_t c) { | 
 |   ASSERT(c >= 0); | 
 |   ASSERT(c < 16); | 
 |   const char* hex = "0123456789ABCDEF"; | 
 |   return hex[c]; | 
 | } | 
 |  | 
 | static int32_t GetHexValue(int32_t c) { | 
 |   if (c >= '0' && c <= '9') { | 
 |     return c - '0'; | 
 |   } | 
 |   if (c >= 'A' && c <= 'F') { | 
 |     return c - 'A' + 10; | 
 |   } | 
 |   UNREACHABLE(); | 
 |   return 0; | 
 | } | 
 |  | 
 | static int32_t MergeHexCharacters(int32_t c1, int32_t c2) { | 
 |   return GetHexValue(c1) << 4 | GetHexValue(c2); | 
 | } | 
 |  | 
 | const char* String::EncodeIRI(const String& str) { | 
 |   const intptr_t len = Utf8::Length(str); | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   uint8_t* utf8 = zone->Alloc<uint8_t>(len); | 
 |   str.ToUTF8(utf8, len); | 
 |   intptr_t num_escapes = 0; | 
  for (intptr_t i = 0; i < len; ++i) {
 |     uint8_t byte = utf8[i]; | 
 |     if (!IsURISafeCharacter(byte)) { | 
 |       num_escapes += 2; | 
 |     } | 
 |   } | 
 |   intptr_t cstr_len = len + num_escapes + 1; | 
 |   char* cstr = zone->Alloc<char>(cstr_len); | 
 |   intptr_t index = 0; | 
  for (intptr_t i = 0; i < len; ++i) {
 |     uint8_t byte = utf8[i]; | 
 |     if (!IsURISafeCharacter(byte)) { | 
 |       cstr[index++] = '%'; | 
 |       cstr[index++] = GetHexCharacter(byte >> 4); | 
 |       cstr[index++] = GetHexCharacter(byte & 0xF); | 
 |     } else { | 
 |       ASSERT(byte <= 127); | 
 |       cstr[index++] = byte; | 
 |     } | 
 |   } | 
 |   cstr[index] = '\0'; | 
 |   return cstr; | 
 | } | 
 |  | 
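// Inverse of EncodeIRI: replaces each %XX escape with the corresponding byte
// and decodes the result as UTF-8. Returns String::null() when a '%' is not
// followed by two uppercase hex digits, e.g. "a%20b" decodes to "a b" while
// "a%2" and "a%2g" decode to null.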
 | StringPtr String::DecodeIRI(const String& str) { | 
 |   intptr_t num_escapes = 0; | 
 |   intptr_t len = str.Length(); | 
 |   { | 
 |     CodePointIterator cpi(str); | 
 |     while (cpi.Next()) { | 
 |       int32_t code_point = cpi.Current(); | 
 |       if (IsPercent(code_point)) { | 
 |         // Verify that the two characters following the % are hex digits. | 
 |         if (!cpi.Next()) { | 
 |           return String::null(); | 
 |         } | 
        code_point = cpi.Current();
 |         if (!IsHexCharacter(code_point)) { | 
 |           return String::null(); | 
 |         } | 
 |         if (!cpi.Next()) { | 
 |           return String::null(); | 
 |         } | 
 |         code_point = cpi.Current(); | 
 |         if (!IsHexCharacter(code_point)) { | 
 |           return String::null(); | 
 |         } | 
 |         num_escapes += 2; | 
 |       } | 
 |     } | 
 |   } | 
 |   intptr_t utf8_len = len - num_escapes; | 
 |   ASSERT(utf8_len >= 0); | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   uint8_t* utf8 = zone->Alloc<uint8_t>(utf8_len); | 
 |   { | 
 |     intptr_t index = 0; | 
 |     CodePointIterator cpi(str); | 
 |     while (cpi.Next()) { | 
 |       ASSERT(index < utf8_len); | 
 |       int32_t code_point = cpi.Current(); | 
 |       if (IsPercent(code_point)) { | 
 |         cpi.Next(); | 
 |         int32_t ch1 = cpi.Current(); | 
 |         cpi.Next(); | 
 |         int32_t ch2 = cpi.Current(); | 
 |         int32_t merged = MergeHexCharacters(ch1, ch2); | 
 |         ASSERT(merged >= 0 && merged < 256); | 
 |         utf8[index] = static_cast<uint8_t>(merged); | 
 |       } else { | 
 |         ASSERT(code_point >= 0 && code_point < 256); | 
 |         utf8[index] = static_cast<uint8_t>(code_point); | 
 |       } | 
 |       index++; | 
 |     } | 
 |   } | 
 |   return FromUTF8(utf8, utf8_len); | 
 | } | 
 |  | 
 | StringPtr String::NewFormatted(const char* format, ...) { | 
 |   va_list args; | 
 |   va_start(args, format); | 
 |   StringPtr result = NewFormattedV(format, args); | 
 |   NoSafepointScope no_safepoint; | 
 |   va_end(args); | 
 |   return result; | 
 | } | 
 |  | 
 | StringPtr String::NewFormatted(Heap::Space space, const char* format, ...) { | 
 |   va_list args; | 
 |   va_start(args, format); | 
 |   StringPtr result = NewFormattedV(format, args, space); | 
 |   NoSafepointScope no_safepoint; | 
 |   va_end(args); | 
 |   return result; | 
 | } | 
 |  | 
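// Formats in two passes: the first VSNPrint call runs on a copy of 'args'
// (a va_list can only be traversed once) to measure the required length,
// and the second pass writes into a zone buffer of exactly that size.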
 | StringPtr String::NewFormattedV(const char* format, | 
 |                                 va_list args, | 
 |                                 Heap::Space space) { | 
 |   va_list args_copy; | 
 |   va_copy(args_copy, args); | 
 |   intptr_t len = Utils::VSNPrint(nullptr, 0, format, args_copy); | 
 |   va_end(args_copy); | 
 |  | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   char* buffer = zone->Alloc<char>(len + 1); | 
 |   Utils::VSNPrint(buffer, (len + 1), format, args); | 
 |  | 
 |   return String::New(buffer, space); | 
 | } | 
 |  | 
 | StringPtr String::Concat(const String& str1, | 
 |                          const String& str2, | 
 |                          Heap::Space space) { | 
 |   ASSERT(!str1.IsNull() && !str2.IsNull()); | 
 |   intptr_t char_size = Utils::Maximum(str1.CharSize(), str2.CharSize()); | 
 |   if (char_size == kTwoByteChar) { | 
 |     return TwoByteString::Concat(str1, str2, space); | 
 |   } | 
 |   return OneByteString::Concat(str1, str2, space); | 
 | } | 
 |  | 
 | StringPtr String::ConcatAll(const Array& strings, Heap::Space space) { | 
 |   return ConcatAllRange(strings, 0, strings.Length(), space); | 
 | } | 
 |  | 
 | StringPtr String::ConcatAllRange(const Array& strings, | 
 |                                  intptr_t start, | 
 |                                  intptr_t end, | 
 |                                  Heap::Space space) { | 
 |   ASSERT(!strings.IsNull()); | 
 |   ASSERT(start >= 0); | 
 |   ASSERT(end <= strings.Length()); | 
 |   intptr_t result_len = 0; | 
 |   String& str = String::Handle(); | 
 |   intptr_t char_size = kOneByteChar; | 
 |   // Compute 'char_size' and 'result_len'. | 
 |   for (intptr_t i = start; i < end; i++) { | 
 |     str ^= strings.At(i); | 
 |     const intptr_t str_len = str.Length(); | 
 |     if ((kMaxElements - result_len) < str_len) { | 
 |       Exceptions::ThrowOOM(); | 
 |       UNREACHABLE(); | 
 |     } | 
 |     result_len += str_len; | 
 |     char_size = Utils::Maximum(char_size, str.CharSize()); | 
 |   } | 
 |   if (char_size == kOneByteChar) { | 
 |     return OneByteString::ConcatAll(strings, start, end, result_len, space); | 
 |   } | 
 |   ASSERT(char_size == kTwoByteChar); | 
 |   return TwoByteString::ConcatAll(strings, start, end, result_len, space); | 
 | } | 
 |  | 
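// Returns the suffix of 'str' starting at 'begin_index', or null when
// 'begin_index' is out of range. The indexed variant below additionally
// produces a one-byte result whenever the selected range contains only
// Latin-1 characters, even if 'str' itself is a two-byte string.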
 | StringPtr String::SubString(const String& str, | 
 |                             intptr_t begin_index, | 
 |                             Heap::Space space) { | 
 |   ASSERT(!str.IsNull()); | 
 |   if (begin_index >= str.Length()) { | 
 |     return String::null(); | 
 |   } | 
 |   return String::SubString(str, begin_index, (str.Length() - begin_index), | 
 |                            space); | 
 | } | 
 |  | 
 | StringPtr String::SubString(Thread* thread, | 
 |                             const String& str, | 
 |                             intptr_t begin_index, | 
 |                             intptr_t length, | 
 |                             Heap::Space space) { | 
 |   ASSERT(!str.IsNull()); | 
 |   ASSERT(begin_index >= 0); | 
 |   ASSERT(length >= 0); | 
 |   if (begin_index <= str.Length() && length == 0) { | 
 |     return Symbols::Empty().ptr(); | 
 |   } | 
 |   if (begin_index > str.Length()) { | 
 |     return String::null(); | 
 |   } | 
 |   bool is_one_byte_string = true; | 
 |   intptr_t char_size = str.CharSize(); | 
 |   if (char_size == kTwoByteChar) { | 
 |     for (intptr_t i = begin_index; i < begin_index + length; ++i) { | 
 |       if (!Utf::IsLatin1(str.CharAt(i))) { | 
 |         is_one_byte_string = false; | 
 |         break; | 
 |       } | 
 |     } | 
 |   } | 
 |   REUSABLE_STRING_HANDLESCOPE(thread); | 
 |   String& result = thread->StringHandle(); | 
 |   if (is_one_byte_string) { | 
 |     result = OneByteString::New(length, space); | 
 |   } else { | 
 |     result = TwoByteString::New(length, space); | 
 |   } | 
 |   String::Copy(result, 0, str, begin_index, length); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
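// The returned buffer is zone-allocated and only lives as long as the current
// thread's zone; use ToMallocCString below when the caller must own (and
// eventually free()) the memory.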
 | const char* String::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "String: null"; | 
 |   } | 
 |   const intptr_t len = Utf8::Length(*this); | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   uint8_t* result = zone->Alloc<uint8_t>(len + 1); | 
 |   ToUTF8(result, len); | 
 |   result[len] = 0; | 
 |   return reinterpret_cast<const char*>(result); | 
 | } | 
 |  | 
 | char* String::ToMallocCString() const { | 
 |   const intptr_t len = Utf8::Length(*this); | 
 |   uint8_t* result = reinterpret_cast<uint8_t*>(malloc(len + 1)); | 
 |   ToUTF8(result, len); | 
 |   result[len] = 0; | 
 |   return reinterpret_cast<char*>(result); | 
 | } | 
 |  | 
 | void String::ToUTF8(uint8_t* utf8_array, intptr_t array_len) const { | 
 |   ASSERT(array_len >= Utf8::Length(*this)); | 
 |   Utf8::Encode(*this, reinterpret_cast<char*>(utf8_array), array_len); | 
 | } | 
 |  | 
 | const char* String::ToCString(Thread* thread, StringPtr ptr) { | 
 |   if (ptr == nullptr) return nullptr; | 
 |   REUSABLE_STRING_HANDLESCOPE(thread); | 
 |   String& str = reused_string_handle.Handle(); | 
 |   str = ptr; | 
 |   return str.ToCString(); | 
 | } | 
 |  | 
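// Wraps 'referent' in an auto-deleting finalizable persistent handle so that
// 'callback' runs (receiving 'peer') when the referent is collected;
// 'external_size' is reported to the GC for external memory accounting.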
 | static FinalizablePersistentHandle* AddFinalizer(const Object& referent, | 
 |                                                  void* peer, | 
 |                                                  Dart_HandleFinalizer callback, | 
 |                                                  intptr_t external_size) { | 
 |   ASSERT(callback != nullptr); | 
 |   FinalizablePersistentHandle* finalizable_ref = | 
 |       FinalizablePersistentHandle::New(IsolateGroup::Current(), referent, peer, | 
 |                                        callback, external_size, | 
 |                                        /*auto_delete=*/true); | 
 |   ASSERT(finalizable_ref != nullptr); | 
 |   return finalizable_ref; | 
 | } | 
 |  | 
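// Applies 'mapping' to every code point of 'str'. A first pass checks whether
// the mapping changes anything at all (if not, 'str' is returned unchanged)
// and finds the maximum mapped code point, which decides whether a one-byte
// or a two-byte result string is required.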
 | StringPtr String::Transform(int32_t (*mapping)(int32_t ch), | 
 |                             const String& str, | 
 |                             Heap::Space space) { | 
 |   ASSERT(!str.IsNull()); | 
 |   bool has_mapping = false; | 
 |   int32_t dst_max = 0; | 
 |   CodePointIterator it(str); | 
 |   while (it.Next()) { | 
 |     int32_t src = it.Current(); | 
 |     int32_t dst = mapping(src); | 
 |     if (src != dst) { | 
 |       has_mapping = true; | 
 |     } | 
 |     dst_max = Utils::Maximum(dst_max, dst); | 
 |   } | 
 |   if (!has_mapping) { | 
 |     return str.ptr(); | 
 |   } | 
 |   if (Utf::IsLatin1(dst_max)) { | 
 |     return OneByteString::Transform(mapping, str, space); | 
 |   } | 
 |   ASSERT(Utf::IsBmp(dst_max) || Utf::IsSupplementary(dst_max)); | 
 |   return TwoByteString::Transform(mapping, str, space); | 
 | } | 
 |  | 
 | StringPtr String::ToUpperCase(const String& str, Heap::Space space) { | 
 |   // TODO(cshapiro): create a fast-path for OneByteString instances. | 
 |   return Transform(CaseMapping::ToUpper, str, space); | 
 | } | 
 |  | 
 | StringPtr String::ToLowerCase(const String& str, Heap::Space space) { | 
 |   // TODO(cshapiro): create a fast-path for OneByteString instances. | 
 |   return Transform(CaseMapping::ToLower, str, space); | 
 | } | 
 |  | 
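// Parses str[start, end) as a double, storing it in '*result' and returning
// whether parsing succeeded. Two-byte strings are first copied into a
// temporary ASCII buffer, since the underlying CStringToDouble helper works
// on 8-bit characters; any non-ASCII character makes parsing fail.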
 | bool String::ParseDouble(const String& str, | 
 |                          intptr_t start, | 
 |                          intptr_t end, | 
 |                          double* result) { | 
 |   ASSERT(0 <= start); | 
 |   ASSERT(start <= end); | 
 |   ASSERT(end <= str.Length()); | 
 |   intptr_t length = end - start; | 
 |   NoSafepointScope no_safepoint; | 
 |   const uint8_t* startChar; | 
 |   if (str.IsOneByteString()) { | 
 |     startChar = OneByteString::CharAddr(str, start); | 
 |   } else { | 
 |     uint8_t* chars = Thread::Current()->zone()->Alloc<uint8_t>(length); | 
 |     for (intptr_t i = 0; i < length; i++) { | 
 |       int32_t ch = str.CharAt(start + i); | 
 |       if (ch < 128) { | 
 |         chars[i] = ch; | 
 |       } else { | 
 |         return false;  // Not ASCII, so definitely not valid double numeral. | 
 |       } | 
 |     } | 
 |     startChar = chars; | 
 |   } | 
 |   return CStringToDouble(reinterpret_cast<const char*>(startChar), length, | 
 |                          result); | 
 | } | 
 |  | 
 | // Check to see if 'str1' matches 'str2' as is or | 
// once the private key separator is stripped from str1.
 | // | 
 | // Things are made more complicated by the fact that constructors are | 
 | // added *after* the private suffix, so "foo@123.named" should match | 
 | // "foo.named". | 
 | // | 
 | // Also, the private suffix can occur more than once in the name, as in: | 
 | // | 
 | //    _ReceivePortImpl@6be832b._internal@6be832b | 
 | // | 
 | template <typename T1, typename T2> | 
 | static bool EqualsIgnoringPrivateKey(const String& str1, const String& str2) { | 
 |   intptr_t len = str1.Length(); | 
 |   intptr_t str2_len = str2.Length(); | 
 |   if (len == str2_len) { | 
 |     for (intptr_t i = 0; i < len; i++) { | 
 |       if (T1::CharAt(str1, i) != T2::CharAt(str2, i)) { | 
 |         return false; | 
 |       } | 
 |     } | 
 |     return true; | 
 |   } | 
 |   if (len < str2_len) { | 
 |     return false;  // No way they can match. | 
 |   } | 
 |   intptr_t pos = 0; | 
 |   intptr_t str2_pos = 0; | 
 |   while (pos < len) { | 
 |     int32_t ch = T1::CharAt(str1, pos); | 
 |     pos++; | 
 |  | 
 |     if ((str2_pos < str2_len) && (ch == T2::CharAt(str2, str2_pos))) { | 
 |       str2_pos++; | 
 |       continue; | 
 |     } | 
 |  | 
 |     if (ch == Library::kPrivateKeySeparator) { | 
 |       // Consume a private key separator if str1 has it but str2 does not. | 
 |       while ((pos < len) && (T1::CharAt(str1, pos) != '.') && | 
 |              (T1::CharAt(str1, pos) != '&')) { | 
 |         pos++; | 
 |       } | 
 |       // Resume matching characters. | 
 |       continue; | 
 |     } | 
 |  | 
 |     return false; | 
 |   } | 
 |  | 
  // We have reached the end of str1 (the mangled name).
 |   ASSERT(pos == len); | 
 |   return (str2_pos == str2_len); | 
 | } | 
 |  | 
 | #define EQUALS_IGNORING_PRIVATE_KEY(class_id, type, str1, str2)                \ | 
 |   switch (class_id) {                                                          \ | 
 |     case kOneByteStringCid:                                                    \ | 
 |       return dart::EqualsIgnoringPrivateKey<type, OneByteString>(str1, str2);  \ | 
 |     case kTwoByteStringCid:                                                    \ | 
 |       return dart::EqualsIgnoringPrivateKey<type, TwoByteString>(str1, str2);  \ | 
 |   }                                                                            \ | 
 |   UNREACHABLE(); | 
 |  | 
 | bool String::EqualsIgnoringPrivateKey(const String& str1, const String& str2) { | 
 |   if (str1.ptr() == str2.ptr()) { | 
 |     return true;  // Both handles point to the same raw instance. | 
 |   } | 
 |   NoSafepointScope no_safepoint; | 
 |   intptr_t str1_class_id = str1.ptr()->GetClassId(); | 
 |   intptr_t str2_class_id = str2.ptr()->GetClassId(); | 
 |   switch (str1_class_id) { | 
 |     case kOneByteStringCid: | 
 |       EQUALS_IGNORING_PRIVATE_KEY(str2_class_id, OneByteString, str1, str2); | 
 |       break; | 
 |     case kTwoByteStringCid: | 
 |       EQUALS_IGNORING_PRIVATE_KEY(str2_class_id, TwoByteString, str1, str2); | 
 |       break; | 
 |   } | 
 |   UNREACHABLE(); | 
 |   return false; | 
 | } | 
 |  | 
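// Advances past the previous code point (one or two UTF-16 code units) and
// decodes the next one, combining a lead/trail surrogate pair into a single
// supplementary code point when both halves are present.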
 | bool String::CodePointIterator::Next() { | 
 |   ASSERT(index_ >= -1); | 
 |   intptr_t length = Utf16::Length(ch_); | 
 |   if (index_ < (end_ - length)) { | 
 |     index_ += length; | 
 |     ch_ = str_.CharAt(index_); | 
 |     if (Utf16::IsLeadSurrogate(ch_) && (index_ < (end_ - 1))) { | 
 |       int32_t ch2 = str_.CharAt(index_ + 1); | 
 |       if (Utf16::IsTrailSurrogate(ch2)) { | 
 |         ch_ = Utf16::Decode(ch_, ch2); | 
 |       } | 
 |     } | 
 |     return true; | 
 |   } | 
 |   index_ = end_; | 
 |   return false; | 
 | } | 
 |  | 
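// Returns a copy of 'str' in which special characters get a two-character
// escape (e.g. a newline becomes the two characters '\' 'n') and other ASCII
// non-printables get a four-character "\xHH" hex escape.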
 | OneByteStringPtr OneByteString::EscapeSpecialCharacters(const String& str) { | 
 |   intptr_t len = str.Length(); | 
 |   if (len > 0) { | 
 |     intptr_t num_escapes = 0; | 
 |     for (intptr_t i = 0; i < len; i++) { | 
 |       num_escapes += EscapeOverhead(CharAt(str, i)); | 
 |     } | 
 |     const String& dststr = | 
 |         String::Handle(OneByteString::New(len + num_escapes, Heap::kNew)); | 
 |     intptr_t index = 0; | 
 |     for (intptr_t i = 0; i < len; i++) { | 
 |       uint8_t ch = CharAt(str, i); | 
 |       if (IsSpecialCharacter(ch)) { | 
 |         SetCharAt(dststr, index, '\\'); | 
 |         SetCharAt(dststr, index + 1, SpecialCharacter(ch)); | 
 |         index += 2; | 
 |       } else if (IsAsciiNonprintable(ch)) { | 
 |         SetCharAt(dststr, index, '\\'); | 
 |         SetCharAt(dststr, index + 1, 'x'); | 
 |         SetCharAt(dststr, index + 2, GetHexCharacter(ch >> 4)); | 
 |         SetCharAt(dststr, index + 3, GetHexCharacter(ch & 0xF)); | 
 |         index += 4; | 
 |       } else { | 
 |         SetCharAt(dststr, index, ch); | 
 |         index += 1; | 
 |       } | 
 |     } | 
 |     return OneByteString::raw(dststr); | 
 |   } | 
 |   return OneByteString::raw(Symbols::Empty()); | 
 | } | 
 |  | 
 | OneByteStringPtr OneByteString::New(intptr_t len, Heap::Space space) { | 
 |   ASSERT((IsolateGroup::Current() == Dart::vm_isolate_group()) || | 
 |          ((IsolateGroup::Current()->object_store() != nullptr) && | 
 |           (IsolateGroup::Current()->object_store()->one_byte_string_class() != | 
 |            Class::null()))); | 
 |   if (len < 0 || len > kMaxElements) { | 
 |     // This should be caught before we reach here. | 
 |     FATAL("Fatal error in OneByteString::New: invalid len %" Pd "\n", len); | 
 |   } | 
 |   auto result = Object::Allocate<OneByteString>(space, len); | 
 |   NoSafepointScope no_safepoint; | 
 |   result->untag()->set_length(Smi::New(len)); | 
 | #if !defined(HASH_IN_OBJECT_HEADER) | 
 |   result->untag()->set_hash(Smi::New(0)); | 
 | #endif | 
 |   intptr_t size = OneByteString::UnroundedSize(result); | 
 |   ASSERT(size <= result->untag()->HeapSize()); | 
 |   memset(reinterpret_cast<void*>(UntaggedObject::ToAddr(result) + size), 0, | 
 |          result->untag()->HeapSize() - size); | 
 |   return result; | 
 | } | 
 |  | 
 | OneByteStringPtr OneByteString::New(const uint8_t* characters, | 
 |                                     intptr_t len, | 
 |                                     Heap::Space space) { | 
 |   const String& result = String::Handle(OneByteString::New(len, space)); | 
 |   if (len > 0) { | 
 |     NoSafepointScope no_safepoint; | 
 |     memmove(DataStart(result), characters, len); | 
 |   } | 
 |   return OneByteString::raw(result); | 
 | } | 
 |  | 
 | OneByteStringPtr OneByteString::New(const uint16_t* characters, | 
 |                                     intptr_t len, | 
 |                                     Heap::Space space) { | 
 |   const String& result = String::Handle(OneByteString::New(len, space)); | 
 |   NoSafepointScope no_safepoint; | 
 |   for (intptr_t i = 0; i < len; ++i) { | 
 |     ASSERT(Utf::IsLatin1(characters[i])); | 
 |     *CharAddr(result, i) = characters[i]; | 
 |   } | 
 |   return OneByteString::raw(result); | 
 | } | 
 |  | 
 | OneByteStringPtr OneByteString::New(const int32_t* characters, | 
 |                                     intptr_t len, | 
 |                                     Heap::Space space) { | 
 |   const String& result = String::Handle(OneByteString::New(len, space)); | 
 |   NoSafepointScope no_safepoint; | 
 |   for (intptr_t i = 0; i < len; ++i) { | 
 |     ASSERT(Utf::IsLatin1(characters[i])); | 
 |     *CharAddr(result, i) = characters[i]; | 
 |   } | 
 |   return OneByteString::raw(result); | 
 | } | 
 |  | 
 | OneByteStringPtr OneByteString::New(const String& str, Heap::Space space) { | 
 |   intptr_t len = str.Length(); | 
 |   const String& result = String::Handle(OneByteString::New(len, space)); | 
 |   String::Copy(result, 0, str, 0, len); | 
 |   return OneByteString::raw(result); | 
 | } | 
 |  | 
 | OneByteStringPtr OneByteString::New(const String& other_one_byte_string, | 
 |                                     intptr_t other_start_index, | 
 |                                     intptr_t other_len, | 
 |                                     Heap::Space space) { | 
 |   const String& result = String::Handle(OneByteString::New(other_len, space)); | 
 |   ASSERT(other_one_byte_string.IsOneByteString()); | 
 |   if (other_len > 0) { | 
 |     NoSafepointScope no_safepoint; | 
 |     memmove(OneByteString::DataStart(result), | 
 |             OneByteString::CharAddr(other_one_byte_string, other_start_index), | 
 |             other_len); | 
 |   } | 
 |   return OneByteString::raw(result); | 
 | } | 
 |  | 
 | OneByteStringPtr OneByteString::New(const TypedDataBase& other_typed_data, | 
 |                                     intptr_t other_start_index, | 
 |                                     intptr_t other_len, | 
 |                                     Heap::Space space) { | 
 |   const String& result = String::Handle(OneByteString::New(other_len, space)); | 
 |   ASSERT(other_typed_data.ElementSizeInBytes() == 1); | 
 |   if (other_len > 0) { | 
 |     NoSafepointScope no_safepoint; | 
 |     memmove(OneByteString::DataStart(result), | 
 |             other_typed_data.DataAddr(other_start_index), other_len); | 
 |   } | 
 |   return OneByteString::raw(result); | 
 | } | 
 |  | 
 | OneByteStringPtr OneByteString::Concat(const String& str1, | 
 |                                        const String& str2, | 
 |                                        Heap::Space space) { | 
 |   intptr_t len1 = str1.Length(); | 
 |   intptr_t len2 = str2.Length(); | 
 |   intptr_t len = len1 + len2; | 
 |   const String& result = String::Handle(OneByteString::New(len, space)); | 
 |   String::Copy(result, 0, str1, 0, len1); | 
 |   String::Copy(result, len1, str2, 0, len2); | 
 |   return OneByteString::raw(result); | 
 | } | 
 |  | 
 | OneByteStringPtr OneByteString::ConcatAll(const Array& strings, | 
 |                                           intptr_t start, | 
 |                                           intptr_t end, | 
 |                                           intptr_t len, | 
 |                                           Heap::Space space) { | 
 |   ASSERT(!strings.IsNull()); | 
 |   ASSERT(start >= 0); | 
 |   ASSERT(end <= strings.Length()); | 
 |   const String& result = String::Handle(OneByteString::New(len, space)); | 
 |   String& str = String::Handle(); | 
 |   intptr_t pos = 0; | 
 |   for (intptr_t i = start; i < end; i++) { | 
 |     str ^= strings.At(i); | 
 |     const intptr_t str_len = str.Length(); | 
 |     String::Copy(result, pos, str, 0, str_len); | 
 |     ASSERT((kMaxElements - pos) >= str_len); | 
 |     pos += str_len; | 
 |   } | 
 |   return OneByteString::raw(result); | 
 | } | 
 |  | 
 | OneByteStringPtr OneByteString::Transform(int32_t (*mapping)(int32_t ch), | 
 |                                           const String& str, | 
 |                                           Heap::Space space) { | 
 |   ASSERT(!str.IsNull()); | 
 |   intptr_t len = str.Length(); | 
 |   const String& result = String::Handle(OneByteString::New(len, space)); | 
 |   NoSafepointScope no_safepoint; | 
 |   for (intptr_t i = 0; i < len; ++i) { | 
 |     int32_t ch = mapping(str.CharAt(i)); | 
 |     ASSERT(Utf::IsLatin1(ch)); | 
 |     *CharAddr(result, i) = ch; | 
 |   } | 
 |   return OneByteString::raw(result); | 
 | } | 
 |  | 
 | OneByteStringPtr OneByteString::SubStringUnchecked(const String& str, | 
 |                                                    intptr_t begin_index, | 
 |                                                    intptr_t length, | 
 |                                                    Heap::Space space) { | 
 |   ASSERT(!str.IsNull() && str.IsOneByteString()); | 
 |   ASSERT(begin_index >= 0); | 
 |   ASSERT(length >= 0); | 
 |   if (begin_index <= str.Length() && length == 0) { | 
 |     return OneByteString::raw(Symbols::Empty()); | 
 |   } | 
 |   ASSERT(begin_index < str.Length()); | 
 |   OneByteStringPtr result = OneByteString::New(length, space); | 
 |   NoSafepointScope no_safepoint; | 
 |   if (length > 0) { | 
 |     uint8_t* dest = &result->untag()->data()[0]; | 
 |     const uint8_t* src = &untag(str)->data()[begin_index]; | 
 |     memmove(dest, src, length); | 
 |   } | 
 |   return result; | 
 | } | 
 |  | 
 | TwoByteStringPtr TwoByteString::EscapeSpecialCharacters(const String& str) { | 
 |   intptr_t len = str.Length(); | 
 |   if (len > 0) { | 
 |     intptr_t num_escapes = 0; | 
 |     for (intptr_t i = 0; i < len; i++) { | 
 |       num_escapes += EscapeOverhead(CharAt(str, i)); | 
 |     } | 
 |     const String& dststr = | 
 |         String::Handle(TwoByteString::New(len + num_escapes, Heap::kNew)); | 
 |     intptr_t index = 0; | 
 |     for (intptr_t i = 0; i < len; i++) { | 
 |       uint16_t ch = CharAt(str, i); | 
 |       if (IsSpecialCharacter(ch)) { | 
 |         SetCharAt(dststr, index, '\\'); | 
 |         SetCharAt(dststr, index + 1, SpecialCharacter(ch)); | 
 |         index += 2; | 
 |       } else if (IsAsciiNonprintable(ch)) { | 
 |         SetCharAt(dststr, index, '\\'); | 
 |         SetCharAt(dststr, index + 1, 'x'); | 
 |         SetCharAt(dststr, index + 2, GetHexCharacter(ch >> 4)); | 
 |         SetCharAt(dststr, index + 3, GetHexCharacter(ch & 0xF)); | 
 |         index += 4; | 
 |       } else { | 
 |         SetCharAt(dststr, index, ch); | 
 |         index += 1; | 
 |       } | 
 |     } | 
 |     return TwoByteString::raw(dststr); | 
 |   } | 
 |   return TwoByteString::New(0, Heap::kNew); | 
 | } | 
 |  | 
 | TwoByteStringPtr TwoByteString::New(intptr_t len, Heap::Space space) { | 
  ASSERT(IsolateGroup::Current()->object_store()->two_byte_string_class() !=
         Class::null());
 |   if (len < 0 || len > kMaxElements) { | 
 |     // This should be caught before we reach here. | 
 |     FATAL("Fatal error in TwoByteString::New: invalid len %" Pd "\n", len); | 
 |   } | 
 |   auto s = Object::Allocate<TwoByteString>(space, len); | 
 |   NoSafepointScope no_safepoint; | 
 |   s->untag()->set_length(Smi::New(len)); | 
 | #if !defined(HASH_IN_OBJECT_HEADER) | 
 |   s->untag()->set_hash(Smi::New(0)); | 
 | #endif | 
 |   intptr_t size = TwoByteString::UnroundedSize(s); | 
 |   ASSERT(size <= s->untag()->HeapSize()); | 
 |   memset(reinterpret_cast<void*>(UntaggedObject::ToAddr(s) + size), 0, | 
 |          s->untag()->HeapSize() - size); | 
 |   return s; | 
 | } | 
 |  | 
 | TwoByteStringPtr TwoByteString::New(const uint16_t* utf16_array, | 
 |                                     intptr_t array_len, | 
 |                                     Heap::Space space) { | 
 |   ASSERT(array_len > 0); | 
 |   const String& result = String::Handle(TwoByteString::New(array_len, space)); | 
 |   { | 
 |     NoSafepointScope no_safepoint; | 
 |     memmove(reinterpret_cast<void*>(DataStart(result)), | 
 |             reinterpret_cast<const void*>(utf16_array), (array_len * 2)); | 
 |   } | 
 |   return TwoByteString::raw(result); | 
 | } | 
 |  | 
 | TwoByteStringPtr TwoByteString::New(intptr_t utf16_len, | 
 |                                     const int32_t* utf32_array, | 
 |                                     intptr_t array_len, | 
 |                                     Heap::Space space) { | 
 |   ASSERT((array_len > 0) && (utf16_len >= array_len)); | 
 |   const String& result = String::Handle(TwoByteString::New(utf16_len, space)); | 
 |   { | 
 |     NoSafepointScope no_safepoint; | 
 |     intptr_t j = 0; | 
 |     for (intptr_t i = 0; i < array_len; ++i) { | 
 |       if (Utf::IsSupplementary(utf32_array[i])) { | 
 |         ASSERT(j < (utf16_len - 1)); | 
 |         Utf16::Encode(utf32_array[i], CharAddr(result, j)); | 
 |         j += 2; | 
 |       } else { | 
 |         ASSERT(j < utf16_len); | 
 |         *CharAddr(result, j) = utf32_array[i]; | 
 |         j += 1; | 
 |       } | 
 |     } | 
 |   } | 
 |   return TwoByteString::raw(result); | 
 | } | 
 |  | 
 | TwoByteStringPtr TwoByteString::New(const String& str, Heap::Space space) { | 
 |   intptr_t len = str.Length(); | 
 |   const String& result = String::Handle(TwoByteString::New(len, space)); | 
 |   String::Copy(result, 0, str, 0, len); | 
 |   return TwoByteString::raw(result); | 
 | } | 
 |  | 
 | TwoByteStringPtr TwoByteString::New(const TypedDataBase& other_typed_data, | 
 |                                     intptr_t other_start_index, | 
 |                                     intptr_t other_len, | 
 |                                     Heap::Space space) { | 
 |   const String& result = String::Handle(TwoByteString::New(other_len, space)); | 
 |   if (other_len > 0) { | 
 |     NoSafepointScope no_safepoint; | 
 |     memmove(TwoByteString::DataStart(result), | 
 |             other_typed_data.DataAddr(other_start_index), | 
 |             other_len * sizeof(uint16_t)); | 
 |   } | 
 |   return TwoByteString::raw(result); | 
 | } | 
 |  | 
 | TwoByteStringPtr TwoByteString::Concat(const String& str1, | 
 |                                        const String& str2, | 
 |                                        Heap::Space space) { | 
 |   intptr_t len1 = str1.Length(); | 
 |   intptr_t len2 = str2.Length(); | 
 |   intptr_t len = len1 + len2; | 
 |   const String& result = String::Handle(TwoByteString::New(len, space)); | 
 |   String::Copy(result, 0, str1, 0, len1); | 
 |   String::Copy(result, len1, str2, 0, len2); | 
 |   return TwoByteString::raw(result); | 
 | } | 
 |  | 
 | TwoByteStringPtr TwoByteString::ConcatAll(const Array& strings, | 
 |                                           intptr_t start, | 
 |                                           intptr_t end, | 
 |                                           intptr_t len, | 
 |                                           Heap::Space space) { | 
 |   ASSERT(!strings.IsNull()); | 
 |   ASSERT(start >= 0); | 
 |   ASSERT(end <= strings.Length()); | 
 |   const String& result = String::Handle(TwoByteString::New(len, space)); | 
 |   String& str = String::Handle(); | 
 |   intptr_t pos = 0; | 
 |   for (intptr_t i = start; i < end; i++) { | 
 |     str ^= strings.At(i); | 
 |     const intptr_t str_len = str.Length(); | 
 |     String::Copy(result, pos, str, 0, str_len); | 
 |     ASSERT((kMaxElements - pos) >= str_len); | 
 |     pos += str_len; | 
 |   } | 
 |   return TwoByteString::raw(result); | 
 | } | 
 |  | 
 | TwoByteStringPtr TwoByteString::Transform(int32_t (*mapping)(int32_t ch), | 
 |                                           const String& str, | 
 |                                           Heap::Space space) { | 
 |   ASSERT(!str.IsNull()); | 
 |   intptr_t len = str.Length(); | 
 |   const String& result = String::Handle(TwoByteString::New(len, space)); | 
 |   String::CodePointIterator it(str); | 
 |   intptr_t i = 0; | 
 |   NoSafepointScope no_safepoint; | 
 |   while (it.Next()) { | 
 |     int32_t src = it.Current(); | 
 |     int32_t dst = mapping(src); | 
 |     ASSERT(dst >= 0 && dst <= 0x10FFFF); | 
    const intptr_t dst_len = Utf16::Length(dst);
    if (dst_len == 1) {
      *CharAddr(result, i) = dst;
    } else {
      ASSERT(dst_len == 2);
      Utf16::Encode(dst, CharAddr(result, i));
    }
    i += dst_len;
 |   } | 
 |   return TwoByteString::raw(result); | 
 | } | 
 |  | 
 | const char* Bool::ToCString() const { | 
 |   return value() ? "true" : "false"; | 
 | } | 
 |  | 
 | bool Array::CanonicalizeEquals(const Instance& other) const { | 
 |   if (this->ptr() == other.ptr()) { | 
 |     // Both handles point to the same raw instance. | 
 |     return true; | 
 |   } | 
 |  | 
 |   // An Array may be compared to an ImmutableArray. | 
 |   if (!other.IsArray() || other.IsNull()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   // First check if both arrays have the same length and elements. | 
 |   const Array& other_arr = Array::Cast(other); | 
 |  | 
 |   intptr_t len = this->Length(); | 
 |   if (len != other_arr.Length()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     if (this->At(i) != other_arr.At(i)) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |  | 
 |   // Now check if both arrays have the same type arguments. | 
 |   if (GetTypeArguments() == other.GetTypeArguments()) { | 
 |     return true; | 
 |   } | 
 |   const TypeArguments& type_args = TypeArguments::Handle(GetTypeArguments()); | 
 |   const TypeArguments& other_type_args = | 
 |       TypeArguments::Handle(other.GetTypeArguments()); | 
 |   if (!type_args.Equals(other_type_args)) { | 
 |     return false; | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | uint32_t Array::CanonicalizeHash() const { | 
 |   intptr_t len = Length(); | 
 |   if (len == 0) { | 
 |     return 1; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   uint32_t hash = thread->heap()->GetCanonicalHash(ptr()); | 
 |   if (hash != 0) { | 
 |     return hash; | 
 |   } | 
 |   hash = len; | 
 |   Instance& member = Instance::Handle(GetTypeArguments()); | 
 |   hash = CombineHashes(hash, member.CanonicalizeHash()); | 
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     member ^= At(i); | 
 |     hash = CombineHashes(hash, member.CanonicalizeHash()); | 
 |   } | 
 |   hash = FinalizeHash(hash, kHashBits); | 
 |   thread->heap()->SetCanonicalHash(ptr(), hash); | 
 |   return hash; | 
 | } | 
 |  | 
 | ArrayPtr Array::New(intptr_t len, | 
 |                     const AbstractType& element_type, | 
 |                     Heap::Space space) { | 
 |   const Array& result = Array::Handle(Array::New(len, space)); | 
 |   if (!element_type.IsDynamicType()) { | 
 |     TypeArguments& type_args = TypeArguments::Handle(TypeArguments::New(1)); | 
 |     type_args.SetTypeAt(0, element_type); | 
 |     type_args = type_args.Canonicalize(Thread::Current()); | 
 |     result.SetTypeArguments(type_args); | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ArrayPtr Array::NewUninitialized(intptr_t class_id, | 
 |                                  intptr_t len, | 
 |                                  Heap::Space space) { | 
 |   if (!IsValidLength(len)) { | 
 |     // This should be caught before we reach here. | 
 |     FATAL("Fatal error in Array::New: invalid len %" Pd "\n", len); | 
 |   } | 
 |   auto raw = Object::AllocateVariant<Array>(class_id, space, len); | 
 |   NoSafepointScope no_safepoint; | 
 |   raw->untag()->set_length(Smi::New(len)); | 
 |   if (UseCardMarkingForAllocation(len)) { | 
 |     ASSERT(raw->IsOldObject()); | 
 |     raw->untag()->SetCardRememberedBitUnsynchronized(); | 
 |   } | 
 |   return raw; | 
 | } | 
 |  | 
 | ArrayPtr Array::New(intptr_t class_id, intptr_t len, Heap::Space space) { | 
 |   if (!UseCardMarkingForAllocation(len)) { | 
 |     return NewUninitialized(class_id, len, space); | 
 |   } | 
 |  | 
 |   Thread* thread = Thread::Current(); | 
 |   Array& result = | 
 |       Array::Handle(thread->zone(), NewUninitialized(class_id, len, space)); | 
 |   result.SetTypeArguments(Object::null_type_arguments()); | 
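  // Fill the elements with null one slot at a time, checking in at safepoints
  // periodically so that initializing a very large array does not prevent
  // this thread from servicing a pending safepoint request.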
 |   for (intptr_t i = 0; i < len; i++) { | 
 |     result.SetAt(i, Object::null_object(), thread); | 
 |     if (((i + 1) % kSlotsPerInterruptCheck) == 0) { | 
 |       thread->CheckForSafepoint(); | 
 |     } | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ArrayPtr Array::Slice(intptr_t start, | 
 |                       intptr_t count, | 
 |                       bool with_type_argument) const { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const Array& dest = Array::Handle(zone, Array::NewUninitialized(count)); | 
 |   if (with_type_argument) { | 
 |     dest.SetTypeArguments(TypeArguments::Handle(zone, GetTypeArguments())); | 
 |   } else { | 
 |     dest.SetTypeArguments(Object::null_type_arguments()); | 
 |   } | 
 |   if (!UseCardMarkingForAllocation(count)) { | 
 |     NoSafepointScope no_safepoint(thread); | 
 |     for (int i = 0; i < count; i++) { | 
 |       dest.untag()->set_element(i, untag()->element(i + start), thread); | 
 |     } | 
 |   } else { | 
 |     for (int i = 0; i < count; i++) { | 
 |       dest.untag()->set_element(i, untag()->element(i + start), thread); | 
 |       if (((i + 1) % kSlotsPerInterruptCheck) == 0) { | 
 |         thread->CheckForSafepoint(); | 
 |       } | 
 |     } | 
 |   } | 
 |   return dest.ptr(); | 
 | } | 
 |  | 
 | void Array::MakeImmutable() const { | 
 |   if (IsImmutable()) return; | 
 |   ASSERT(!IsCanonical()); | 
 |   untag()->SetClassId(kImmutableArrayCid); | 
 | } | 
 |  | 
 | const char* Array::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return IsImmutable() ? "_ImmutableList nullptr" : "_List nullptr"; | 
 |   } | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const char* format = | 
 |       IsImmutable() ? "_ImmutableList len:%" Pd : "_List len:%" Pd; | 
 |   return zone->PrintToString(format, Length()); | 
 | } | 
 |  | 
 | ArrayPtr Array::Grow(const Array& source, | 
 |                      intptr_t new_length, | 
 |                      Heap::Space space) { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const Array& result = | 
 |       Array::Handle(zone, Array::NewUninitialized(new_length, space)); | 
 |   intptr_t old_length = 0; | 
 |   if (!source.IsNull()) { | 
 |     old_length = source.Length(); | 
 |     result.SetTypeArguments( | 
 |         TypeArguments::Handle(zone, source.GetTypeArguments())); | 
 |   } else { | 
 |     result.SetTypeArguments(Object::null_type_arguments()); | 
 |   } | 
 |   ASSERT(new_length > old_length);  // Unnecessary copying of array. | 
 |   if (!UseCardMarkingForAllocation(new_length)) { | 
 |     NoSafepointScope no_safepoint(thread); | 
 |     for (intptr_t i = 0; i < old_length; i++) { | 
 |       result.untag()->set_element(i, source.untag()->element(i), thread); | 
 |     } | 
 |     for (intptr_t i = old_length; i < new_length; i++) { | 
 |       ASSERT(result.untag()->element(i) == Object::null()); | 
 |     } | 
 |   } else { | 
 |     for (intptr_t i = 0; i < old_length; i++) { | 
 |       result.untag()->set_element(i, source.untag()->element(i), thread); | 
 |       if (((i + 1) % kSlotsPerInterruptCheck) == 0) { | 
 |         thread->CheckForSafepoint(); | 
 |       } | 
 |     } | 
 |     for (intptr_t i = old_length; i < new_length; i++) { | 
 |       result.untag()->set_element(i, Object::null(), thread); | 
 |       if (((i + 1) % kSlotsPerInterruptCheck) == 0) { | 
 |         thread->CheckForSafepoint(); | 
 |       } | 
 |     } | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void Array::Truncate(intptr_t new_len) const { | 
 |   if (IsNull()) { | 
 |     return; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const Array& array = Array::Handle(zone, this->ptr()); | 
 |  | 
 |   intptr_t old_len = array.Length(); | 
 |   ASSERT(new_len <= old_len); | 
 |   if (old_len == new_len) { | 
 |     return; | 
 |   } | 
 |   intptr_t old_size = Array::InstanceSize(old_len); | 
 |   intptr_t new_size = Array::InstanceSize(new_len); | 
 |  | 
 |   NoSafepointScope no_safepoint; | 
 |  | 
  // If there is any leftover space, fill it with either an Array object or
  // just a plain object (depending on the amount of leftover space) so
  // that it can be traversed successfully during garbage collection.
 |   Object::MakeUnusedSpaceTraversable(array, old_size, new_size); | 
 |  | 
 |   // Update the size in the header field and length of the array object. | 
 |   // These release operations are balanced by acquire operations in the | 
 |   // concurrent sweeper. | 
 |   uword old_tags = array.untag()->tags_; | 
 |   uword new_tags; | 
 |   ASSERT(kArrayCid == UntaggedObject::ClassIdTag::decode(old_tags)); | 
 |   do { | 
 |     new_tags = UntaggedObject::SizeTag::update(new_size, old_tags); | 
 |   } while (!array.untag()->tags_.compare_exchange_weak( | 
 |       old_tags, new_tags, std::memory_order_release)); | 
 |  | 
 |   // Between the CAS of the header above and the SetLength below, the array is | 
 |   // temporarily in an inconsistent state. The header is considered the | 
 |   // overriding source of object size by UntaggedObject::HeapSize, but the | 
 |   // ASSERTs in UntaggedObject::HeapSizeFromClass must handle this special case. | 
 |   array.SetLengthRelease(new_len); | 
 | } | 
 |  | 
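// For a non-empty growable array, reuses its backing store instead of
// copying: the growable array is emptied and its old data array is truncated
// in place to the used length, then returned as the fixed-length result.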
 | ArrayPtr Array::MakeFixedLength(const GrowableObjectArray& growable_array, | 
 |                                 bool unique) { | 
 |   ASSERT(!growable_array.IsNull()); | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   intptr_t used_len = growable_array.Length(); | 
 |   // Get the type arguments and prepare to copy them. | 
 |   const TypeArguments& type_arguments = | 
 |       TypeArguments::Handle(growable_array.GetTypeArguments()); | 
 |   if (used_len == 0) { | 
 |     if (type_arguments.IsNull() && !unique) { | 
 |       // This is a raw List (as in no type arguments), so we can return the | 
 |       // simple empty array. | 
 |       return Object::empty_array().ptr(); | 
 |     } | 
 |  | 
 |     // The backing array may be a shared instance, or may not have correct | 
 |     // type parameters. Create a new empty array. | 
 |     Heap::Space space = thread->IsDartMutatorThread() ? Heap::kNew : Heap::kOld; | 
 |     Array& array = Array::Handle(zone, Array::New(0, space)); | 
 |     array.SetTypeArguments(type_arguments); | 
 |     return array.ptr(); | 
 |   } | 
 |   const Array& array = Array::Handle(zone, growable_array.data()); | 
 |   ASSERT(array.IsArray()); | 
 |   array.SetTypeArguments(type_arguments); | 
 |  | 
 |   // Null the GrowableObjectArray, we are removing its backing array. | 
 |   growable_array.SetLength(0); | 
 |   growable_array.SetData(Object::empty_array()); | 
 |  | 
 |   // Truncate the old backing array and return it. | 
 |   array.Truncate(used_len); | 
 |   return array.ptr(); | 
 | } | 
 |  | 
 | void Array::CanonicalizeFieldsLocked(Thread* thread) const { | 
 |   ASSERT(IsImmutable()); | 
 |   intptr_t len = Length(); | 
 |   if (len > 0) { | 
 |     Zone* zone = thread->zone(); | 
 |     Instance& obj = Instance::Handle(zone); | 
 |     for (intptr_t i = 0; i < len; i++) { | 
 |       obj ^= At(i); | 
 |       obj = obj.CanonicalizeLocked(thread); | 
 |       this->SetAt(i, obj); | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | ImmutableArrayPtr ImmutableArray::New(intptr_t len, Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->immutable_array_class() != | 
 |          Class::null()); | 
 |   return static_cast<ImmutableArrayPtr>(Array::New(kClassId, len, space)); | 
 | } | 
 |  | 
 | void GrowableObjectArray::Add(const Object& value, Heap::Space space) const { | 
 |   ASSERT(!IsNull()); | 
 |   if (Length() == Capacity()) { | 
    // Grow from 0 to 3, and then double plus one: 3, 7, 15, 31, ...
 |     intptr_t new_capacity = (Capacity() * 2) | 3; | 
 |     if (new_capacity <= Capacity()) { | 
 |       Exceptions::ThrowOOM(); | 
 |       UNREACHABLE(); | 
 |     } | 
 |     Grow(new_capacity, space); | 
 |   } | 
 |   ASSERT(Length() < Capacity()); | 
 |   intptr_t index = Length(); | 
 |   SetLength(index + 1); | 
 |   SetAt(index, value); | 
 | } | 
 |  | 
 | void GrowableObjectArray::Grow(intptr_t new_capacity, Heap::Space space) const { | 
 |   ASSERT(new_capacity > Capacity()); | 
 |   const Array& contents = Array::Handle(data()); | 
 |   const Array& new_contents = | 
 |       Array::Handle(Array::Grow(contents, new_capacity, space)); | 
 |   untag()->set_data(new_contents.ptr()); | 
 | } | 
 |  | 
 | ObjectPtr GrowableObjectArray::RemoveLast() const { | 
 |   ASSERT(!IsNull()); | 
 |   ASSERT(Length() > 0); | 
 |   intptr_t index = Length() - 1; | 
 |   const Array& contents = Array::Handle(data()); | 
 |   const PassiveObject& obj = PassiveObject::Handle(contents.At(index)); | 
 |   contents.SetAt(index, Object::null_object()); | 
 |   SetLength(index); | 
 |   return obj.ptr(); | 
 | } | 
 |  | 
 | GrowableObjectArrayPtr GrowableObjectArray::New(intptr_t capacity, | 
 |                                                 Heap::Space space) { | 
 |   ArrayPtr raw_data = (capacity == 0) ? Object::empty_array().ptr() | 
 |                                       : Array::New(capacity, space); | 
 |   const Array& data = Array::Handle(raw_data); | 
 |   return New(data, space); | 
 | } | 
 |  | 
 | GrowableObjectArrayPtr GrowableObjectArray::New(const Array& array, | 
 |                                                 Heap::Space space) { | 
 |   ASSERT( | 
 |       IsolateGroup::Current()->object_store()->growable_object_array_class() != | 
 |       Class::null()); | 
 |   const auto& result = | 
 |       GrowableObjectArray::Handle(Object::Allocate<GrowableObjectArray>(space)); | 
 |   result.SetLength(0); | 
 |   result.SetData(array); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* GrowableObjectArray::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "_GrowableList: null"; | 
 |   } | 
 |   return OS::SCreate(Thread::Current()->zone(), | 
 |                      "Instance(length:%" Pd ") of '_GrowableList'", Length()); | 
 | } | 
 |  | 
 | // Equivalent to Dart's operator "==" and hashCode. | 
 | class DefaultHashTraits { | 
 |  public: | 
 |   static const char* Name() { return "DefaultHashTraits"; } | 
 |   static bool ReportStats() { return false; } | 
 |  | 
 |   static bool IsMatch(const Object& a, const Object& b) { | 
 |     if (a.IsNull() || b.IsNull()) { | 
 |       return (a.IsNull() && b.IsNull()); | 
 |     } else { | 
 |       return Instance::Cast(a).OperatorEquals(Instance::Cast(b)); | 
 |     } | 
 |   } | 
 |   static uword Hash(const Object& obj) { | 
 |     if (obj.IsNull()) { | 
 |       return 0; | 
 |     } | 
 |     // TODO(koda): Ensure VM classes only produce Smi hash codes, and remove | 
 |     // non-Smi cases once Dart-side implementation is complete. | 
 |     Thread* thread = Thread::Current(); | 
 |     REUSABLE_INSTANCE_HANDLESCOPE(thread); | 
 |     Instance& hash_code = thread->InstanceHandle(); | 
 |     hash_code ^= Instance::Cast(obj).HashCode(); | 
 |     if (hash_code.IsSmi()) { | 
 |       // May waste some bits on 64-bit, to ensure consistency with non-Smi case. | 
 |       return static_cast<uword>(Smi::Cast(hash_code).AsTruncatedUint32Value()); | 
 |     } else if (hash_code.IsInteger()) { | 
 |       return static_cast<uword>( | 
 |           Integer::Cast(hash_code).AsTruncatedUint32Value()); | 
 |     } else { | 
 |       return 0; | 
 |     } | 
 |   } | 
 | }; | 
 |  | 
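// A fresh map starts with a data array and a Uint32 index, both of
// kInitialIndexSize entries; the initial hash mask is chosen so that masked
// hash values always fit in a Smi.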
 | MapPtr Map::NewDefault(intptr_t class_id, Heap::Space space) { | 
 |   const Array& data = Array::Handle(Array::New(kInitialIndexSize, space)); | 
 |   const TypedData& index = TypedData::Handle( | 
 |       TypedData::New(kTypedDataUint32ArrayCid, kInitialIndexSize, space)); | 
 |   // On 32-bit, the top bits are wasted to avoid Mint allocation. | 
 |   const intptr_t kAvailableBits = (kSmiBits >= 32) ? 32 : kSmiBits; | 
 |   const intptr_t kInitialHashMask = | 
 |       (1 << (kAvailableBits - kInitialIndexBits)) - 1; | 
 |   return Map::New(class_id, data, index, kInitialHashMask, 0, 0, space); | 
 | } | 
 |  | 
 | MapPtr Map::New(intptr_t class_id, | 
 |                 const Array& data, | 
 |                 const TypedData& index, | 
 |                 intptr_t hash_mask, | 
 |                 intptr_t used_data, | 
 |                 intptr_t deleted_keys, | 
 |                 Heap::Space space) { | 
 |   ASSERT(class_id == kMapCid || class_id == kConstMapCid); | 
 |   ASSERT(IsolateGroup::Current()->object_store()->map_impl_class() != | 
 |          Class::null()); | 
 |   Map& result = Map::Handle(Map::NewUninitialized(class_id, space)); | 
 |   result.set_data(data); | 
 |   result.set_index(index); | 
 |   result.set_hash_mask(hash_mask); | 
 |   result.set_used_data(used_data); | 
 |   result.set_deleted_keys(deleted_keys); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | MapPtr Map::NewUninitialized(intptr_t class_id, Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->map_impl_class() != | 
 |          Class::null()); | 
 |   return Object::AllocateVariant<Map>(class_id, space); | 
 | } | 
 |  | 
 | const char* Map::ToCString() const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   return zone->PrintToString( | 
 |       "%s len:%" Pd, GetClassId() == kConstMapCid ? "_ConstMap" : "_Map", | 
 |       Length()); | 
 | } | 
 |  | 
 | void LinkedHashBase::ComputeAndSetHashMask() const { | 
 |   ASSERT(IsImmutable()); | 
 |   ASSERT_EQUAL(Smi::Value(deleted_keys()), 0); | 
 |   Thread* const thread = Thread::Current(); | 
 |   Zone* const zone = thread->zone(); | 
 |  | 
 |   const auto& data_array = Array::Handle(zone, data()); | 
 |   const intptr_t data_length = Utils::RoundUpToPowerOfTwo(data_array.Length()); | 
 |   const intptr_t index_size_mult = IsMap() ? 1 : 2; | 
 |   const intptr_t index_size = Utils::Maximum(LinkedHashBase::kInitialIndexSize, | 
 |                                              data_length * index_size_mult); | 
 |   ASSERT(Utils::IsPowerOfTwo(index_size)); | 
 |  | 
 |   const intptr_t hash_mask = IndexSizeToHashMask(index_size); | 
 |   set_hash_mask(hash_mask); | 
 | } | 
 |  | 
 | bool LinkedHashBase::CanonicalizeEquals(const Instance& other) const { | 
 |   ASSERT(IsImmutable()); | 
 |  | 
 |   if (this->ptr() == other.ptr()) { | 
 |     // Both handles point to the same raw instance. | 
 |     return true; | 
 |   } | 
 |   if (other.IsNull()) { | 
 |     return false; | 
 |   } | 
 |   if (GetClassId() != other.GetClassId()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |  | 
 |   const LinkedHashBase& other_map = LinkedHashBase::Cast(other); | 
 |  | 
 |   if (!Smi::Handle(zone, used_data()) | 
 |            .Equals(Smi::Handle(zone, other_map.used_data()))) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   // Immutable maps and sets do not have deleted keys. | 
 |   ASSERT_EQUAL(RawSmiValue(deleted_keys()), 0); | 
 |  | 
 |   if (!Array::Handle(zone, data()) | 
 |            .CanonicalizeEquals(Array::Handle(zone, other_map.data()))) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   if (GetTypeArguments() == other.GetTypeArguments()) { | 
 |     return true; | 
 |   } | 
 |   const TypeArguments& type_args = | 
 |       TypeArguments::Handle(zone, GetTypeArguments()); | 
 |   const TypeArguments& other_type_args = | 
 |       TypeArguments::Handle(zone, other.GetTypeArguments()); | 
 |   return type_args.Equals(other_type_args); | 
 | } | 
 |  | 
 | uint32_t LinkedHashBase::CanonicalizeHash() const { | 
 |   ASSERT(IsImmutable()); | 
 |  | 
 |   Thread* thread = Thread::Current(); | 
 |   uint32_t hash = thread->heap()->GetCanonicalHash(ptr()); | 
 |   if (hash != 0) { | 
 |     return hash; | 
 |   } | 
 |  | 
 |   // Immutable maps and sets do not have deleted keys. | 
 |   ASSERT_EQUAL(RawSmiValue(deleted_keys()), 0); | 
 |  | 
 |   Zone* zone = thread->zone(); | 
 |   auto& member = Instance::Handle(zone, GetTypeArguments()); | 
 |   hash = member.CanonicalizeHash(); | 
 |   member = data(); | 
 |   hash = CombineHashes(hash, member.CanonicalizeHash()); | 
 |   member = used_data(); | 
 |   hash = CombineHashes(hash, member.CanonicalizeHash()); | 
 |   hash = FinalizeHash(hash, kHashBits); | 
 |   thread->heap()->SetCanonicalHash(ptr(), hash); | 
 |   return hash; | 
 | } | 
 |  | 
 | void LinkedHashBase::CanonicalizeFieldsLocked(Thread* thread) const { | 
 |   ASSERT(IsImmutable()); | 
 |  | 
 |   Zone* zone = thread->zone(); | 
 |  | 
 |   TypeArguments& type_args = TypeArguments::Handle(zone, GetTypeArguments()); | 
 |   if (!type_args.IsNull()) { | 
 |     type_args = type_args.Canonicalize(thread); | 
 |     SetTypeArguments(type_args); | 
 |   } | 
 |  | 
 |   auto& data_array = Array::Handle(zone, data()); | 
 |   data_array.MakeImmutable(); | 
 |   data_array ^= data_array.CanonicalizeLocked(thread); | 
 |   set_data(data_array); | 
 |  | 
 |   // Ignoring index. It will be initially null, created on first use, and | 
 |   // possibly non-null here if we are rehashing. | 
 | } | 
 |  | 
 | ConstMapPtr ConstMap::NewDefault(Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->const_map_impl_class() != | 
 |          Class::null()); | 
 |   return static_cast<ConstMapPtr>(Map::NewDefault(kClassId, space)); | 
 | } | 
 |  | 
 | ConstMapPtr ConstMap::NewUninitialized(Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->const_map_impl_class() != | 
 |          Class::null()); | 
 |   return static_cast<ConstMapPtr>(Map::NewUninitialized(kClassId, space)); | 
 | } | 
 |  | 
 | SetPtr Set::New(intptr_t class_id, | 
 |                 const Array& data, | 
 |                 const TypedData& index, | 
 |                 intptr_t hash_mask, | 
 |                 intptr_t used_data, | 
 |                 intptr_t deleted_keys, | 
 |                 Heap::Space space) { | 
 |   ASSERT(class_id == kSetCid || class_id == kConstSetCid); | 
 |   ASSERT(IsolateGroup::Current()->object_store()->set_impl_class() != | 
 |          Class::null()); | 
 |   Set& result = Set::Handle(Set::NewUninitialized(class_id, space)); | 
 |   result.set_data(data); | 
 |   result.set_index(index); | 
 |   result.set_hash_mask(hash_mask); | 
 |   result.set_used_data(used_data); | 
 |   result.set_deleted_keys(deleted_keys); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | SetPtr Set::NewDefault(intptr_t class_id, Heap::Space space) { | 
 |   const Array& data = Array::Handle(Array::New(kInitialIndexSize, space)); | 
 |   const TypedData& index = TypedData::Handle( | 
 |       TypedData::New(kTypedDataUint32ArrayCid, kInitialIndexSize, space)); | 
 |   // On 32-bit, the top bits are wasted to avoid Mint allocation. | 
 |   const intptr_t kAvailableBits = (kSmiBits >= 32) ? 32 : kSmiBits; | 
 |   const intptr_t kInitialHashMask = | 
 |       (1 << (kAvailableBits - kInitialIndexBits)) - 1; | 
 |   return Set::New(class_id, data, index, kInitialHashMask, 0, 0, space); | 
 | } | 
 |  | 
 | SetPtr Set::NewUninitialized(intptr_t class_id, Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->set_impl_class() != | 
 |          Class::null()); | 
 |   return Object::AllocateVariant<Set>(class_id, space); | 
 | } | 
 |  | 
 | ConstSetPtr ConstSet::NewDefault(Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->const_set_impl_class() != | 
 |          Class::null()); | 
 |   return static_cast<ConstSetPtr>(Set::NewDefault(kClassId, space)); | 
 | } | 
 |  | 
 | ConstSetPtr ConstSet::NewUninitialized(Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->const_set_impl_class() != | 
 |          Class::null()); | 
 |   return static_cast<ConstSetPtr>(Set::NewUninitialized(kClassId, space)); | 
 | } | 
 |  | 
 | const char* Set::ToCString() const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   return zone->PrintToString( | 
 |       "%s len:%" Pd, GetClassId() == kConstSetCid ? "_ConstSet" : "_Set", | 
 |       Length()); | 
 | } | 
 |  | 
 | const char* FutureOr::ToCString() const { | 
 |   // FutureOr is an abstract class. | 
 |   UNREACHABLE(); | 
 | } | 
 |  | 
 | Float32x4Ptr Float32x4::New(float v0, | 
 |                             float v1, | 
 |                             float v2, | 
 |                             float v3, | 
 |                             Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->float32x4_class() != | 
 |          Class::null()); | 
 |   const auto& result = Float32x4::Handle(Object::Allocate<Float32x4>(space)); | 
 |   result.set_x(v0); | 
 |   result.set_y(v1); | 
 |   result.set_z(v2); | 
 |   result.set_w(v3); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | Float32x4Ptr Float32x4::New(simd128_value_t value, Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->float32x4_class() != | 
 |          Class::null()); | 
 |   const auto& result = Float32x4::Handle(Object::Allocate<Float32x4>(space)); | 
 |   result.set_value(value); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
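// The value_ field is only guaranteed the alignment of the enclosing heap
// object, so the 128-bit value is accessed with explicit unaligned
// loads/stores rather than direct simd128 memory operations.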
 | simd128_value_t Float32x4::value() const { | 
 |   return LoadUnaligned( | 
 |       reinterpret_cast<const simd128_value_t*>(&untag()->value_)); | 
 | } | 
 |  | 
 | void Float32x4::set_value(simd128_value_t value) const { | 
 |   StoreUnaligned(reinterpret_cast<simd128_value_t*>(&ptr()->untag()->value_), | 
 |                  value); | 
 | } | 
 |  | 
 | void Float32x4::set_x(float value) const { | 
 |   StoreNonPointer(&untag()->value_[0], value); | 
 | } | 
 |  | 
 | void Float32x4::set_y(float value) const { | 
 |   StoreNonPointer(&untag()->value_[1], value); | 
 | } | 
 |  | 
 | void Float32x4::set_z(float value) const { | 
 |   StoreNonPointer(&untag()->value_[2], value); | 
 | } | 
 |  | 
 | void Float32x4::set_w(float value) const { | 
 |   StoreNonPointer(&untag()->value_[3], value); | 
 | } | 
 |  | 
 | float Float32x4::x() const { | 
 |   return untag()->value_[0]; | 
 | } | 
 |  | 
 | float Float32x4::y() const { | 
 |   return untag()->value_[1]; | 
 | } | 
 |  | 
 | float Float32x4::z() const { | 
 |   return untag()->value_[2]; | 
 | } | 
 |  | 
 | float Float32x4::w() const { | 
 |   return untag()->value_[3]; | 
 | } | 
 |  | 
 | bool Float32x4::CanonicalizeEquals(const Instance& other) const { | 
 |   return memcmp(&untag()->value_, Float32x4::Cast(other).untag()->value_, | 
 |                 sizeof(simd128_value_t)) == 0; | 
 | } | 
 |  | 
 | uint32_t Float32x4::CanonicalizeHash() const { | 
 |   return HashBytes(reinterpret_cast<const uint8_t*>(&untag()->value_), | 
 |                    sizeof(simd128_value_t)); | 
 | } | 
 |  | 
 | const char* Float32x4::ToCString() const { | 
 |   float _x = x(); | 
 |   float _y = y(); | 
 |   float _z = z(); | 
 |   float _w = w(); | 
 |   return OS::SCreate(Thread::Current()->zone(), "[%f, %f, %f, %f]", _x, _y, _z, | 
 |                      _w); | 
 | } | 
 |  | 
 | Int32x4Ptr Int32x4::New(int32_t v0, | 
 |                         int32_t v1, | 
 |                         int32_t v2, | 
 |                         int32_t v3, | 
 |                         Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->int32x4_class() != | 
 |          Class::null()); | 
 |   const auto& result = Int32x4::Handle(Object::Allocate<Int32x4>(space)); | 
 |   result.set_x(v0); | 
 |   result.set_y(v1); | 
 |   result.set_z(v2); | 
 |   result.set_w(v3); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | Int32x4Ptr Int32x4::New(simd128_value_t value, Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->int32x4_class() != | 
 |          Class::null()); | 
 |   const auto& result = Int32x4::Handle(Object::Allocate<Int32x4>(space)); | 
 |   result.set_value(value); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | void Int32x4::set_x(int32_t value) const { | 
 |   StoreNonPointer(&untag()->value_[0], value); | 
 | } | 
 |  | 
 | void Int32x4::set_y(int32_t value) const { | 
 |   StoreNonPointer(&untag()->value_[1], value); | 
 | } | 
 |  | 
 | void Int32x4::set_z(int32_t value) const { | 
 |   StoreNonPointer(&untag()->value_[2], value); | 
 | } | 
 |  | 
 | void Int32x4::set_w(int32_t value) const { | 
 |   StoreNonPointer(&untag()->value_[3], value); | 
 | } | 
 |  | 
 | int32_t Int32x4::x() const { | 
 |   return untag()->value_[0]; | 
 | } | 
 |  | 
 | int32_t Int32x4::y() const { | 
 |   return untag()->value_[1]; | 
 | } | 
 |  | 
 | int32_t Int32x4::z() const { | 
 |   return untag()->value_[2]; | 
 | } | 
 |  | 
 | int32_t Int32x4::w() const { | 
 |   return untag()->value_[3]; | 
 | } | 
 |  | 
 | simd128_value_t Int32x4::value() const { | 
 |   return LoadUnaligned( | 
 |       reinterpret_cast<const simd128_value_t*>(&untag()->value_)); | 
 | } | 
 |  | 
 | void Int32x4::set_value(simd128_value_t value) const { | 
 |   StoreUnaligned(reinterpret_cast<simd128_value_t*>(&ptr()->untag()->value_), | 
 |                  value); | 
 | } | 
 |  | 
 | bool Int32x4::CanonicalizeEquals(const Instance& other) const { | 
 |   return memcmp(&untag()->value_, Int32x4::Cast(other).untag()->value_, | 
 |                 sizeof(simd128_value_t)) == 0; | 
 | } | 
 |  | 
 | uint32_t Int32x4::CanonicalizeHash() const { | 
 |   return HashBytes(reinterpret_cast<const uint8_t*>(&untag()->value_), | 
 |                    sizeof(simd128_value_t)); | 
 | } | 
 |  | 
 | const char* Int32x4::ToCString() const { | 
 |   int32_t _x = x(); | 
 |   int32_t _y = y(); | 
 |   int32_t _z = z(); | 
 |   int32_t _w = w(); | 
 |   return OS::SCreate(Thread::Current()->zone(), "[%08x, %08x, %08x, %08x]", _x, | 
 |                      _y, _z, _w); | 
 | } | 
 |  | 
 | Float64x2Ptr Float64x2::New(double value0, double value1, Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->float64x2_class() != | 
 |          Class::null()); | 
 |   const auto& result = Float64x2::Handle(Object::Allocate<Float64x2>(space)); | 
 |   result.set_x(value0); | 
 |   result.set_y(value1); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | Float64x2Ptr Float64x2::New(simd128_value_t value, Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->float64x2_class() != | 
 |          Class::null()); | 
 |   const auto& result = Float64x2::Handle(Object::Allocate<Float64x2>(space)); | 
 |   result.set_value(value); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | double Float64x2::x() const { | 
 |   return untag()->value_[0]; | 
 | } | 
 |  | 
 | double Float64x2::y() const { | 
 |   return untag()->value_[1]; | 
 | } | 
 |  | 
 | void Float64x2::set_x(double x) const { | 
 |   StoreNonPointer(&untag()->value_[0], x); | 
 | } | 
 |  | 
 | void Float64x2::set_y(double y) const { | 
 |   StoreNonPointer(&untag()->value_[1], y); | 
 | } | 
 |  | 
 | simd128_value_t Float64x2::value() const { | 
 |   return simd128_value_t().readFrom(&untag()->value_[0]); | 
 | } | 
 |  | 
 | void Float64x2::set_value(simd128_value_t value) const { | 
 |   StoreSimd128(&untag()->value_[0], value); | 
 | } | 
 |  | 
 | bool Float64x2::CanonicalizeEquals(const Instance& other) const { | 
 |   return memcmp(&untag()->value_, Float64x2::Cast(other).untag()->value_, | 
 |                 sizeof(simd128_value_t)) == 0; | 
 | } | 
 |  | 
 | uint32_t Float64x2::CanonicalizeHash() const { | 
 |   return HashBytes(reinterpret_cast<const uint8_t*>(&untag()->value_), | 
 |                    sizeof(simd128_value_t)); | 
 | } | 
 |  | 
 | const char* Float64x2::ToCString() const { | 
 |   double _x = x(); | 
 |   double _y = y(); | 
 |   return OS::SCreate(Thread::Current()->zone(), "[%f, %f]", _x, _y); | 
 | } | 
 |  | 
 | const intptr_t | 
 |     TypedDataBase::element_size_table[TypedDataBase::kNumElementSizes] = { | 
 |         1,   // kTypedDataInt8ArrayCid. | 
 |         1,   // kTypedDataUint8ArrayCid. | 
 |         1,   // kTypedDataUint8ClampedArrayCid. | 
 |         2,   // kTypedDataInt16ArrayCid. | 
 |         2,   // kTypedDataUint16ArrayCid. | 
 |         4,   // kTypedDataInt32ArrayCid. | 
 |         4,   // kTypedDataUint32ArrayCid. | 
 |         8,   // kTypedDataInt64ArrayCid. | 
 |         8,   // kTypedDataUint64ArrayCid. | 
 |         4,   // kTypedDataFloat32ArrayCid. | 
 |         8,   // kTypedDataFloat64ArrayCid. | 
 |         16,  // kTypedDataFloat32x4ArrayCid. | 
 |         16,  // kTypedDataInt32x4ArrayCid. | 
        16,  // kTypedDataFloat64x2ArrayCid.
 |     }; | 
 |  | 
 | bool TypedData::CanonicalizeEquals(const Instance& other) const { | 
 |   if (this->ptr() == other.ptr()) { | 
 |     // Both handles point to the same raw instance. | 
 |     return true; | 
 |   } | 
 |  | 
 |   if (!other.IsTypedData() || other.IsNull()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   const TypedData& other_typed_data = TypedData::Cast(other); | 
 |  | 
 |   if (this->ElementType() != other_typed_data.ElementType()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   const intptr_t len = this->LengthInBytes(); | 
 |   if (len != other_typed_data.LengthInBytes()) { | 
 |     return false; | 
 |   } | 
 |   NoSafepointScope no_safepoint; | 
 |   return (len == 0) || | 
 |          (memcmp(DataAddr(0), other_typed_data.DataAddr(0), len) == 0); | 
 | } | 
 |  | 
 | uint32_t TypedData::CanonicalizeHash() const { | 
 |   const intptr_t len = this->LengthInBytes(); | 
 |   if (len == 0) { | 
 |     return 1; | 
 |   } | 
 |   uint32_t hash = len; | 
 |   for (intptr_t i = 0; i < len; i++) { | 
    hash = CombineHashes(hash, GetUint8(i));
 |   } | 
 |   return FinalizeHash(hash, kHashBits); | 
 | } | 
 |  | 
 | TypedDataPtr TypedData::New(intptr_t class_id, | 
 |                             intptr_t len, | 
 |                             Heap::Space space) { | 
 |   if (len < 0 || len > TypedData::MaxElements(class_id)) { | 
 |     FATAL("Fatal error in TypedData::New: invalid len %" Pd "\n", len); | 
 |   } | 
 |   auto raw = Object::AllocateVariant<TypedData>( | 
 |       class_id, space, len * ElementSizeInBytes(class_id)); | 
 |   NoSafepointScope no_safepoint; | 
 |   raw->untag()->set_length(Smi::New(len)); | 
 |   raw->untag()->RecomputeDataField(); | 
 |   return raw; | 
 | } | 
 |  | 
 | TypedDataPtr TypedData::Grow(const TypedData& current, | 
 |                              intptr_t len, | 
 |                              Heap::Space space) { | 
 |   ASSERT(len > current.Length()); | 
 |   const auto& new_td = | 
 |       TypedData::Handle(TypedData::New(current.GetClassId(), len, space)); | 
 |   { | 
 |     NoSafepointScope no_safepoint_scope; | 
 |     memcpy(new_td.DataAddr(0), current.DataAddr(0), current.LengthInBytes()); | 
 |   } | 
 |   return new_td.ptr(); | 
 | } | 
 |  | 
 | const char* TypedData::ToCString() const { | 
 |   const Class& cls = Class::Handle(clazz()); | 
 |   return cls.ScrubbedNameCString(); | 
 | } | 
 |  | 
 | FinalizablePersistentHandle* ExternalTypedData::AddFinalizer( | 
 |     void* peer, | 
 |     Dart_HandleFinalizer callback, | 
 |     intptr_t external_size) const { | 
 |   return dart::AddFinalizer(*this, peer, callback, external_size); | 
 | } | 
 |  | 
 | ExternalTypedDataPtr ExternalTypedData::New( | 
 |     intptr_t class_id, | 
 |     uint8_t* data, | 
 |     intptr_t len, | 
 |     Heap::Space space, | 
 |     bool perform_eager_msan_initialization_check) { | 
 |   if (len < 0 || len > ExternalTypedData::MaxElements(class_id)) { | 
 |     FATAL("Fatal error in ExternalTypedData::New: invalid len %" Pd "\n", len); | 
 |   } | 
 |  | 
 |   if (perform_eager_msan_initialization_check) { | 
 |     // Once the TypedData is created, Dart might read this memory. Check for | 
 |     // initialization at construction to make it easier to track the source. | 
 |     MSAN_CHECK_INITIALIZED(data, len); | 
 |   } | 
 |  | 
 |   const auto& result = ExternalTypedData::Handle( | 
 |       Object::AllocateVariant<ExternalTypedData>(class_id, space)); | 
 |   result.SetLength(len); | 
 |   result.SetData(data); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | ExternalTypedDataPtr ExternalTypedData::NewFinalizeWithFree(uint8_t* data, | 
 |                                                             intptr_t len) { | 
 |   ExternalTypedData& result = ExternalTypedData::Handle(ExternalTypedData::New( | 
 |       kExternalTypedDataUint8ArrayCid, data, len, Heap::kOld)); | 
 |   result.AddFinalizer( | 
 |       data, [](void* isolate_callback_data, void* data) { free(data); }, len); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | TypedDataViewPtr TypedDataView::New(intptr_t class_id, Heap::Space space) { | 
 |   return Object::AllocateVariant<TypedDataView>(class_id, space); | 
 | } | 
 |  | 
 | TypedDataViewPtr TypedDataView::New(intptr_t class_id, | 
 |                                     const TypedDataBase& typed_data, | 
 |                                     intptr_t offset_in_bytes, | 
 |                                     intptr_t length, | 
 |                                     Heap::Space space) { | 
 |   auto& result = TypedDataView::Handle(TypedDataView::New(class_id, space)); | 
 |   result.InitializeWith(typed_data, offset_in_bytes, length); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | bool TypedDataBase::IsExternalOrExternalView() const { | 
 |   if (IsExternalTypedData()) return true; | 
 |   if (IsTypedDataView()) { | 
 |     const auto& backing = | 
 |         TypedDataBase::Handle(TypedDataView::Cast(*this).typed_data()); | 
 |     return backing.IsExternalTypedData(); | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | TypedDataViewPtr TypedDataBase::ViewFromTo(intptr_t start, | 
 |                                            intptr_t end, | 
 |                                            Heap::Space space) const { | 
 |   const intptr_t len = end - start; | 
 |   ASSERT(0 <= len); | 
 |   ASSERT(start < Length()); | 
 |   ASSERT((start + len) <= Length()); | 
 |  | 
 |   const intptr_t cid = GetClassId(); | 
 |  | 
 |   if (IsTypedDataView()) { | 
 |     const auto& view = TypedDataView::Cast(*this); | 
 |     const auto& td = TypedDataBase::Handle(view.typed_data()); | 
 |     const intptr_t view_offset = Smi::Value(view.offset_in_bytes()); | 
 |     ASSERT(IsTypedDataViewClassId(cid)); | 
 |     return TypedDataView::New(cid, ExternalTypedData::Cast(td), | 
 |                               view_offset + start, len, Heap::kOld); | 
 |   } else if (IsExternalTypedData()) { | 
 |     ASSERT(IsExternalTypedDataClassId(cid)); | 
 |     ASSERT(IsTypedDataViewClassId(cid - 1)); | 
 |     return TypedDataView::New(cid - 1, *this, start, len, Heap::kOld); | 
 |   } | 
 |   RELEASE_ASSERT(IsTypedData()); | 
  ASSERT(IsTypedDataClassId(cid));
 |   ASSERT(IsTypedDataViewClassId(cid + 1)); | 
 |   return TypedDataView::New(cid + 1, *this, start, len, Heap::kOld); | 
 | } | 
 |  | 
 | const char* TypedDataBase::ToCString() const { | 
 |   // There are no instances of UntaggedTypedDataBase. | 
 |   UNREACHABLE(); | 
 |   return nullptr; | 
 | } | 
 |  | 
 | const char* TypedDataView::ToCString() const { | 
 |   const Class& cls = Class::Handle(clazz()); | 
 |   return cls.ScrubbedNameCString(); | 
 | } | 
 |  | 
 | const char* ExternalTypedData::ToCString() const { | 
 |   const Class& cls = Class::Handle(clazz()); | 
 |   return cls.ScrubbedNameCString(); | 
 | } | 
 |  | 
 | PointerPtr Pointer::New(uword native_address, Heap::Space space) { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |  | 
 |   const auto& type_args = TypeArguments::Handle( | 
 |       zone, IsolateGroup::Current()->object_store()->type_argument_never()); | 
 |  | 
 |   const Class& cls = | 
 |       Class::Handle(IsolateGroup::Current()->class_table()->At(kPointerCid)); | 
 |   cls.EnsureIsAllocateFinalized(Thread::Current()); | 
 |  | 
 |   const auto& result = Pointer::Handle(zone, Object::Allocate<Pointer>(space)); | 
 |   result.SetTypeArguments(type_args); | 
 |   result.SetNativeAddress(native_address); | 
 |  | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* Pointer::ToCString() const { | 
 |   return OS::SCreate(Thread::Current()->zone(), "Pointer: address=0x%" Px, | 
 |                      NativeAddress()); | 
 | } | 
 |  | 
 | DynamicLibraryPtr DynamicLibrary::New(void* handle, | 
 |                                       bool canBeClosed, | 
 |                                       Heap::Space space) { | 
 |   const auto& result = | 
 |       DynamicLibrary::Handle(Object::Allocate<DynamicLibrary>(space)); | 
 |   ASSERT_EQUAL(result.IsClosed(), false); | 
 |   result.SetHandle(handle); | 
 |   result.SetCanBeClosed(canBeClosed); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | bool Pointer::IsPointer(const Instance& obj) { | 
 |   return IsFfiPointerClassId(obj.ptr()->GetClassId()); | 
 | } | 
 |  | 
 | bool Instance::IsPointer() const { | 
 |   return Pointer::IsPointer(*this); | 
 | } | 
 |  | 
 | const char* DynamicLibrary::ToCString() const { | 
 |   return OS::SCreate(Thread::Current()->zone(), "DynamicLibrary: handle=0x%" Px, | 
 |                      reinterpret_cast<uintptr_t>(GetHandle())); | 
 | } | 
 |  | 
 | CapabilityPtr Capability::New(uint64_t id, Heap::Space space) { | 
 |   const auto& result = Capability::Handle(Object::Allocate<Capability>(space)); | 
 |   result.StoreNonPointer(&result.untag()->id_, id); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* Capability::ToCString() const { | 
 |   return "Capability"; | 
 | } | 
 |  | 
 | ReceivePortPtr ReceivePort::New(Dart_Port id, | 
 |                                 const String& debug_name, | 
 |                                 Heap::Space space) { | 
 |   ASSERT(id != ILLEGAL_PORT); | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   const SendPort& send_port = | 
 |       SendPort::Handle(zone, SendPort::New(id, thread->isolate()->origin_id())); | 
 | #if !defined(PRODUCT) | 
 |   const StackTrace& allocation_location_ = | 
 |       HasStack() ? GetCurrentStackTrace(0) : StackTrace::Handle(); | 
 | #endif  // !defined(PRODUCT) | 
 |  | 
 |   const auto& result = | 
 |       ReceivePort::Handle(zone, Object::Allocate<ReceivePort>(space)); | 
 |   result.untag()->set_send_port(send_port.ptr()); | 
 |   result.untag()->set_bitfield( | 
 |       Smi::New(IsOpen::encode(true) | IsKeepIsolateAlive::encode(true))); | 
 | #if !defined(PRODUCT) | 
 |   result.untag()->set_debug_name(debug_name.ptr()); | 
 |   result.untag()->set_allocation_location(allocation_location_.ptr()); | 
 | #endif  // !defined(PRODUCT) | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* ReceivePort::ToCString() const { | 
 |   return "ReceivePort"; | 
 | } | 
 |  | 
 | SendPortPtr SendPort::New(Dart_Port id, Heap::Space space) { | 
 |   return New(id, ILLEGAL_PORT, space); | 
 | } | 
 |  | 
 | SendPortPtr SendPort::New(Dart_Port id, | 
 |                           Dart_Port origin_id, | 
 |                           Heap::Space space) { | 
 |   ASSERT(id != ILLEGAL_PORT); | 
 |   const auto& result = SendPort::Handle(Object::Allocate<SendPort>(space)); | 
 |   result.StoreNonPointer(&result.untag()->id_, id); | 
 |   result.StoreNonPointer(&result.untag()->origin_id_, origin_id); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* SendPort::ToCString() const { | 
 |   return "SendPort"; | 
 | } | 
 |  | 
 | static void TransferableTypedDataFinalizer(void* isolate_callback_data, | 
 |                                            void* peer) { | 
 |   delete (reinterpret_cast<TransferableTypedDataPeer*>(peer)); | 
 | } | 
 |  | 
 | TransferableTypedDataPtr TransferableTypedData::New(uint8_t* data, | 
 |                                                     intptr_t length) { | 
 |   auto* const peer = new TransferableTypedDataPeer(data, length); | 
 |  | 
 |   Thread* thread = Thread::Current(); | 
 |   const auto& result = | 
 |       TransferableTypedData::Handle(Object::Allocate<TransferableTypedData>( | 
 |           thread->heap()->SpaceForExternal(length))); | 
 |   thread->heap()->SetPeer(result.ptr(), peer); | 
 |  | 
  // Set up a finalizer so the allocated memory is freed if the handle is
  // garbage collected.
 |   FinalizablePersistentHandle* finalizable_ref = | 
 |       FinalizablePersistentHandle::New(thread->isolate_group(), result, peer, | 
 |                                        &TransferableTypedDataFinalizer, length, | 
 |                                        /*auto_delete=*/true); | 
 |   ASSERT(finalizable_ref != nullptr); | 
 |   peer->set_handle(finalizable_ref); | 
 |  | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* TransferableTypedData::ToCString() const { | 
 |   return "TransferableTypedData"; | 
 | } | 
 |  | 
 | bool Closure::CanonicalizeEquals(const Instance& other) const { | 
 |   if (!other.IsClosure()) return false; | 
 |  | 
 |   const Closure& other_closure = Closure::Cast(other); | 
 |   return (instantiator_type_arguments() == | 
 |           other_closure.instantiator_type_arguments()) && | 
 |          (function_type_arguments() == | 
 |           other_closure.function_type_arguments()) && | 
 |          (delayed_type_arguments() == other_closure.delayed_type_arguments()) && | 
 |          (function() == other_closure.function()) && | 
 |          (RawContext() == other_closure.RawContext()); | 
 | } | 
 |  | 
 | void Closure::CanonicalizeFieldsLocked(Thread* thread) const { | 
 |   TypeArguments& type_args = TypeArguments::Handle(); | 
 |   type_args = instantiator_type_arguments(); | 
 |   if (!type_args.IsNull()) { | 
 |     type_args = type_args.Canonicalize(thread); | 
 |     set_instantiator_type_arguments(type_args); | 
 |   } | 
 |   type_args = function_type_arguments(); | 
 |   if (!type_args.IsNull()) { | 
 |     type_args = type_args.Canonicalize(thread); | 
 |     set_function_type_arguments(type_args); | 
 |   } | 
 |   type_args = delayed_type_arguments(); | 
 |   if (!type_args.IsNull()) { | 
 |     type_args = type_args.Canonicalize(thread); | 
 |     set_delayed_type_arguments(type_args); | 
 |   } | 
 |   // Ignore function, context, hash. | 
 | } | 
 |  | 
 | const char* Closure::ToCString() const { | 
 |   auto const thread = Thread::Current(); | 
 |   auto const zone = thread->zone(); | 
 |   ZoneTextBuffer buffer(zone); | 
 |   buffer.AddString("Closure: "); | 
 |   const Function& fun = Function::Handle(zone, function()); | 
 |   const FunctionType& sig = | 
 |       FunctionType::Handle(zone, GetInstantiatedSignature(zone)); | 
 |   sig.Print(kUserVisibleName, &buffer); | 
 |   if (fun.IsImplicitClosureFunction()) { | 
 |     buffer.Printf(" from %s", fun.ToCString()); | 
 |   } | 
 |   return buffer.buffer(); | 
 | } | 
 |  | 
 | uword Closure::ComputeHash() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   DEBUG_ASSERT(thread->TopErrorHandlerIsExitFrame()); | 
 |   Zone* zone = thread->zone(); | 
 |   const Function& func = Function::Handle(zone, function()); | 
 |   uint32_t result = 0; | 
 |   if (func.IsImplicitClosureFunction() || func.IsGeneric()) { | 
 |     // Combine function's hash code, delayed type arguments hash code | 
 |     // (if generic), and identityHashCode of cached receiver (if implicit | 
 |     // instance closure). | 
 |     result = static_cast<uint32_t>(func.Hash()); | 
 |     if (func.IsGeneric()) { | 
 |       const TypeArguments& delayed_type_args = | 
 |           TypeArguments::Handle(zone, delayed_type_arguments()); | 
 |       result = CombineHashes(result, delayed_type_args.Hash()); | 
 |     } | 
 |     if (func.IsImplicitInstanceClosureFunction()) { | 
 |       const Instance& receiver = | 
 |           Instance::Handle(zone, GetImplicitClosureReceiver()); | 
 |       const Integer& receiverHash = | 
 |           Integer::Handle(zone, receiver.IdentityHashCode(thread)); | 
 |       result = CombineHashes(result, receiverHash.AsTruncatedUint32Value()); | 
 |     } | 
 |   } else { | 
 |     // Non-implicit closures of non-generic functions are unique, | 
 |     // so identityHashCode of closure object is good enough. | 
 |     const Integer& identityHash = | 
 |         Integer::Handle(zone, this->IdentityHashCode(thread)); | 
 |     result = identityHash.AsTruncatedUint32Value(); | 
 |   } | 
 |   return FinalizeHash(result, String::kHashBits); | 
 | } | 
 |  | 
 | ClosurePtr Closure::New(const TypeArguments& instantiator_type_arguments, | 
 |                         const TypeArguments& function_type_arguments, | 
 |                         const Function& function, | 
 |                         const Object& context, | 
 |                         Heap::Space space) { | 
 |   // We store null delayed type arguments, not empty ones, in closures with | 
 |   // non-generic functions a) to make method extraction slightly faster and | 
 |   // b) to make the Closure::IsGeneric check fast. | 
 |   // Keep in sync with StubCodeCompiler::GenerateAllocateClosureStub. | 
 |   return Closure::New(instantiator_type_arguments, function_type_arguments, | 
 |                       function.IsGeneric() ? Object::empty_type_arguments() | 
 |                                            : Object::null_type_arguments(), | 
 |                       function, context, space); | 
 | } | 
 |  | 
 | ClosurePtr Closure::New(const TypeArguments& instantiator_type_arguments, | 
 |                         const TypeArguments& function_type_arguments, | 
 |                         const TypeArguments& delayed_type_arguments, | 
 |                         const Function& function, | 
 |                         const Object& context, | 
 |                         Heap::Space space) { | 
 |   ASSERT(instantiator_type_arguments.IsCanonical()); | 
 |   ASSERT(function_type_arguments.IsCanonical()); | 
 |   ASSERT(delayed_type_arguments.IsCanonical()); | 
 |   ASSERT(FunctionType::Handle(function.signature()).IsCanonical()); | 
 |   ASSERT( | 
 |       (function.IsImplicitInstanceClosureFunction() && context.IsInstance()) || | 
 |       (function.IsNonImplicitClosureFunction() && context.IsContext()) || | 
 |       context.IsNull()); | 
 |   const auto& result = Closure::Handle(Object::Allocate<Closure>(space)); | 
 |   result.untag()->set_instantiator_type_arguments( | 
 |       instantiator_type_arguments.ptr()); | 
 |   result.untag()->set_function_type_arguments(function_type_arguments.ptr()); | 
 |   result.untag()->set_delayed_type_arguments(delayed_type_arguments.ptr()); | 
 |   result.untag()->set_function(function.ptr()); | 
 |   result.untag()->set_context(context.ptr()); | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   result.set_entry_point(function.entry_point()); | 
 | #endif | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | FunctionTypePtr Closure::GetInstantiatedSignature(Zone* zone) const { | 
 |   const Function& fun = Function::Handle(zone, function()); | 
 |   FunctionType& sig = FunctionType::Handle(zone, fun.signature()); | 
 |   TypeArguments& fn_type_args = | 
 |       TypeArguments::Handle(zone, function_type_arguments()); | 
 |   const TypeArguments& delayed_type_args = | 
 |       TypeArguments::Handle(zone, delayed_type_arguments()); | 
 |   const TypeArguments& inst_type_args = | 
 |       TypeArguments::Handle(zone, instantiator_type_arguments()); | 
 |  | 
 |   // We detect the case of a partial tearoff type application and substitute the | 
 |   // type arguments for the type parameters of the function. | 
 |   intptr_t num_free_params; | 
 |   if (!IsGeneric() && fun.IsGeneric()) { | 
 |     num_free_params = kCurrentAndEnclosingFree; | 
 |     fn_type_args = delayed_type_args.Prepend( | 
 |         zone, fn_type_args, sig.NumParentTypeArguments(), | 
 |         sig.NumTypeParameters() + sig.NumParentTypeArguments()); | 
 |   } else { | 
 |     num_free_params = kAllFree; | 
 |   } | 
 |   if (num_free_params == kCurrentAndEnclosingFree || !sig.IsInstantiated()) { | 
 |     sig ^= sig.InstantiateFrom(inst_type_args, fn_type_args, num_free_params, | 
 |                                Heap::kOld); | 
 |   } | 
 |   return sig.ptr(); | 
 | } | 
 |  | 
 | bool StackTrace::skip_sync_start_in_parent_stack() const { | 
 |   return untag()->skip_sync_start_in_parent_stack; | 
 | } | 
 |  | 
 | void StackTrace::set_skip_sync_start_in_parent_stack(bool value) const { | 
 |   StoreNonPointer(&untag()->skip_sync_start_in_parent_stack, value); | 
 | } | 
 |  | 
 | intptr_t StackTrace::Length() const { | 
 |   const Array& code_array = Array::Handle(untag()->code_array()); | 
 |   return code_array.Length(); | 
 | } | 
 |  | 
 | ObjectPtr StackTrace::CodeAtFrame(intptr_t frame_index) const { | 
 |   const Array& code_array = Array::Handle(untag()->code_array()); | 
 |   return code_array.At(frame_index); | 
 | } | 
 |  | 
 | void StackTrace::SetCodeAtFrame(intptr_t frame_index, | 
 |                                 const Object& code) const { | 
 |   const Array& code_array = Array::Handle(untag()->code_array()); | 
 |   code_array.SetAt(frame_index, code); | 
 | } | 
 |  | 
 | uword StackTrace::PcOffsetAtFrame(intptr_t frame_index) const { | 
 |   const TypedData& pc_offset_array = | 
 |       TypedData::Handle(untag()->pc_offset_array()); | 
 |   return pc_offset_array.GetUintPtr(frame_index * kWordSize); | 
 | } | 
 |  | 
 | void StackTrace::SetPcOffsetAtFrame(intptr_t frame_index, | 
 |                                     uword pc_offset) const { | 
 |   const TypedData& pc_offset_array = | 
 |       TypedData::Handle(untag()->pc_offset_array()); | 
 |   pc_offset_array.SetUintPtr(frame_index * kWordSize, pc_offset); | 
 | } | 
 |  | 
 | void StackTrace::set_async_link(const StackTrace& async_link) const { | 
 |   untag()->set_async_link(async_link.ptr()); | 
 | } | 
 |  | 
 | void StackTrace::set_code_array(const Array& code_array) const { | 
 |   untag()->set_code_array(code_array.ptr()); | 
 | } | 
 |  | 
 | void StackTrace::set_pc_offset_array(const TypedData& pc_offset_array) const { | 
 |   untag()->set_pc_offset_array(pc_offset_array.ptr()); | 
 | } | 
 |  | 
 | void StackTrace::set_expand_inlined(bool value) const { | 
 |   StoreNonPointer(&untag()->expand_inlined_, value); | 
 | } | 
 |  | 
 | bool StackTrace::expand_inlined() const { | 
 |   return untag()->expand_inlined_; | 
 | } | 
 |  | 
 | StackTracePtr StackTrace::New(const Array& code_array, | 
 |                               const TypedData& pc_offset_array, | 
 |                               Heap::Space space) { | 
 |   const auto& result = StackTrace::Handle(Object::Allocate<StackTrace>(space)); | 
 |   result.set_code_array(code_array); | 
 |   result.set_pc_offset_array(pc_offset_array); | 
 |   result.set_expand_inlined(true);  // default. | 
 |   ASSERT_EQUAL(result.skip_sync_start_in_parent_stack(), false); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | StackTracePtr StackTrace::New(const Array& code_array, | 
 |                               const TypedData& pc_offset_array, | 
 |                               const StackTrace& async_link, | 
 |                               bool skip_sync_start_in_parent_stack, | 
 |                               Heap::Space space) { | 
 |   const auto& result = StackTrace::Handle(Object::Allocate<StackTrace>(space)); | 
 |   result.set_async_link(async_link); | 
 |   result.set_code_array(code_array); | 
 |   result.set_pc_offset_array(pc_offset_array); | 
 |   result.set_expand_inlined(true);  // default. | 
 |   result.set_skip_sync_start_in_parent_stack(skip_sync_start_in_parent_stack); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 | static bool TryPrintNonSymbolicStackFrameBodyRelative( | 
 |     BaseTextBuffer* buffer, | 
 |     uword call_addr, | 
 |     uword instructions, | 
 |     bool vm, | 
 |     LoadingUnit* unit = nullptr) { | 
 |   const Image image(reinterpret_cast<const uint8_t*>(instructions)); | 
 |   if (!image.contains(call_addr)) return false; | 
 |   if (unit != nullptr) { | 
 |     ASSERT(!unit->IsNull()); | 
 |     // Add the unit ID to the stack frame, so the correct loading unit | 
 |     // information from the header can be checked. | 
 |     buffer->Printf(" unit %" Pd "", unit->id()); | 
 |   } | 
 |   auto const offset = call_addr - instructions; | 
 |   // Only print the relocated address of the call when we know the saved | 
 |   // debugging information (if any) will have the same relocated address. | 
 |   // Also only print 'virt' fields for isolate addresses. | 
 |   if (!vm && image.compiled_to_elf()) { | 
 |     const uword relocated_section_start = | 
 |         image.instructions_relocated_address(); | 
 |     buffer->Printf(" virt %" Pp "", relocated_section_start + offset); | 
 |   } | 
 |   const char* symbol = vm ? kVmSnapshotInstructionsAsmSymbol | 
 |                           : kIsolateSnapshotInstructionsAsmSymbol; | 
 |   buffer->Printf(" %s+0x%" Px "\n", symbol, offset); | 
 |   return true; | 
 | } | 
 |  | 
 | // Prints the best representation(s) for the call address. | 
 | static void PrintNonSymbolicStackFrameBody(BaseTextBuffer* buffer, | 
 |                                            uword call_addr, | 
 |                                            uword isolate_instructions, | 
 |                                            uword vm_instructions, | 
 |                                            const Array& loading_units, | 
 |                                            LoadingUnit* unit) { | 
 |   if (TryPrintNonSymbolicStackFrameBodyRelative(buffer, call_addr, | 
 |                                                 vm_instructions, | 
 |                                                 /*vm=*/true)) { | 
 |     return; | 
 |   } | 
 |  | 
 |   if (!loading_units.IsNull()) { | 
 |     // All non-VM stack frames should include the loading unit id. | 
 |     const intptr_t unit_count = loading_units.Length(); | 
 |     for (intptr_t i = LoadingUnit::kRootId; i < unit_count; i++) { | 
 |       *unit ^= loading_units.At(i); | 
 |       if (!unit->has_instructions_image()) continue; | 
 |       auto const instructions = | 
 |           reinterpret_cast<uword>(unit->instructions_image()); | 
 |       if (TryPrintNonSymbolicStackFrameBodyRelative(buffer, call_addr, | 
 |                                                     instructions, | 
 |                                                     /*vm=*/false, unit)) { | 
 |         return; | 
 |       } | 
 |     } | 
 |   } else { | 
 |     if (TryPrintNonSymbolicStackFrameBodyRelative(buffer, call_addr, | 
 |                                                   isolate_instructions, | 
 |                                                   /*vm=*/false)) { | 
 |       return; | 
 |     } | 
 |   } | 
 |  | 
 |   // The stack trace printer should never end up here, since these are not | 
 |   // addresses within a loading unit or the VM or app isolate instructions | 
 |   // sections. Thus, make it easy to notice when looking at the stack trace. | 
 |   buffer->Printf(" <invalid Dart instruction address>\n"); | 
 | } | 
 | #endif | 
 |  | 
 | static void PrintSymbolicStackFrameIndex(BaseTextBuffer* buffer, | 
 |                                          intptr_t frame_index) { | 
 |   buffer->Printf("#%-6" Pd "", frame_index); | 
 | } | 
 |  | 
 | static void PrintSymbolicStackFrameBody(BaseTextBuffer* buffer, | 
 |                                         const char* function_name, | 
 |                                         const char* url, | 
 |                                         intptr_t line = -1, | 
 |                                         intptr_t column = -1) { | 
 |   buffer->Printf(" %s (%s", function_name, url); | 
 |   if (line >= 0) { | 
 |     buffer->Printf(":%" Pd "", line); | 
 |     if (column >= 0) { | 
 |       buffer->Printf(":%" Pd "", column); | 
 |     } | 
 |   } | 
 |   buffer->Printf(")\n"); | 
 | } | 
 |  | 
 | static void PrintSymbolicStackFrame(Zone* zone, | 
 |                                     BaseTextBuffer* buffer, | 
 |                                     const Function& function, | 
 |                                     TokenPosition token_pos_or_line, | 
 |                                     intptr_t frame_index, | 
 |                                     bool is_line = false) { | 
 |   ASSERT(!function.IsNull()); | 
 |   const auto& script = Script::Handle(zone, function.script()); | 
 |   const char* function_name = function.QualifiedUserVisibleNameCString(); | 
 |   const char* url = script.IsNull() | 
 |                         ? "Kernel" | 
 |                         : String::Handle(zone, script.url()).ToCString(); | 
 |  | 
  // If the URI starts with "data:application/dart;", this is a URI-encoded
  // script, so we shouldn't print the entire URI because it could be very
  // long.
 |     url = "<data:application/dart>"; | 
 |   } | 
 |  | 
 |   intptr_t line = -1; | 
 |   intptr_t column = -1; | 
 |   if (is_line) { | 
 |     ASSERT(token_pos_or_line.IsNoSource() || token_pos_or_line.IsReal()); | 
 |     if (token_pos_or_line.IsReal()) { | 
 |       line = token_pos_or_line.Pos(); | 
 |     } | 
 |   } else { | 
 |     ASSERT(!script.IsNull()); | 
 |     script.GetTokenLocation(token_pos_or_line, &line, &column); | 
 |   } | 
 |   PrintSymbolicStackFrameIndex(buffer, frame_index); | 
 |   PrintSymbolicStackFrameBody(buffer, function_name, url, line, column); | 
 | } | 
 |  | 
 | static bool IsVisibleAsFutureListener(const Function& function) { | 
 |   if (function.is_visible()) { | 
 |     return true; | 
 |   } | 
 |  | 
 |   if (function.IsImplicitClosureFunction()) { | 
 |     return function.parent_function() == Function::null() || | 
 |            Function::is_visible(function.parent_function()); | 
 |   } | 
 |  | 
 |   return false; | 
 | } | 
 |  | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 | static void WriteImageBuildId(BaseTextBuffer* buffer, | 
 |                               const char* prefix, | 
 |                               uword image_address) { | 
 |   const auto& build_id = OS::GetAppBuildId(image_address); | 
 |   if (build_id.data != nullptr) { | 
 |     ASSERT(build_id.len > 0); | 
 |     buffer->AddString(prefix); | 
 |     buffer->AddString("'"); | 
 |     for (intptr_t i = 0; i < build_id.len; i++) { | 
 |       buffer->Printf("%2.2x", build_id.data[i]); | 
 |     } | 
 |     buffer->AddString("'"); | 
 |   } | 
 | } | 
 |  | 
 | void WriteStackTraceHeaderLoadingUnitEntry(BaseTextBuffer* buffer, | 
 |                                            intptr_t id, | 
 |                                            uword dso_base, | 
 |                                            uword instructions) { | 
 |   buffer->Printf("loading_unit: %" Pd "", id); | 
 |   WriteImageBuildId(buffer, ", build_id: ", instructions); | 
 |   buffer->Printf(", dso_base: %" Px ", instructions: %" Px "\n", dso_base, | 
 |                  instructions); | 
 | } | 
 | #endif | 
 |  | 
 | const char* StackTrace::ToCString() const { | 
 |   auto const T = Thread::Current(); | 
 |   auto const zone = T->zone(); | 
 |   auto& stack_trace = StackTrace::Handle(zone, this->ptr()); | 
 |   auto& owner = Object::Handle(zone); | 
 |   auto& function = Function::Handle(zone); | 
 |   auto& code_object = Object::Handle(zone); | 
 |   auto& code = Code::Handle(zone); | 
 |  | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   const Array& loading_units = | 
 |       Array::Handle(T->isolate_group()->object_store()->loading_units()); | 
 |   auto* const unit = | 
 |       loading_units.IsNull() ? nullptr : &LoadingUnit::Handle(zone); | 
 | #endif | 
 |  | 
 |   NoSafepointScope no_allocation; | 
 |   GrowableArray<const Function*> inlined_functions; | 
 |   GrowableArray<TokenPosition> inlined_token_positions; | 
 |  | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   GrowableArray<void*> addresses(10); | 
 |   const bool have_footnote_callback = | 
 |       FLAG_dwarf_stack_traces_mode && | 
 |       Dart::dwarf_stacktrace_footnote_callback() != nullptr; | 
 | #endif | 
 |  | 
 |   ZoneTextBuffer buffer(zone, 1024); | 
 |  | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   auto const isolate_instructions = reinterpret_cast<uword>( | 
 |       T->isolate_group()->source()->snapshot_instructions); | 
 | #if defined(DEBUG) | 
 |   if (!loading_units.IsNull()) { | 
 |     *unit ^= loading_units.At(LoadingUnit::kRootId); | 
 |     ASSERT(!unit->IsNull()); | 
 |     ASSERT(unit->has_instructions_image()); | 
 |     ASSERT(reinterpret_cast<uword>(unit->instructions_image()) == | 
 |            isolate_instructions); | 
 |   } | 
 | #endif | 
 |   auto const vm_instructions = reinterpret_cast<uword>( | 
 |       Dart::vm_isolate_group()->source()->snapshot_instructions); | 
 |   if (FLAG_dwarf_stack_traces_mode) { | 
 |     // This prologue imitates Android's debuggerd to make it possible to paste | 
 |     // the stack trace into ndk-stack. | 
 |     buffer.Printf( | 
 |         "*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***\n"); | 
 |     OSThread* thread = OSThread::Current(); | 
 |     buffer.Printf("pid: %" Pd ", tid: %" Pd ", name %s\n", OS::ProcessId(), | 
 |                   OSThread::ThreadIdToIntPtr(thread->id()), thread->name()); | 
 | #if defined(DART_COMPRESSED_POINTERS) | 
 |     const char kCompressedPointers[] = "yes"; | 
 | #else | 
 |     const char kCompressedPointers[] = "no"; | 
 | #endif | 
 | #if defined(USING_SIMULATOR) | 
 |     const char kUsingSimulator[] = "yes"; | 
 | #else | 
 |     const char kUsingSimulator[] = "no"; | 
 | #endif | 
 |     buffer.Printf("os: %s arch: %s comp: %s sim: %s\n", | 
 |                   kHostOperatingSystemName, kTargetArchitectureName, | 
 |                   kCompressedPointers, kUsingSimulator); | 
 |     WriteImageBuildId(&buffer, "build_id: ", isolate_instructions); | 
 |     buffer.AddString("\n"); | 
 |     if (!loading_units.IsNull()) { | 
 |       const intptr_t unit_count = loading_units.Length(); | 
 |       for (intptr_t i = LoadingUnit::kRootId; i < unit_count; i++) { | 
 |         *unit ^= loading_units.At(i); | 
 |         if (!unit->has_instructions_image()) continue; | 
 |         const uword instructions = | 
 |             reinterpret_cast<uword>(unit->instructions_image()); | 
 |         const uword dso_base = OS::GetAppDSOBase(instructions); | 
 |         WriteStackTraceHeaderLoadingUnitEntry(&buffer, i, dso_base, | 
 |                                               instructions); | 
 |       } | 
 |     } | 
    // Print the dso_base of both the VM and the isolate instructions, since
    // the VM and isolate may be loaded from different snapshot images.
 |     const uword isolate_dso_base = OS::GetAppDSOBase(isolate_instructions); | 
 |     buffer.Printf("isolate_dso_base: %" Px "", isolate_dso_base); | 
 |     const uword vm_dso_base = OS::GetAppDSOBase(vm_instructions); | 
 |     buffer.Printf(", vm_dso_base: %" Px "\n", vm_dso_base); | 
 |     buffer.Printf("isolate_instructions: %" Px "", isolate_instructions); | 
 |     buffer.Printf(", vm_instructions: %" Px "\n", vm_instructions); | 
 |   } | 
 | #endif | 
 |  | 
 |   // Iterate through the stack frames and create C string description | 
 |   // for each frame. | 
 |   intptr_t frame_index = 0; | 
 |   uint32_t frame_skip = 0; | 
 |   // If we're already in a gap, don't print multiple gap markers. | 
 |   bool in_gap = false; | 
 |   do { | 
 |     for (intptr_t i = frame_skip; i < stack_trace.Length(); i++) { | 
 |       code_object = stack_trace.CodeAtFrame(i); | 
 |       if (code_object.IsNull()) { | 
        // A null code object indicates a gap in a StackOverflow or
        // OutOfMemory trace.
 |         if ((i < (stack_trace.Length() - 1)) && | 
 |             (stack_trace.CodeAtFrame(i + 1) != Code::null())) { | 
 |           buffer.AddString("...\n...\n"); | 
 |           // To account for gap frames. | 
 |           frame_index += stack_trace.PcOffsetAtFrame(i); | 
 |         } | 
 |         continue; | 
 |       } | 
 |  | 
 |       if (code_object.ptr() == StubCode::AsynchronousGapMarker().ptr()) { | 
 |         if (!in_gap) { | 
 |           buffer.AddString("<asynchronous suspension>\n"); | 
 |         } | 
 |         in_gap = true; | 
 |         continue; | 
 |       } | 
 |  | 
 |       const uword pc_offset = stack_trace.PcOffsetAtFrame(i); | 
 |       ASSERT(code_object.IsCode()); | 
 |       code ^= code_object.ptr(); | 
 |       ASSERT(code.IsFunctionCode()); | 
 |       owner = code.owner(); | 
 |       if (owner.IsFunction()) { | 
 |         function ^= owner.ptr(); | 
 |       } else { | 
 |         function = Function::null(); | 
 |       } | 
 |       const uword pc = code.PayloadStart() + pc_offset; | 
 |  | 
 |       const bool is_future_listener = | 
 |           pc_offset == StackTraceUtils::kFutureListenerPcOffset; | 
 |  | 
 |       // A visible frame ends any gap we might be in. | 
 |       in_gap = false; | 
 |  | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |       // When printing non-symbolic frames, we normally print call | 
 |       // addresses, not return addresses, by subtracting one from the PC to | 
 |       // get an address within the preceding instruction. | 
 |       // | 
 |       // The one exception is a normal closure registered as a listener on a | 
 |       // future. In this case, the returned pc_offset will be pointing to the | 
      // entry point of the function, which will be invoked when the future
      // completes. To make things more uniform, the stack unwinding code
      // offsets pc_offset by 1 in such cases.
 |       const uword call_addr = pc - 1; | 
 |  | 
 |       if (FLAG_dwarf_stack_traces_mode) { | 
 |         if (have_footnote_callback) { | 
 |           addresses.Add(reinterpret_cast<void*>(call_addr)); | 
 |         } | 
 |  | 
 |         // This output is formatted like Android's debuggerd. Note debuggerd | 
 |         // prints call addresses instead of return addresses. | 
 |         buffer.Printf("    #%02" Pd " abs %" Pp "", frame_index, call_addr); | 
 |         PrintNonSymbolicStackFrameBody(&buffer, call_addr, isolate_instructions, | 
 |                                        vm_instructions, loading_units, unit); | 
 |         frame_index++; | 
 |         continue; | 
 |       } | 
 |  | 
 |       if (function.IsNull()) { | 
 |         in_gap = false; | 
 |         // We can't print the symbolic information since the owner was not | 
 |         // retained, so instead print the static symbol + offset like the | 
 |         // non-symbolic stack traces. | 
 |         PrintSymbolicStackFrameIndex(&buffer, frame_index); | 
 |         PrintNonSymbolicStackFrameBody(&buffer, call_addr, isolate_instructions, | 
 |                                        vm_instructions, loading_units, unit); | 
 |         frame_index++; | 
 |         continue; | 
 |       } | 
 | #endif | 
 |  | 
 |       if (code.is_optimized() && stack_trace.expand_inlined() && | 
 |           (FLAG_precompiled_mode || !is_future_listener)) { | 
        // Note: in AOT mode, EmitFunctionEntrySourcePositionDescriptorIfNeeded
        // takes care of emitting a descriptor that allows us to symbolize a
        // stack frame with offset 0.
 |         code.GetInlinedFunctionsAtReturnAddress( | 
 |             is_future_listener ? 0 : pc_offset, &inlined_functions, | 
 |             &inlined_token_positions); | 
 |         ASSERT(inlined_functions.length() >= 1); | 
 |         for (intptr_t j = inlined_functions.length() - 1; j >= 0; j--) { | 
 |           function = inlined_functions[j]->ptr(); | 
 |           auto const pos = inlined_token_positions[j]; | 
 |           if (is_future_listener && function.IsImplicitClosureFunction()) { | 
 |             function = function.parent_function(); | 
 |           } | 
 |           if (FLAG_show_invisible_frames || function.is_visible()) { | 
 |             PrintSymbolicStackFrame(zone, &buffer, function, pos, frame_index, | 
 |                                     /*is_line=*/FLAG_precompiled_mode); | 
 |             frame_index++; | 
 |           } | 
 |         } | 
 |         continue; | 
 |       } | 
 |  | 
 |       if (FLAG_show_invisible_frames || function.is_visible() || | 
 |           (is_future_listener && IsVisibleAsFutureListener(function))) { | 
 |         auto const pos = is_future_listener ? function.token_pos() | 
 |                                             : code.GetTokenIndexOfPC(pc); | 
 |         PrintSymbolicStackFrame(zone, &buffer, function, pos, frame_index); | 
 |         frame_index++; | 
 |       } | 
 |     } | 
 |  | 
 |     // Follow the link. | 
 |     frame_skip = stack_trace.skip_sync_start_in_parent_stack() | 
 |                      ? StackTrace::kSyncAsyncCroppedFrames | 
 |                      : 0; | 
 |     stack_trace = stack_trace.async_link(); | 
 |   } while (!stack_trace.IsNull()); | 
 |  | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   if (have_footnote_callback) { | 
 |     char* footnote = Dart::dwarf_stacktrace_footnote_callback()( | 
 |         &addresses[0], addresses.length()); | 
 |     if (footnote != nullptr) { | 
 |       buffer.AddString(footnote); | 
 |       free(footnote); | 
 |     } | 
 |   } | 
 | #endif | 
 |  | 
 |   return buffer.buffer(); | 
 | } | 
 |  | 
 | static void DwarfStackTracesHandler(bool value) { | 
 |   FLAG_dwarf_stack_traces_mode = value; | 
 |  | 
 | #if defined(PRODUCT) | 
 |   // We can safely remove function objects in precompiled snapshots if the | 
 |   // runtime will generate DWARF stack traces and we don't have runtime | 
 |   // debugging options like the observatory available. | 
 |   if (value) { | 
 |     FLAG_retain_function_objects = false; | 
 |     FLAG_retain_code_objects = false; | 
 |   } | 
 | #endif | 
 | } | 
 |  | 
 | DEFINE_FLAG_HANDLER(DwarfStackTracesHandler, | 
 |                     dwarf_stack_traces, | 
 |                     "Omit CodeSourceMaps in precompiled snapshots and don't " | 
 |                     "symbolize stack traces in the precompiled runtime."); | 
 |  | 
 | SuspendStatePtr SuspendState::New(intptr_t frame_size, | 
 |                                   const Instance& function_data, | 
 |                                   Heap::Space space) { | 
 |   ASSERT(frame_size >= 0); | 
 |   const intptr_t num_elements = frame_size + SuspendState::FrameSizeGrowthGap(); | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   // Include heap object alignment overhead into the frame capacity. | 
 |   const intptr_t instance_size = SuspendState::InstanceSize(num_elements); | 
 |   const intptr_t frame_capacity = | 
 |       instance_size - SuspendState::payload_offset(); | 
 |   ASSERT(SuspendState::InstanceSize(frame_capacity) == instance_size); | 
 |   ASSERT(frame_size <= frame_capacity); | 
 | #endif | 
 |   auto raw = Object::Allocate<SuspendState>(space, num_elements); | 
 |   NoSafepointScope no_safepoint; | 
 |   ASSERT_EQUAL(raw->untag()->pc_, 0); | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 |   raw->untag()->frame_capacity_ = frame_capacity; | 
 | #endif | 
 |   raw->untag()->frame_size_ = frame_size; | 
 |   raw->untag()->set_function_data(function_data.ptr()); | 
 |   return raw; | 
 | } | 
 |  | 
 | SuspendStatePtr SuspendState::Clone(Thread* thread, | 
 |                                     const SuspendState& src, | 
 |                                     Heap::Space space) { | 
 |   ASSERT(src.pc() != 0); | 
 |   Zone* zone = thread->zone(); | 
 |   const intptr_t frame_size = src.frame_size(); | 
 |   const SuspendState& dst = SuspendState::Handle( | 
 |       zone, | 
 |       SuspendState::New(frame_size, Instance::Handle(zone, src.function_data()), | 
 |                         space)); | 
 |   dst.set_then_callback(Closure::Handle(zone, src.then_callback())); | 
 |   dst.set_error_callback(Closure::Handle(zone, src.error_callback())); | 
 |   { | 
 |     NoSafepointScope no_safepoint; | 
 |     memmove(dst.payload(), src.payload(), frame_size); | 
 |     // Update value of :suspend_state variable in the copied frame. | 
 |     const uword fp = reinterpret_cast<uword>(dst.payload() + frame_size); | 
 |     *reinterpret_cast<ObjectPtr*>( | 
 |         LocalVarAddress(fp, runtime_frame_layout.FrameSlotForVariableIndex( | 
 |                                 kSuspendStateVarIndex))) = dst.ptr(); | 
 |     dst.set_pc(src.pc()); | 
 |     // Trigger write barrier if needed. | 
 |     if (dst.ptr()->IsOldObject()) { | 
 |       dst.untag()->EnsureInRememberedSet(thread); | 
 |     } | 
 |     if (thread->is_marking()) { | 
 |       thread->DeferredMarkingStackAddObject(dst.ptr()); | 
 |     } | 
 |   } | 
 |   return dst.ptr(); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
void SuspendState::set_frame_capacity(intptr_t frame_capacity) const {
  ASSERT(frame_capacity >= 0);
  StoreNonPointer(&untag()->frame_capacity_, frame_capacity);
 | } | 
 | #endif | 
 |  | 
 | void SuspendState::set_frame_size(intptr_t frame_size) const { | 
 |   ASSERT(frame_size >= 0); | 
 |   StoreNonPointer(&untag()->frame_size_, frame_size); | 
 | } | 
 |  | 
 | void SuspendState::set_pc(uword pc) const { | 
 |   StoreNonPointer(&untag()->pc_, pc); | 
 | } | 
 |  | 
 | void SuspendState::set_function_data(const Instance& function_data) const { | 
 |   untag()->set_function_data(function_data.ptr()); | 
 | } | 
 |  | 
 | void SuspendState::set_then_callback(const Closure& then_callback) const { | 
 |   untag()->set_then_callback(then_callback.ptr()); | 
 | } | 
 |  | 
 | void SuspendState::set_error_callback(const Closure& error_callback) const { | 
 |   untag()->set_error_callback(error_callback.ptr()); | 
 | } | 
 |  | 
 | const char* SuspendState::ToCString() const { | 
 |   return "SuspendState"; | 
 | } | 
 |  | 
 | CodePtr SuspendState::GetCodeObject() const { | 
 |   ASSERT(pc() != 0); | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   NoSafepointScope no_safepoint; | 
 |   CodePtr code = ReversePc::Lookup(IsolateGroup::Current(), pc(), | 
 |                                    /*is_return_address=*/true); | 
 |   ASSERT(code != Code::null()); | 
 |   return code; | 
 | #else | 
 |   ObjectPtr code = *(reinterpret_cast<ObjectPtr*>( | 
 |       untag()->payload() + untag()->frame_size_ + | 
 |       runtime_frame_layout.code_from_fp * kWordSize)); | 
 |   return Code::RawCast(code); | 
 | #endif  // defined(DART_PRECOMPILED_RUNTIME) | 
 | } | 
 |  | 
 | void RegExp::set_pattern(const String& pattern) const { | 
 |   untag()->set_pattern(pattern.ptr()); | 
 | } | 
 |  | 
 | void RegExp::set_function(intptr_t cid, | 
 |                           bool sticky, | 
 |                           const Function& value) const { | 
 |   if (sticky) { | 
 |     switch (cid) { | 
 |       case kOneByteStringCid: | 
 |         return untag()->set_one_byte_sticky(value.ptr()); | 
 |       case kTwoByteStringCid: | 
 |         return untag()->set_two_byte_sticky(value.ptr()); | 
 |     } | 
 |   } else { | 
 |     switch (cid) { | 
 |       case kOneByteStringCid: | 
 |         return untag()->set_one_byte(value.ptr()); | 
 |       case kTwoByteStringCid: | 
 |         return untag()->set_two_byte(value.ptr()); | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | void RegExp::set_bytecode(bool is_one_byte, | 
 |                           bool sticky, | 
 |                           const TypedData& bytecode) const { | 
 |   if (sticky) { | 
 |     if (is_one_byte) { | 
 |       untag()->set_one_byte_sticky<std::memory_order_release>(bytecode.ptr()); | 
 |     } else { | 
 |       untag()->set_two_byte_sticky<std::memory_order_release>(bytecode.ptr()); | 
 |     } | 
 |   } else { | 
 |     if (is_one_byte) { | 
 |       untag()->set_one_byte<std::memory_order_release>(bytecode.ptr()); | 
 |     } else { | 
 |       untag()->set_two_byte<std::memory_order_release>(bytecode.ptr()); | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | void RegExp::set_num_bracket_expressions(intptr_t value) const { | 
 |   untag()->num_bracket_expressions_ = value; | 
 | } | 
 |  | 
 | void RegExp::set_capture_name_map(const Array& array) const { | 
 |   untag()->set_capture_name_map(array.ptr()); | 
 | } | 
 |  | 
 | RegExpPtr RegExp::New(Zone* zone, Heap::Space space) { | 
 |   const auto& result = RegExp::Handle(Object::Allocate<RegExp>(space)); | 
 |   ASSERT_EQUAL(result.type(), kUninitialized); | 
 |   ASSERT(result.flags() == RegExpFlags()); | 
 |   result.set_num_bracket_expressions(-1); | 
 |   result.set_num_registers(/*is_one_byte=*/false, -1); | 
 |   result.set_num_registers(/*is_one_byte=*/true, -1); | 
 |  | 
 |   if (!FLAG_interpret_irregexp) { | 
 |     auto thread = Thread::Current(); | 
 |     const Library& lib = Library::Handle(zone, Library::CoreLibrary()); | 
 |     const Class& owner = | 
 |         Class::Handle(zone, lib.LookupClass(Symbols::RegExp())); | 
 |  | 
 |     for (intptr_t cid = kOneByteStringCid; cid <= kTwoByteStringCid; cid++) { | 
 |       CreateSpecializedFunction(thread, zone, result, cid, /*sticky=*/false, | 
 |                                 owner); | 
 |       CreateSpecializedFunction(thread, zone, result, cid, /*sticky=*/true, | 
 |                                 owner); | 
 |     } | 
 |   } | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* RegExpFlags::ToCString() const { | 
 |   switch (value_ & ~kGlobal) { | 
 |     case kIgnoreCase | kMultiLine | kDotAll | kUnicode: | 
 |       return "imsu"; | 
 |     case kIgnoreCase | kMultiLine | kDotAll: | 
 |       return "ims"; | 
 |     case kIgnoreCase | kMultiLine | kUnicode: | 
 |       return "imu"; | 
 |     case kIgnoreCase | kUnicode | kDotAll: | 
 |       return "ius"; | 
 |     case kMultiLine | kDotAll | kUnicode: | 
 |       return "msu"; | 
 |     case kIgnoreCase | kMultiLine: | 
 |       return "im"; | 
 |     case kIgnoreCase | kDotAll: | 
 |       return "is"; | 
 |     case kIgnoreCase | kUnicode: | 
 |       return "iu"; | 
 |     case kMultiLine | kDotAll: | 
 |       return "ms"; | 
 |     case kMultiLine | kUnicode: | 
 |       return "mu"; | 
 |     case kDotAll | kUnicode: | 
 |       return "su"; | 
 |     case kIgnoreCase: | 
 |       return "i"; | 
 |     case kMultiLine: | 
 |       return "m"; | 
 |     case kDotAll: | 
 |       return "s"; | 
 |     case kUnicode: | 
 |       return "u"; | 
 |     default: | 
 |       break; | 
 |   } | 
 |   return ""; | 
 | } | 
 |  | 
 | bool RegExp::CanonicalizeEquals(const Instance& other) const { | 
 |   if (this->ptr() == other.ptr()) { | 
 |     return true;  // "===". | 
 |   } | 
 |   if (other.IsNull() || !other.IsRegExp()) { | 
 |     return false; | 
 |   } | 
 |   const RegExp& other_js = RegExp::Cast(other); | 
 |   // Match the pattern. | 
 |   const String& str1 = String::Handle(pattern()); | 
 |   const String& str2 = String::Handle(other_js.pattern()); | 
 |   if (!str1.Equals(str2)) { | 
 |     return false; | 
 |   } | 
 |   // Match the flags. | 
 |   if (flags() != other_js.flags()) { | 
 |     return false; | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | uint32_t RegExp::CanonicalizeHash() const { | 
 |   // Must agree with RegExpKey::Hash. | 
 |   return CombineHashes(String::Hash(pattern()), flags().value()); | 
 | } | 
 |  | 
 | const char* RegExp::ToCString() const { | 
 |   const String& str = String::Handle(pattern()); | 
 |   return OS::SCreate(Thread::Current()->zone(), "RegExp: pattern=%s flags=%s", | 
 |                      str.ToCString(), flags().ToCString()); | 
 | } | 
 |  | 
 | WeakPropertyPtr WeakProperty::New(Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->weak_property_class() != | 
 |          Class::null()); | 
 |   return Object::Allocate<WeakProperty>(space); | 
 | } | 
 |  | 
 | const char* WeakProperty::ToCString() const { | 
 |   return "_WeakProperty"; | 
 | } | 
 |  | 
 | WeakReferencePtr WeakReference::New(Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->weak_reference_class() != | 
 |          Class::null()); | 
 |   return Object::Allocate<WeakReference>(space); | 
 | } | 
 | const char* WeakReference::ToCString() const { | 
 |   TypeArguments& type_args = TypeArguments::Handle(GetTypeArguments()); | 
 |   String& type_args_name = String::Handle(type_args.UserVisibleName()); | 
 |   return OS::SCreate(Thread::Current()->zone(), "_WeakReference%s", | 
 |                      type_args_name.ToCString()); | 
 | } | 
 |  | 
 | const char* FinalizerBase::ToCString() const { | 
 |   return "FinalizerBase"; | 
 | } | 
 |  | 
 | FinalizerPtr Finalizer::New(Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->finalizer_class() != | 
 |          Class::null()); | 
 |   ASSERT( | 
 |       Class::Handle(IsolateGroup::Current()->object_store()->finalizer_class()) | 
 |           .EnsureIsAllocateFinalized(Thread::Current()) == Error::null()); | 
 |   return Object::Allocate<Finalizer>(space); | 
 | } | 
 |  | 
 | const char* Finalizer::ToCString() const { | 
 |   TypeArguments& type_args = TypeArguments::Handle(GetTypeArguments()); | 
 |   String& type_args_name = String::Handle(type_args.UserVisibleName()); | 
 |   return OS::SCreate(Thread::Current()->zone(), "_FinalizerImpl%s", | 
 |                      type_args_name.ToCString()); | 
 | } | 
 |  | 
 | NativeFinalizerPtr NativeFinalizer::New(Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->native_finalizer_class() != | 
 |          Class::null()); | 
 |   ASSERT(Class::Handle( | 
 |              IsolateGroup::Current()->object_store()->native_finalizer_class()) | 
 |              .EnsureIsAllocateFinalized(Thread::Current()) == Error::null()); | 
 |   return Object::Allocate<NativeFinalizer>(space); | 
 | } | 
 |  | 
// Runs the finalizer if not detached, detaches the value, and sets the
// external size to 0.
 | // TODO(http://dartbug.com/47777): Can this be merged with | 
 | // RunNativeFinalizerCallback? | 
 | void NativeFinalizer::RunCallback(const FinalizerEntry& entry, | 
 |                                   const char* trace_context) const { | 
 |   Thread* const thread = Thread::Current(); | 
 |   Zone* const zone = thread->zone(); | 
 |   IsolateGroup* const group = thread->isolate_group(); | 
 |   const intptr_t external_size = entry.external_size(); | 
 |   const auto& token_object = Object::Handle(zone, entry.token()); | 
 |   const auto& callback_pointer = Pointer::Handle(zone, this->callback()); | 
 |   const auto callback = reinterpret_cast<NativeFinalizer::Callback>( | 
 |       callback_pointer.NativeAddress()); | 
 |   if (token_object.IsFinalizerEntry()) { | 
 |     // Detached from Dart code. | 
 |     ASSERT(token_object.ptr() == entry.ptr()); | 
 |     ASSERT(external_size == 0); | 
 |     if (FLAG_trace_finalizers) { | 
 |       THR_Print( | 
 |           "%s: Not running native finalizer %p callback %p, " | 
 |           "detached\n", | 
 |           trace_context, ptr()->untag(), callback); | 
 |     } | 
 |   } else { | 
 |     const auto& token = Pointer::Cast(token_object); | 
 |     void* peer = reinterpret_cast<void*>(token.NativeAddress()); | 
 |     if (FLAG_trace_finalizers) { | 
 |       THR_Print( | 
 |           "%s: Running native finalizer %p callback %p " | 
 |           "with token %p\n", | 
 |           trace_context, ptr()->untag(), callback, peer); | 
 |     } | 
 |     entry.set_token(entry); | 
 |     callback(peer); | 
 |     if (external_size > 0) { | 
 |       ASSERT(!entry.value()->IsSmi()); | 
 |       Heap::Space space = | 
 |           entry.value()->IsOldObject() ? Heap::kOld : Heap::kNew; | 
 |       if (FLAG_trace_finalizers) { | 
        THR_Print("%s: Clearing external size %" Pd " bytes in %s space\n",
                  trace_context, external_size,
                  space == Heap::kNew ? "new" : "old");
 |       } | 
 |       group->heap()->FreedExternal(external_size, space); | 
 |       entry.set_external_size(0); | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | const char* NativeFinalizer::ToCString() const { | 
 |   const auto& pointer = Pointer::Handle(callback()); | 
 |   return OS::SCreate(Thread::Current()->zone(), "_NativeFinalizer %s", | 
 |                      pointer.ToCString()); | 
 | } | 
 |  | 
 | FinalizerEntryPtr FinalizerEntry::New(const FinalizerBase& finalizer, | 
 |                                       Heap::Space space) { | 
 |   ASSERT(IsolateGroup::Current()->object_store()->finalizer_entry_class() != | 
 |          Class::null()); | 
 |   const auto& entry = | 
 |       FinalizerEntry::Handle(Object::Allocate<FinalizerEntry>(space)); | 
 |   ASSERT_EQUAL(entry.external_size(), 0); | 
 |   entry.set_finalizer(finalizer); | 
 |   return entry.ptr(); | 
 | } | 
 |  | 
 | void FinalizerEntry::set_finalizer(const FinalizerBase& value) const { | 
 |   untag()->set_finalizer(value.ptr()); | 
 | } | 
 |  | 
 | const char* FinalizerEntry::ToCString() const { | 
 |   return "FinalizerEntry"; | 
 | } | 
 |  | 
 | AbstractTypePtr MirrorReference::GetAbstractTypeReferent() const { | 
 |   ASSERT(Object::Handle(referent()).IsAbstractType()); | 
 |   return AbstractType::Cast(Object::Handle(referent())).ptr(); | 
 | } | 
 |  | 
 | ClassPtr MirrorReference::GetClassReferent() const { | 
 |   ASSERT(Object::Handle(referent()).IsClass()); | 
 |   return Class::Cast(Object::Handle(referent())).ptr(); | 
 | } | 
 |  | 
 | FieldPtr MirrorReference::GetFieldReferent() const { | 
 |   ASSERT(Object::Handle(referent()).IsField()); | 
 |   return Field::Cast(Object::Handle(referent())).ptr(); | 
 | } | 
 |  | 
 | FunctionPtr MirrorReference::GetFunctionReferent() const { | 
 |   ASSERT(Object::Handle(referent()).IsFunction()); | 
 |   return Function::Cast(Object::Handle(referent())).ptr(); | 
 | } | 
 |  | 
 | FunctionTypePtr MirrorReference::GetFunctionTypeReferent() const { | 
 |   ASSERT(Object::Handle(referent()).IsFunctionType()); | 
 |   return FunctionType::Cast(Object::Handle(referent())).ptr(); | 
 | } | 
 |  | 
 | LibraryPtr MirrorReference::GetLibraryReferent() const { | 
 |   ASSERT(Object::Handle(referent()).IsLibrary()); | 
 |   return Library::Cast(Object::Handle(referent())).ptr(); | 
 | } | 
 |  | 
 | TypeParameterPtr MirrorReference::GetTypeParameterReferent() const { | 
 |   ASSERT(Object::Handle(referent()).IsTypeParameter()); | 
 |   return TypeParameter::Cast(Object::Handle(referent())).ptr(); | 
 | } | 
 |  | 
 | MirrorReferencePtr MirrorReference::New(const Object& referent, | 
 |                                         Heap::Space space) { | 
 |   const auto& result = | 
 |       MirrorReference::Handle(Object::Allocate<MirrorReference>(space)); | 
 |   result.set_referent(referent); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | const char* MirrorReference::ToCString() const { | 
 |   return "_MirrorReference"; | 
 | } | 
 |  | 
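// Makes this tag the isolate's current tag and returns the previously active
// tag.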
 | UserTagPtr UserTag::MakeActive() const { | 
 |   Isolate* isolate = Isolate::Current(); | 
 |   ASSERT(isolate != nullptr); | 
 |   UserTag& old = UserTag::Handle(isolate->current_tag()); | 
 |   isolate->set_current_tag(*this); | 
 |  | 
 | #if !defined(PRODUCT) | 
 |   // Notify VM service clients that the current UserTag has changed. | 
 |   if (Service::profiler_stream.enabled()) { | 
 |     ServiceEvent event(isolate, ServiceEvent::kUserTagChanged); | 
 |     String& name = String::Handle(old.label()); | 
 |     event.set_previous_tag(name.ToCString()); | 
 |     name ^= label(); | 
 |     event.set_updated_tag(name.ToCString()); | 
 |     Service::HandleEvent(&event); | 
 |   } | 
 | #endif  // !defined(PRODUCT) | 
 |  | 
 |   return old.ptr(); | 
 | } | 
 |  | 
 | UserTagPtr UserTag::New(const String& label, Heap::Space space) { | 
 |   Thread* thread = Thread::Current(); | 
 |   Isolate* isolate = thread->isolate(); | 
 |   ASSERT(isolate->tag_table() != GrowableObjectArray::null()); | 
 |   // Canonicalize by name. | 
 |   UserTag& result = UserTag::Handle(FindTagInIsolate(thread, label)); | 
 |   if (!result.IsNull()) { | 
 |     // Tag already exists, return existing instance. | 
 |     return result.ptr(); | 
 |   } | 
 |   if (TagTableIsFull(thread)) { | 
 |     const String& error = String::Handle(String::NewFormatted( | 
 |         "UserTag instance limit (%" Pd ") reached.", UserTags::kMaxUserTags)); | 
 |     const Array& args = Array::Handle(Array::New(1)); | 
 |     args.SetAt(0, error); | 
 |     Exceptions::ThrowByType(Exceptions::kUnsupported, args); | 
 |   } | 
 |   // No tag with label exists, create and register with isolate tag table. | 
 |   result = Object::Allocate<UserTag>(space); | 
 |   result.set_label(label); | 
 |   result.set_streamable(UserTags::IsTagNameStreamable(label.ToCString())); | 
 |   AddTagToIsolate(thread, result); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | UserTagPtr UserTag::DefaultTag() { | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   Isolate* isolate = thread->isolate(); | 
 |   ASSERT(isolate != nullptr); | 
 |   if (isolate->default_tag() != UserTag::null()) { | 
 |     // Already created. | 
 |     return isolate->default_tag(); | 
 |   } | 
 |   // Create default tag. | 
 |   const UserTag& result = | 
 |       UserTag::Handle(zone, UserTag::New(Symbols::Default())); | 
 |   ASSERT(result.tag() == UserTags::kDefaultUserTag); | 
 |   isolate->set_default_tag(result); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | UserTagPtr UserTag::FindTagInIsolate(Isolate* isolate, | 
 |                                      Thread* thread, | 
 |                                      const String& label) { | 
 |   Zone* zone = thread->zone(); | 
 |   if (isolate->tag_table() == GrowableObjectArray::null()) { | 
 |     return UserTag::null(); | 
 |   } | 
 |   const GrowableObjectArray& tag_table = | 
 |       GrowableObjectArray::Handle(zone, isolate->tag_table()); | 
 |   UserTag& other = UserTag::Handle(zone); | 
 |   String& tag_label = String::Handle(zone); | 
 |   for (intptr_t i = 0; i < tag_table.Length(); i++) { | 
 |     other ^= tag_table.At(i); | 
 |     ASSERT(!other.IsNull()); | 
 |     tag_label = other.label(); | 
 |     ASSERT(!tag_label.IsNull()); | 
 |     if (tag_label.Equals(label)) { | 
 |       return other.ptr(); | 
 |     } | 
 |   } | 
 |   return UserTag::null(); | 
 | } | 
 |  | 
 | UserTagPtr UserTag::FindTagInIsolate(Thread* thread, const String& label) { | 
 |   Isolate* isolate = thread->isolate(); | 
 |   return FindTagInIsolate(isolate, thread, label); | 
 | } | 
 |  | 
 | void UserTag::AddTagToIsolate(Thread* thread, const UserTag& tag) { | 
 |   Isolate* isolate = thread->isolate(); | 
 |   Zone* zone = thread->zone(); | 
 |   ASSERT(isolate->tag_table() != GrowableObjectArray::null()); | 
 |   const GrowableObjectArray& tag_table = | 
 |       GrowableObjectArray::Handle(zone, isolate->tag_table()); | 
 |   ASSERT(!TagTableIsFull(thread)); | 
 | #if defined(DEBUG) | 
 |   // Verify that no existing tag has the same tag id. | 
 |   UserTag& other = UserTag::Handle(thread->zone()); | 
 |   for (intptr_t i = 0; i < tag_table.Length(); i++) { | 
 |     other ^= tag_table.At(i); | 
 |     ASSERT(!other.IsNull()); | 
 |     ASSERT(tag.tag() != other.tag()); | 
 |   } | 
 | #endif | 
 |   // Generate the UserTag tag id by taking the length of the isolate's | 
 |   // tag table + kUserTagIdOffset. | 
 |   uword tag_id = tag_table.Length() + UserTags::kUserTagIdOffset; | 
 |   ASSERT(tag_id >= UserTags::kUserTagIdOffset); | 
 |   ASSERT(tag_id < (UserTags::kUserTagIdOffset + UserTags::kMaxUserTags)); | 
 |   tag.set_tag(tag_id); | 
 |   tag_table.Add(tag); | 
 | } | 
 |  | 
 | bool UserTag::TagTableIsFull(Thread* thread) { | 
 |   Isolate* isolate = thread->isolate(); | 
 |   ASSERT(isolate->tag_table() != GrowableObjectArray::null()); | 
 |   const GrowableObjectArray& tag_table = | 
 |       GrowableObjectArray::Handle(thread->zone(), isolate->tag_table()); | 
 |   ASSERT(tag_table.Length() <= UserTags::kMaxUserTags); | 
 |   return tag_table.Length() == UserTags::kMaxUserTags; | 
 | } | 
 |  | 
 | UserTagPtr UserTag::FindTagById(const Isolate* isolate, uword tag_id) { | 
 |   ASSERT(isolate != nullptr); | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   ASSERT(isolate->tag_table() != GrowableObjectArray::null()); | 
 |   const GrowableObjectArray& tag_table = | 
 |       GrowableObjectArray::Handle(zone, isolate->tag_table()); | 
 |   UserTag& tag = UserTag::Handle(zone); | 
 |   for (intptr_t i = 0; i < tag_table.Length(); i++) { | 
 |     tag ^= tag_table.At(i); | 
 |     if (tag.tag() == tag_id) { | 
 |       return tag.ptr(); | 
 |     } | 
 |   } | 
 |   return UserTag::null(); | 
 | } | 
 |  | 
 | const char* UserTag::ToCString() const { | 
 |   const String& tag_label = String::Handle(label()); | 
 |   return tag_label.ToCString(); | 
 | } | 
 |  | 
 | void DumpTypeTable(Isolate* isolate) { | 
 |   OS::PrintErr("canonical types:\n"); | 
 |   CanonicalTypeSet table(isolate->group()->object_store()->canonical_types()); | 
 |   table.Dump(); | 
 |   table.Release(); | 
 | } | 
 |  | 
 | void DumpFunctionTypeTable(Isolate* isolate) { | 
 |   OS::PrintErr("canonical function types:\n"); | 
 |   CanonicalFunctionTypeSet table( | 
 |       isolate->group()->object_store()->canonical_function_types()); | 
 |   table.Dump(); | 
 |   table.Release(); | 
 | } | 
 |  | 
 | void DumpRecordTypeTable(Isolate* isolate) { | 
 |   OS::PrintErr("canonical record types:\n"); | 
 |   CanonicalRecordTypeSet table( | 
 |       isolate->group()->object_store()->canonical_record_types()); | 
 |   table.Dump(); | 
 |   table.Release(); | 
 | } | 
 |  | 
 | void DumpTypeParameterTable(Isolate* isolate) { | 
 |   OS::PrintErr("canonical type parameters (cloned from declarations):\n"); | 
 |   CanonicalTypeParameterSet table( | 
 |       isolate->group()->object_store()->canonical_type_parameters()); | 
 |   table.Dump(); | 
 |   table.Release(); | 
 | } | 
 |  | 
 | void DumpTypeArgumentsTable(Isolate* isolate) { | 
 |   OS::PrintErr("canonical type arguments:\n"); | 
 |   CanonicalTypeArgumentsSet table( | 
 |       isolate->group()->object_store()->canonical_type_arguments()); | 
 |   table.Dump(); | 
 |   table.Release(); | 
 | } | 
 |  | 
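// Scans `metadata` for a pragma annotation named "vm:entry-point" and maps
// its options to an EntryPointPragma: null or true means kAlways, while
// "get", "set" and "call" restrict the allowed access kind. Returns kNever
// if no such annotation is found.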
 | EntryPointPragma FindEntryPointPragma(IsolateGroup* IG, | 
 |                                       const Array& metadata, | 
 |                                       Field* reusable_field_handle, | 
 |                                       Object* pragma) { | 
 |   for (intptr_t i = 0; i < metadata.Length(); i++) { | 
 |     *pragma = metadata.At(i); | 
 |     if (pragma->clazz() != IG->object_store()->pragma_class()) { | 
 |       continue; | 
 |     } | 
 |     *reusable_field_handle = IG->object_store()->pragma_name(); | 
 |     if (Instance::Cast(*pragma).GetField(*reusable_field_handle) != | 
 |         Symbols::vm_entry_point().ptr()) { | 
 |       continue; | 
 |     } | 
 |     *reusable_field_handle = IG->object_store()->pragma_options(); | 
 |     *pragma = Instance::Cast(*pragma).GetField(*reusable_field_handle); | 
    if (pragma->ptr() == Bool::null() || pragma->ptr() == Bool::True().ptr()) {
      return EntryPointPragma::kAlways;
    }
 |     if (pragma->ptr() == Symbols::get().ptr()) { | 
 |       return EntryPointPragma::kGetterOnly; | 
 |     } | 
 |     if (pragma->ptr() == Symbols::set().ptr()) { | 
 |       return EntryPointPragma::kSetterOnly; | 
 |     } | 
 |     if (pragma->ptr() == Symbols::call().ptr()) { | 
 |       return EntryPointPragma::kCallOnly; | 
 |     } | 
 |   } | 
 |   return EntryPointPragma::kNever; | 
 | } | 
 |  | 
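// Returns Error::null() if `member` is marked (via `annotated`) as an entry
// point, either unconditionally or for one of `allowed_kinds`; otherwise
// returns an entry-point invocation error.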
 | DART_WARN_UNUSED_RESULT | 
 | ErrorPtr VerifyEntryPoint( | 
 |     const Library& lib, | 
 |     const Object& member, | 
 |     const Object& annotated, | 
 |     std::initializer_list<EntryPointPragma> allowed_kinds) { | 
 | #if defined(DART_PRECOMPILED_RUNTIME) | 
 |   // Annotations are discarded in the AOT snapshot, so we can't determine | 
 |   // precisely if this member was marked as an entry-point. Instead, we use | 
 |   // "has_pragma()" as a proxy, since that bit is usually retained. | 
 |   bool is_marked_entrypoint = true; | 
 |   if (annotated.IsClass() && !Class::Cast(annotated).has_pragma()) { | 
 |     is_marked_entrypoint = false; | 
 |   } else if (annotated.IsField() && !Field::Cast(annotated).has_pragma()) { | 
 |     is_marked_entrypoint = false; | 
 |   } else if (annotated.IsFunction() && | 
 |              !Function::Cast(annotated).has_pragma()) { | 
 |     is_marked_entrypoint = false; | 
 |   } | 
 | #else | 
 |   Object& metadata = Object::Handle(Object::empty_array().ptr()); | 
 |   if (!annotated.IsNull()) { | 
 |     metadata = lib.GetMetadata(annotated); | 
 |   } | 
 |   if (metadata.IsError()) return Error::RawCast(metadata.ptr()); | 
 |   ASSERT(!metadata.IsNull() && metadata.IsArray()); | 
 |   EntryPointPragma pragma = | 
 |       FindEntryPointPragma(IsolateGroup::Current(), Array::Cast(metadata), | 
 |                            &Field::Handle(), &Object::Handle()); | 
 |   bool is_marked_entrypoint = pragma == EntryPointPragma::kAlways; | 
 |   if (!is_marked_entrypoint) { | 
 |     for (const auto allowed_kind : allowed_kinds) { | 
 |       if (pragma == allowed_kind) { | 
 |         is_marked_entrypoint = true; | 
 |         break; | 
 |       } | 
 |     } | 
 |   } | 
 | #endif | 
 |   if (!is_marked_entrypoint) { | 
 |     return EntryPointMemberInvocationError(member); | 
 |   } | 
 |   return Error::null(); | 
 | } | 
 |  | 
 | DART_WARN_UNUSED_RESULT | 
 | ErrorPtr EntryPointFieldInvocationError(const String& getter_name) { | 
 |   if (!FLAG_verify_entry_points) return Error::null(); | 
 |  | 
 |   char const* error = OS::SCreate( | 
 |       Thread::Current()->zone(), | 
 |       "ERROR: Entry-points do not allow invoking fields " | 
 |       "(failure to resolve '%s')\n" | 
 |       "ERROR: See " | 
 |       "https://github.com/dart-lang/sdk/blob/master/runtime/docs/compiler/" | 
 |       "aot/entry_point_pragma.md\n", | 
 |       getter_name.ToCString()); | 
 |   OS::PrintErr("%s", error); | 
 |   return ApiError::New(String::Handle(String::New(error))); | 
 | } | 
 |  | 
 | DART_WARN_UNUSED_RESULT | 
 | ErrorPtr EntryPointMemberInvocationError(const Object& member) { | 
 |   const char* member_cstring = | 
 |       member.IsFunction() | 
 |           ? OS::SCreate( | 
 |                 Thread::Current()->zone(), "%s (kind %s)", | 
 |                 Function::Cast(member).ToLibNamePrefixedQualifiedCString(), | 
 |                 Function::KindToCString(Function::Cast(member).kind())) | 
 |           : member.ToCString(); | 
 |   if (!FLAG_verify_entry_points) { | 
 |     // Print a warning, but do not return an error. | 
 |     char const* warning = OS::SCreate( | 
 |         Thread::Current()->zone(), | 
 |         "WARNING: '%s' is accessed through Dart C API without being marked as " | 
 |         "an entry point; its tree-shaken signature cannot be verified.\n" | 
 |         "WARNING: See " | 
 |         "https://github.com/dart-lang/sdk/blob/master/runtime/docs/compiler/" | 
 |         "aot/entry_point_pragma.md\n", | 
 |         member_cstring); | 
 |     OS::PrintErr("%s", warning); | 
 |     return Error::null(); | 
 |   } | 
 |   char const* error = OS::SCreate( | 
 |       Thread::Current()->zone(), | 
 |       "ERROR: It is illegal to access '%s' through Dart C API.\n" | 
 |       "ERROR: See " | 
 |       "https://github.com/dart-lang/sdk/blob/master/runtime/docs/compiler/" | 
 |       "aot/entry_point_pragma.md\n", | 
 |       member_cstring); | 
 |   OS::PrintErr("%s", error); | 
 |   return ApiError::New(String::Handle(String::New(error))); | 
 | } | 
 |  | 
 | #if !defined(DART_PRECOMPILED_RUNTIME) | 
 | // Note: see also [NeedsDynamicInvocationForwarder] which ensures that we | 
 | // never land in a function which expects parameters in registers from a | 
 | // dynamic call site. | 
 | intptr_t Function::MaxNumberOfParametersInRegisters(Zone* zone) const { | 
 | #if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64) ||                  \ | 
 |     defined(TARGET_ARCH_ARM) | 
 |   if (!FLAG_precompiled_mode) { | 
 |     return 0; | 
 |   } | 
 |  | 
 |   if (!FLAG_use_register_cc) { | 
 |     return 0; | 
 |   } | 
 |  | 
 |   if (IsGeneric()) { | 
 |     return 0; | 
 |   } | 
 |  | 
 |   switch (kind()) { | 
 |     case UntaggedFunction::kClosureFunction: | 
 |     case UntaggedFunction::kImplicitClosureFunction: | 
 |     case UntaggedFunction::kNoSuchMethodDispatcher: | 
 |     case UntaggedFunction::kInvokeFieldDispatcher: | 
 |     case UntaggedFunction::kDynamicInvocationForwarder: | 
 |     case UntaggedFunction::kMethodExtractor: | 
 |     case UntaggedFunction::kFfiTrampoline: | 
 |     case UntaggedFunction::kFieldInitializer: | 
 |     case UntaggedFunction::kIrregexpFunction: | 
 |       return 0; | 
 |  | 
 |     default: | 
 |       break; | 
 |   } | 
 |  | 
 |   const auto unboxing_metadata = kernel::UnboxingInfoMetadataOf(*this, zone); | 
 |   if (unboxing_metadata != nullptr && | 
 |       unboxing_metadata->must_use_stack_calling_convention) { | 
 |     return 0; | 
 |   } | 
 |  | 
 |   // Getters and setters have fixed signatures. | 
 |   switch (kind()) { | 
 |     case UntaggedFunction::kGetterFunction: | 
 |     case UntaggedFunction::kImplicitGetter: | 
 |     case UntaggedFunction::kSetterFunction: | 
 |     case UntaggedFunction::kImplicitSetter: | 
 |       return num_fixed_parameters(); | 
 |  | 
 |     default: | 
 |       break; | 
 |   } | 
 |  | 
 |   if (unboxing_metadata != nullptr && | 
 |       unboxing_metadata->has_overrides_with_less_direct_parameters) { | 
 |     // Receiver (`this`) can always be passed in the register because it is | 
 |     // never an optional or named parameter. | 
 |     return unboxing_metadata->unboxed_args_info.length() + 1; | 
 |   } | 
 |  | 
 |   return num_fixed_parameters(); | 
#else
  return 0;
#endif
 | } | 
 | #endif  // !defined(DART_PRECOMPILED_RUNTIME) | 
 |  | 
 | ErrorPtr Function::VerifyCallEntryPoint() const { | 
 |   if (!FLAG_verify_entry_points) return Error::null(); | 
 |  | 
 |   const Class& cls = Class::Handle(Owner()); | 
 |   const Library& lib = Library::Handle(cls.library()); | 
  switch (kind()) {
    case UntaggedFunction::kRegularFunction:
    case UntaggedFunction::kSetterFunction:
    case UntaggedFunction::kConstructor:
      return dart::VerifyEntryPoint(lib, *this, *this,
                                    {EntryPointPragma::kCallOnly});
    case UntaggedFunction::kGetterFunction:
      return dart::VerifyEntryPoint(
          lib, *this, *this,
          {EntryPointPragma::kCallOnly, EntryPointPragma::kGetterOnly});
    case UntaggedFunction::kImplicitGetter:
      return dart::VerifyEntryPoint(lib, *this, Field::Handle(accessor_field()),
                                    {EntryPointPragma::kGetterOnly});
    case UntaggedFunction::kImplicitSetter:
      return dart::VerifyEntryPoint(lib, *this, Field::Handle(accessor_field()),
                                    {EntryPointPragma::kSetterOnly});
    case UntaggedFunction::kMethodExtractor:
      return Function::Handle(extracted_method_closure())
          .VerifyClosurizedEntryPoint();
    default:
      return dart::VerifyEntryPoint(lib, *this, Object::Handle(), {});
  }
 | } | 
 |  | 
 | ErrorPtr Function::VerifyClosurizedEntryPoint() const { | 
 |   if (!FLAG_verify_entry_points) return Error::null(); | 
 |  | 
 |   const Class& cls = Class::Handle(Owner()); | 
 |   const Library& lib = Library::Handle(cls.library()); | 
 |   switch (kind()) { | 
 |     case UntaggedFunction::kRegularFunction: | 
 |       return dart::VerifyEntryPoint(lib, *this, *this, | 
 |                                     {EntryPointPragma::kGetterOnly}); | 
 |     case UntaggedFunction::kImplicitClosureFunction: { | 
 |       const Function& parent = Function::Handle(parent_function()); | 
 |       return dart::VerifyEntryPoint(lib, parent, parent, | 
 |                                     {EntryPointPragma::kGetterOnly}); | 
 |     } | 
 |     default: | 
 |       UNREACHABLE(); | 
 |   } | 
 | } | 
 |  | 
 | ErrorPtr Field::VerifyEntryPoint(EntryPointPragma pragma) const { | 
 |   if (!FLAG_verify_entry_points) return Error::null(); | 
 |   const Class& cls = Class::Handle(Owner()); | 
 |   const Library& lib = Library::Handle(cls.library()); | 
 |   return dart::VerifyEntryPoint(lib, *this, *this, {pragma}); | 
 | } | 
 |  | 
 | ErrorPtr Class::VerifyEntryPoint() const { | 
 |   if (!FLAG_verify_entry_points) return Error::null(); | 
 |   const Library& lib = Library::Handle(library()); | 
 |   if (!lib.IsNull()) { | 
 |     return dart::VerifyEntryPoint(lib, *this, *this, {}); | 
 |   } else { | 
 |     return Error::null(); | 
 |   } | 
 | } | 
 |  | 
 | AbstractTypePtr RecordType::FieldTypeAt(intptr_t index) const { | 
 |   const Array& field_types = Array::Handle(untag()->field_types()); | 
 |   return AbstractType::RawCast(field_types.At(index)); | 
 | } | 
 |  | 
 | void RecordType::SetFieldTypeAt(intptr_t index, | 
 |                                 const AbstractType& value) const { | 
 |   ASSERT(!value.IsNull()); | 
 |   const Array& field_types = Array::Handle(untag()->field_types()); | 
 |   field_types.SetAt(index, value); | 
 | } | 
 |  | 
 | void RecordType::set_field_types(const Array& value) const { | 
 |   ASSERT(!value.IsNull()); | 
 |   untag()->set_field_types(value.ptr()); | 
 | } | 
 |  | 
 | void RecordType::set_shape(RecordShape shape) const { | 
 |   untag()->set_shape(shape.AsSmi()); | 
 | } | 
 |  | 
 | ArrayPtr RecordType::GetFieldNames(Thread* thread) const { | 
 |   return shape().GetFieldNames(thread); | 
 | } | 
 |  | 
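// Prints this record type as "(T1, T2, {T3 name})", followed by the
// nullability suffix.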
 | void RecordType::Print(NameVisibility name_visibility, | 
 |                        BaseTextBuffer* printer) const { | 
 |   if (IsNull()) { | 
 |     printer->AddString("null"); | 
 |     return; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   AbstractType& type = AbstractType::Handle(zone); | 
 |   String& name = String::Handle(zone); | 
 |   const intptr_t num_fields = NumFields(); | 
 |   const Array& field_names = Array::Handle(zone, GetFieldNames(thread)); | 
 |   const intptr_t num_positional_fields = num_fields - field_names.Length(); | 
 |   printer->AddString("("); | 
 |   for (intptr_t i = 0; i < num_fields; ++i) { | 
 |     if (i != 0) { | 
 |       printer->AddString(", "); | 
 |     } | 
 |     if (i == num_positional_fields) { | 
 |       printer->AddString("{"); | 
 |     } | 
 |     type = FieldTypeAt(i); | 
 |     type.PrintName(name_visibility, printer); | 
 |     if (i >= num_positional_fields) { | 
 |       printer->AddString(" "); | 
 |       name ^= field_names.At(i - num_positional_fields); | 
 |       printer->AddString(name.ToCString()); | 
 |     } | 
 |   } | 
 |   if (num_positional_fields < num_fields) { | 
 |     printer->AddString("}"); | 
 |   } | 
 |   printer->AddString(")"); | 
 |   printer->AddString(NullabilitySuffix(name_visibility)); | 
 | } | 
 |  | 
 | const char* RecordType::ToCString() const { | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   ZoneTextBuffer printer(zone); | 
 |   Print(kInternalName, &printer); | 
 |   return printer.buffer(); | 
 | } | 
 |  | 
 | bool RecordType::IsInstantiated(Genericity genericity, | 
 |                                 intptr_t num_free_fun_type_params) const { | 
 |   AbstractType& type = AbstractType::Handle(); | 
 |   const intptr_t num_fields = NumFields(); | 
 |   for (intptr_t i = 0; i < num_fields; ++i) { | 
 |     type = FieldTypeAt(i); | 
 |     if (!type.IsInstantiated(genericity, num_free_fun_type_params)) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | RecordTypePtr RecordType::New(Heap::Space space) { | 
 |   return Object::Allocate<RecordType>(space); | 
 | } | 
 |  | 
 | RecordTypePtr RecordType::New(RecordShape shape, | 
 |                               const Array& field_types, | 
 |                               Nullability nullability, | 
 |                               Heap::Space space) { | 
 |   Zone* Z = Thread::Current()->zone(); | 
 |   const RecordType& result = RecordType::Handle(Z, RecordType::New(space)); | 
 |   result.set_shape(shape); | 
 |   result.set_field_types(field_types); | 
 |   result.SetHash(0); | 
 |   result.set_flags(0); | 
 |   result.set_nullability(nullability); | 
 |   result.set_type_state(UntaggedAbstractType::kAllocated); | 
 |   result.InitializeTypeTestingStubNonAtomic( | 
 |       Code::Handle(Z, TypeTestingStubGenerator::DefaultCodeForType(result))); | 
 |   return result.ptr(); | 
 | } | 
 |  | 
 | RecordTypePtr RecordType::ToNullability(Nullability value, | 
 |                                         Heap::Space space) const { | 
 |   if (nullability() == value) { | 
 |     return ptr(); | 
 |   } | 
  // Clone the record type and set the new nullability.
  // Note: always cloning in old space (and removing the space parameter)
  // would not satisfy existing requests for type instantiation in new space.
 |   Thread* T = Thread::Current(); | 
 |   Zone* Z = T->zone(); | 
 |   AbstractType& type = RecordType::Handle( | 
 |       Z, | 
 |       RecordType::New(shape(), Array::Handle(Z, field_types()), value, space)); | 
 |   if (IsFinalized()) { | 
 |     type.SetIsFinalized(); | 
 |     if (IsCanonical()) { | 
 |       type ^= type.Canonicalize(T); | 
 |     } | 
 |   } | 
 |   return RecordType::Cast(type).ptr(); | 
 | } | 
 |  | 
 | bool RecordType::IsEquivalent( | 
 |     const Instance& other, | 
 |     TypeEquality kind, | 
 |     FunctionTypeMapping* function_type_equivalence) const { | 
 |   ASSERT(!IsNull()); | 
 |   if (ptr() == other.ptr()) { | 
 |     return true; | 
 |   } | 
 |   if (!other.IsRecordType()) { | 
 |     return false; | 
 |   } | 
 |   const RecordType& other_type = RecordType::Cast(other); | 
 |   // Equal record types must have the same shape | 
 |   // (number of fields and named fields). | 
 |   if (shape() != other_type.shape()) { | 
 |     return false; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   if (!IsNullabilityEquivalent(thread, other_type, kind)) { | 
 |     return false; | 
 |   } | 
 |   // Equal record types must have equal field types. | 
 |   AbstractType& field_type = Type::Handle(zone); | 
 |   AbstractType& other_field_type = Type::Handle(zone); | 
 |   const intptr_t num_fields = NumFields(); | 
 |   for (intptr_t i = 0; i < num_fields; ++i) { | 
 |     field_type = FieldTypeAt(i); | 
 |     other_field_type = other_type.FieldTypeAt(i); | 
 |     if (!field_type.IsEquivalent(other_field_type, kind, | 
 |                                  function_type_equivalence)) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | uword RecordType::ComputeHash() const { | 
 |   ASSERT(IsFinalized()); | 
 |   uint32_t result = 0; | 
 |   result = CombineHashes(result, static_cast<uint32_t>(nullability())); | 
 |   result = CombineHashes(result, static_cast<uint32_t>(shape().AsInt())); | 
 |   AbstractType& type = AbstractType::Handle(); | 
 |   const intptr_t num_fields = NumFields(); | 
 |   for (intptr_t i = 0; i < num_fields; ++i) { | 
 |     type = FieldTypeAt(i); | 
 |     result = CombineHashes(result, type.Hash()); | 
 |   } | 
 |   result = FinalizeHash(result, kHashBits); | 
 |   SetHash(result); | 
 |   return result; | 
 | } | 
 |  | 
 | AbstractTypePtr RecordType::Canonicalize(Thread* thread) const { | 
 |   ASSERT(IsFinalized()); | 
 |   Zone* zone = thread->zone(); | 
 |   AbstractType& type = AbstractType::Handle(zone); | 
 |   if (IsCanonical()) { | 
 | #ifdef DEBUG | 
 |     // Verify that all fields are allocated in old space and are canonical. | 
 |     ASSERT(Array::Handle(zone, field_types()).IsOld()); | 
 |     const intptr_t num_fields = NumFields(); | 
 |     for (intptr_t i = 0; i < num_fields; ++i) { | 
 |       type = FieldTypeAt(i); | 
 |       ASSERT(type.IsOld()); | 
 |       ASSERT(type.IsCanonical()); | 
 |     } | 
 | #endif | 
 |     return ptr(); | 
 |   } | 
 |   auto isolate_group = thread->isolate_group(); | 
 |   ObjectStore* object_store = isolate_group->object_store(); | 
 |   RecordType& rec = RecordType::Handle(zone); | 
 |   { | 
 |     SafepointMutexLocker ml(isolate_group->type_canonicalization_mutex()); | 
 |     CanonicalRecordTypeSet table(zone, object_store->canonical_record_types()); | 
 |     rec ^= table.GetOrNull(CanonicalRecordTypeKey(*this)); | 
 |     ASSERT(object_store->canonical_record_types() == table.Release().ptr()); | 
 |   } | 
 |   if (rec.IsNull()) { | 
 |     ASSERT(Array::Handle(zone, field_types()).IsOld()); | 
 |     const intptr_t num_fields = NumFields(); | 
 |     for (intptr_t i = 0; i < num_fields; ++i) { | 
 |       type = FieldTypeAt(i); | 
 |       if (!type.IsCanonical()) { | 
 |         type = type.Canonicalize(thread); | 
 |         SetFieldTypeAt(i, type); | 
 |       } | 
 |     } | 
    // Check to see if the record type got added to the canonical table as
    // part of the canonicalization of its field types.
 |     SafepointMutexLocker ml(isolate_group->type_canonicalization_mutex()); | 
 |     CanonicalRecordTypeSet table(zone, object_store->canonical_record_types()); | 
 |     rec ^= table.GetOrNull(CanonicalRecordTypeKey(*this)); | 
 |     if (rec.IsNull()) { | 
 |       // Add this record type into the canonical table of record types. | 
 |       if (this->IsNew()) { | 
 |         rec ^= Object::Clone(*this, Heap::kOld); | 
 |       } else { | 
 |         rec = this->ptr(); | 
 |       } | 
 |       ASSERT(rec.IsOld()); | 
 |       rec.SetCanonical();  // Mark object as being canonical. | 
 |       bool present = table.Insert(rec); | 
 |       ASSERT(!present); | 
 |     } | 
 |     object_store->set_canonical_record_types(table.Release()); | 
 |   } | 
 |   return rec.ptr(); | 
 | } | 
 |  | 
 | void RecordType::EnumerateURIs(URIs* uris) const { | 
 |   AbstractType& type = AbstractType::Handle(); | 
 |   const intptr_t num_fields = NumFields(); | 
 |   for (intptr_t i = 0; i < num_fields; ++i) { | 
 |     type = FieldTypeAt(i); | 
 |     type.EnumerateURIs(uris); | 
 |   } | 
 | } | 
 |  | 
 | void RecordType::PrintName(NameVisibility name_visibility, | 
 |                            BaseTextBuffer* printer) const { | 
 |   RecordType::Cast(*this).Print(name_visibility, printer); | 
 | } | 
 |  | 
 | AbstractTypePtr RecordType::InstantiateFrom( | 
 |     const TypeArguments& instantiator_type_arguments, | 
 |     const TypeArguments& function_type_arguments, | 
 |     intptr_t num_free_fun_type_params, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_mapping, | 
 |     intptr_t num_parent_type_args_adjustment) const { | 
 |   ASSERT(IsFinalized()); | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |  | 
 |   const intptr_t num_fields = NumFields(); | 
 |   const Array& old_field_types = Array::Handle(zone, field_types()); | 
 |   const Array& new_field_types = | 
 |       Array::Handle(zone, Array::New(num_fields, space)); | 
 |   AbstractType& type = AbstractType::Handle(zone); | 
 |   for (intptr_t i = 0; i < num_fields; ++i) { | 
 |     type ^= old_field_types.At(i); | 
 |     if (!type.IsInstantiated()) { | 
 |       type = type.InstantiateFrom( | 
 |           instantiator_type_arguments, function_type_arguments, | 
 |           num_free_fun_type_params, space, function_type_mapping, | 
 |           num_parent_type_args_adjustment); | 
 |       // A returned null type indicates a failed instantiation in dead code that | 
 |       // must be propagated up to the caller, the optimizing compiler. | 
 |       if (type.IsNull()) { | 
 |         return RecordType::null(); | 
 |       } | 
 |     } | 
 |     new_field_types.SetAt(i, type); | 
 |   } | 
 |  | 
 |   const auto& rec = RecordType::Handle( | 
 |       zone, RecordType::New(shape(), new_field_types, nullability(), space)); | 
 |  | 
 |   rec.SetIsFinalized(); | 
 |  | 
 |   // Canonicalization is not part of instantiation. | 
 |   return rec.ptr(); | 
 | } | 
 |  | 
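// Copies the field types lazily: a new field types array is allocated only
// once the first field type actually changes; if nothing changes, the
// original record type is returned as-is.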
 | AbstractTypePtr RecordType::UpdateFunctionTypes( | 
 |     intptr_t num_parent_type_args_adjustment, | 
 |     intptr_t num_free_fun_type_params, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_mapping) const { | 
 |   ASSERT(IsFinalized()); | 
 |   ASSERT(num_parent_type_args_adjustment >= 0); | 
 |   Zone* zone = Thread::Current()->zone(); | 
 |   const auto& types = Array::Handle(zone, field_types()); | 
 |   Array* updated_types = nullptr; | 
 |   auto& type = AbstractType::Handle(zone); | 
 |   auto& updated = AbstractType::Handle(zone); | 
 |   for (intptr_t i = 0, n = NumFields(); i < n; ++i) { | 
 |     type ^= types.At(i); | 
 |     updated = type.UpdateFunctionTypes(num_parent_type_args_adjustment, | 
 |                                        num_free_fun_type_params, space, | 
 |                                        function_type_mapping); | 
 |     if (type.ptr() != updated.ptr()) { | 
 |       if (updated_types == nullptr) { | 
 |         updated_types = &Array::Handle(zone, Array::New(n, space)); | 
 |         for (intptr_t j = 0; j < i; ++j) { | 
 |           type ^= types.At(j); | 
 |           updated_types->SetAt(j, type); | 
 |         } | 
 |       } | 
 |     } | 
 |     if (updated_types != nullptr) { | 
 |       updated_types->SetAt(i, updated); | 
 |     } | 
 |   } | 
 |   if (updated_types == nullptr) { | 
 |     return ptr(); | 
 |   } | 
 |   const auto& new_rt = RecordType::Handle( | 
 |       zone, RecordType::New(shape(), *updated_types, nullability(), space)); | 
 |   new_rt.SetIsFinalized(); | 
 |   return new_rt.ptr(); | 
 | } | 
 |  | 
 | bool RecordType::IsSubtypeOf( | 
 |     const RecordType& other, | 
 |     Heap::Space space, | 
 |     FunctionTypeMapping* function_type_equivalence) const { | 
 |   if (ptr() == other.ptr()) { | 
 |     return true; | 
 |   } | 
 |   ASSERT(IsFinalized()); | 
 |   ASSERT(other.IsFinalized()); | 
 |   const intptr_t num_fields = NumFields(); | 
 |   if (shape() != other.shape()) { | 
 |     // Different number of fields or different named fields. | 
 |     return false; | 
 |   } | 
 |   Thread* const thread = Thread::Current(); | 
 |   if (!IsNullabilityEquivalent(thread, other, TypeEquality::kInSubtypeTest)) { | 
 |     return false; | 
 |   } | 
 |   // Check subtyping of record field types. | 
 |   Zone* const zone = thread->zone(); | 
 |   AbstractType& field_type = Type::Handle(zone); | 
 |   AbstractType& other_field_type = Type::Handle(zone); | 
 |   for (intptr_t i = 0; i < num_fields; ++i) { | 
 |     field_type = FieldTypeAt(i); | 
 |     other_field_type = other.FieldTypeAt(i); | 
 |     if (!field_type.IsSubtypeOf(other_field_type, space, | 
 |                                 function_type_equivalence)) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
 | RecordPtr Record::New(RecordShape shape, Heap::Space space) { | 
 |   const intptr_t num_fields = shape.num_fields(); | 
 |   ASSERT(num_fields >= 0); | 
 |   auto raw = Object::Allocate<Record>(space, num_fields); | 
 |   NoSafepointScope no_safepoint; | 
 |   raw->untag()->set_shape(shape.AsSmi()); | 
 |   return raw; | 
 | } | 
 |  | 
 | const char* Record::ToCString() const { | 
 |   if (IsNull()) { | 
 |     return "Record: null"; | 
 |   } | 
 |   Thread* thread = Thread::Current(); | 
 |   Zone* zone = thread->zone(); | 
 |   ZoneTextBuffer printer(zone); | 
 |   const intptr_t num_fields = this->num_fields(); | 
 |   const Array& field_names = Array::Handle(zone, GetFieldNames(thread)); | 
 |   const intptr_t num_positional_fields = num_fields - field_names.Length(); | 
 |   Object& obj = Object::Handle(zone); | 
 |   printer.AddString("Record ("); | 
 |   for (intptr_t i = 0; i < num_fields; ++i) { | 
 |     if (i != 0) { | 
 |       printer.AddString(", "); | 
 |     } | 
 |     if (i >= num_positional_fields) { | 
 |       obj = field_names.At(i - num_positional_fields); | 
 |       printer.AddString(obj.ToCString()); | 
 |       printer.AddString(": "); | 
 |     } | 
 |     obj = FieldAt(i); | 
 |     printer.AddString(obj.ToCString()); | 
 |   } | 
 |   printer.AddString(")"); | 
 |   return printer.buffer(); | 
 | } | 
 |  | 
 | bool Record::CanonicalizeEquals(const Instance& other) const { | 
 |   if (this->ptr() == other.ptr()) { | 
 |     return true; | 
 |   } | 
 |  | 
 |   if (!other.IsRecord() || other.IsNull()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   const Record& other_rec = Record::Cast(other); | 
 |   if (shape() != other_rec.shape()) { | 
 |     return false; | 
 |   } | 
 |  | 
 |   const intptr_t num_fields = this->num_fields(); | 
 |   for (intptr_t i = 0; i < num_fields; ++i) { | 
 |     if (this->FieldAt(i) != other_rec.FieldAt(i)) { | 
 |       return false; | 
 |     } | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
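// Combines the shape with the canonical hashes of all fields, caching the
// result in the heap's canonical hash table.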
 | uint32_t Record::CanonicalizeHash() const { | 
 |   Thread* thread = Thread::Current(); | 
 |   uint32_t hash = thread->heap()->GetCanonicalHash(ptr()); | 
 |   if (hash != 0) { | 
 |     return hash; | 
 |   } | 
 |   hash = shape().AsInt(); | 
 |   Instance& element = Instance::Handle(); | 
 |   const intptr_t num_fields = this->num_fields(); | 
 |   for (intptr_t i = 0; i < num_fields; ++i) { | 
 |     element ^= FieldAt(i); | 
 |     hash = CombineHashes(hash, element.CanonicalizeHash()); | 
 |   } | 
 |   hash = FinalizeHash(hash, kHashBits); | 
 |   thread->heap()->SetCanonicalHash(ptr(), hash); | 
 |   return hash; | 
 | } | 
 |  | 
 | void Record::CanonicalizeFieldsLocked(Thread* thread) const { | 
 |   Zone* zone = thread->zone(); | 
 |   Instance& obj = Instance::Handle(zone); | 
 |   const intptr_t num_fields = this->num_fields(); | 
 |   for (intptr_t i = 0; i < num_fields; ++i) { | 
 |     obj ^= FieldAt(i); | 
 |     obj = obj.CanonicalizeLocked(thread); | 
 |     SetFieldAt(i, obj); | 
 |   } | 
 | } | 
 |  | 
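// Computes the runtime type of this record from the runtime types of its
// current field values.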
 | RecordTypePtr Record::GetRecordType() const { | 
 |   Zone* const zone = Thread::Current()->zone(); | 
 |   const intptr_t num_fields = this->num_fields(); | 
 |   const Array& field_types = | 
 |       Array::Handle(zone, Array::New(num_fields, Heap::kOld)); | 
 |   Instance& obj = Instance::Handle(zone); | 
 |   AbstractType& type = AbstractType::Handle(zone); | 
 |   for (intptr_t i = 0; i < num_fields; ++i) { | 
 |     obj ^= FieldAt(i); | 
 |     type = obj.GetType(Heap::kNew); | 
 |     field_types.SetAt(i, type); | 
 |   } | 
 |   type = RecordType::New(shape(), field_types, Nullability::kNonNullable); | 
 |   type = ClassFinalizer::FinalizeType(type); | 
 |   return RecordType::Cast(type).ptr(); | 
 | } | 
 |  | 
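// Parses positional field names of the form "$1", "$2", ... into zero-based
// field indices. Returns -1 if `field_name` is not of this form.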
 | intptr_t Record::GetPositionalFieldIndexFromFieldName( | 
 |     const String& field_name) { | 
 |   if (field_name.IsOneByteString() && field_name.Length() >= 1 && | 
 |       field_name.CharAt(0) == '$') { | 
 |     int64_t value = 0; | 
 |     const char* cstr = field_name.ToCString(); | 
 |     if (OS::StringToInt64(cstr + 1 /* skip '$' */, &value)) { | 
 |       if (value >= 1 && value < kMaxElements) { | 
 |         return static_cast<intptr_t>(value - 1); | 
 |       } | 
 |     } | 
 |   } | 
 |   return -1; | 
 | } | 
 |  | 
 | intptr_t Record::GetFieldIndexByName(Thread* thread, | 
 |                                      const String& field_name) const { | 
 |   ASSERT(field_name.IsSymbol()); | 
 |   const intptr_t field_index = | 
 |       Record::GetPositionalFieldIndexFromFieldName(field_name); | 
 |   const Array& field_names = Array::Handle(GetFieldNames(thread)); | 
 |   const intptr_t num_positional_fields = num_fields() - field_names.Length(); | 
 |   if ((field_index >= 0) && (field_index < num_positional_fields)) { | 
 |     return field_index; | 
 |   } else { | 
 |     for (intptr_t i = 0, n = field_names.Length(); i < n; ++i) { | 
 |       if (field_names.At(i) == field_name.ptr()) { | 
 |         return num_positional_fields + i; | 
 |       } | 
 |     } | 
 |   } | 
 |   return -1; | 
 | } | 
 |  | 
 | class RecordFieldNamesMapTraits { | 
 |  public: | 
 |   static const char* Name() { return "RecordFieldNamesMapTraits"; } | 
 |   static bool ReportStats() { return false; } | 
 |  | 
 |   static bool IsMatch(const Object& a, const Object& b) { | 
 |     return Array::Cast(a).CanonicalizeEquals(Array::Cast(b)); | 
 |   } | 
 |  | 
 |   static uword Hash(const Object& key) { | 
 |     return Array::Cast(key).CanonicalizeHash(); | 
 |   } | 
 |  | 
 |   static ObjectPtr NewKey(const Array& arr) { return arr.ptr(); } | 
 | }; | 
 | typedef UnorderedHashMap<RecordFieldNamesMapTraits> RecordFieldNamesMap; | 
 |  | 
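// Interns `field_names` in the isolate group's record field names table and
// returns a shape combining the number of fields with the table index of the
// names. Index 0 is reserved for records without named fields.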
 | RecordShape RecordShape::Register(Thread* thread, | 
 |                                   intptr_t num_fields, | 
 |                                   const Array& field_names) { | 
 |   ASSERT(!field_names.IsNull()); | 
 |   ASSERT(field_names.IsImmutable()); | 
 |   ASSERT(field_names.ptr() == Object::empty_array().ptr() || | 
 |          field_names.Length() > 0); | 
 |  | 
 |   Zone* zone = thread->zone(); | 
 |   IsolateGroup* isolate_group = thread->isolate_group(); | 
 |   ObjectStore* object_store = isolate_group->object_store(); | 
 |  | 
 |   if (object_store->record_field_names<std::memory_order_acquire>() == | 
 |       Array::null()) { | 
 |     // First-time initialization. | 
 |     SafepointWriteRwLocker ml(thread, isolate_group->program_lock()); | 
 |     if (object_store->record_field_names() == Array::null()) { | 
 |       // Reserve record field names index 0 for records without named fields. | 
 |       RecordFieldNamesMap map( | 
 |           HashTables::New<RecordFieldNamesMap>(16, Heap::kOld)); | 
 |       map.InsertOrGetValue(Object::empty_array(), | 
 |                            Smi::Handle(zone, Smi::New(0))); | 
 |       ASSERT(map.NumOccupied() == 1); | 
 |       object_store->set_record_field_names_map(map.Release()); | 
 |       const auto& table = Array::Handle(zone, Array::New(16)); | 
 |       table.SetAt(0, Object::empty_array()); | 
 |       object_store->set_record_field_names<std::memory_order_release>(table); | 
 |     } | 
 |   } | 
 |  | 
 | #if defined(DART_PRECOMPILER) | 
 |   const intptr_t kMaxNumFields = compiler::target::RecordShape::kMaxNumFields; | 
 |   const intptr_t kMaxFieldNamesIndex = | 
 |       compiler::target::RecordShape::kMaxFieldNamesIndex; | 
 | #else | 
 |   const intptr_t kMaxNumFields = RecordShape::kMaxNumFields; | 
 |   const intptr_t kMaxFieldNamesIndex = RecordShape::kMaxFieldNamesIndex; | 
 | #endif | 
 |  | 
 |   if (num_fields > kMaxNumFields) { | 
 |     FATAL("Too many record fields"); | 
 |   } | 
 |   if (field_names.ptr() == Object::empty_array().ptr()) { | 
 |     return RecordShape::ForUnnamed(num_fields); | 
 |   } | 
 |  | 
 |   { | 
 |     SafepointReadRwLocker ml(thread, isolate_group->program_lock()); | 
 |     RecordFieldNamesMap map(object_store->record_field_names_map()); | 
 |     Smi& index = Smi::Handle(zone); | 
 |     index ^= map.GetOrNull(field_names); | 
 |     ASSERT(map.Release().ptr() == object_store->record_field_names_map()); | 
 |     if (!index.IsNull()) { | 
 |       return RecordShape(num_fields, index.Value()); | 
 |     } | 
 |   } | 
 |  | 
 |   SafepointWriteRwLocker ml(thread, isolate_group->program_lock()); | 
 |   RecordFieldNamesMap map(object_store->record_field_names_map()); | 
 |   const intptr_t new_index = map.NumOccupied(); | 
 |   if (new_index > kMaxFieldNamesIndex) { | 
 |     FATAL("Too many record shapes"); | 
 |   } | 
 |  | 
 |   const intptr_t index = Smi::Value(Smi::RawCast(map.InsertOrGetValue( | 
 |       field_names, Smi::Handle(zone, Smi::New(new_index))))); | 
 |   ASSERT(index > 0); | 
 |  | 
 |   if (index == new_index) { | 
 |     ASSERT(map.NumOccupied() == (new_index + 1)); | 
 |     Array& table = Array::Handle(zone, object_store->record_field_names()); | 
 |     intptr_t capacity = table.Length(); | 
 |     if (index >= table.Length()) { | 
 |       capacity = capacity + (capacity >> 2); | 
 |       table = Array::Grow(table, capacity); | 
 |       object_store->set_record_field_names(table); | 
 |     } | 
 |     table.SetAt(index, field_names); | 
 |   } else { | 
 |     ASSERT(index < new_index); | 
 |   } | 
 |   object_store->set_record_field_names_map(map.Release()); | 
 |  | 
 |   const RecordShape shape(num_fields, index); | 
 |   ASSERT(shape.GetFieldNames(thread) == field_names.ptr()); | 
 |   ASSERT(shape.num_fields() == num_fields); | 
 |   return shape; | 
 | } | 
 |  | 
 | ArrayPtr RecordShape::GetFieldNames(Thread* thread) const { | 
 |   ObjectStore* object_store = thread->isolate_group()->object_store(); | 
 |   Array& table = | 
 |       Array::Handle(thread->zone(), object_store->record_field_names()); | 
 |   ASSERT(!table.IsNull()); | 
 |   return Array::RawCast(table.At(field_names_index())); | 
 | } | 
 |  | 
 | }  // namespace dart |