| // Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file |
| // for details. All rights reserved. Use of this source code is governed by a |
| // BSD-style license that can be found in the LICENSE file. |
| |
| #include "vm/app_snapshot.h" |
| |
| #include <memory> |
| #include <utility> |
| |
| #include "platform/assert.h" |
| #include "vm/bootstrap.h" |
| #include "vm/bss_relocs.h" |
| #include "vm/canonical_tables.h" |
| #include "vm/class_id.h" |
| #include "vm/code_observers.h" |
| #include "vm/compiler/api/print_filter.h" |
| #include "vm/compiler/assembler/disassembler.h" |
| #include "vm/dart.h" |
| #include "vm/dart_entry.h" |
| #include "vm/dispatch_table.h" |
| #include "vm/flag_list.h" |
| #include "vm/growable_array.h" |
| #include "vm/heap/heap.h" |
| #include "vm/image_snapshot.h" |
| #include "vm/native_entry.h" |
| #include "vm/object.h" |
| #include "vm/object_store.h" |
| #include "vm/program_visitor.h" |
| #include "vm/stub_code.h" |
| #include "vm/symbols.h" |
| #include "vm/timeline.h" |
| #include "vm/v8_snapshot_writer.h" |
| #include "vm/version.h" |
| #include "vm/zone_text_buffer.h" |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| #include "vm/compiler/backend/code_statistics.h" |
| #include "vm/compiler/backend/il_printer.h" |
| #include "vm/compiler/relocation.h" |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| |
| namespace dart { |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| DEFINE_FLAG(bool, |
| print_cluster_information, |
| false, |
| "Print information about clusters written to snapshot"); |
| #endif |
| |
| #if defined(DART_PRECOMPILER) |
| DEFINE_FLAG(charp, |
| write_v8_snapshot_profile_to, |
| nullptr, |
| "Write a snapshot profile in V8 format to a file."); |
| #endif // defined(DART_PRECOMPILER) |
| |
| namespace { |
| // Storage traits for HashTable which allow creating hash tables backed by |
| // zone memory. Used to compute the cluster order for canonical clusters. |
| struct GrowableArrayStorageTraits { |
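| // A zone-allocated, fixed-length array of ObjectPtrs mimicking the subset |
| // of the Array interface that HashTable requires of its backing store. |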
| class Array : public ZoneAllocated { |
| public: |
| explicit Array(Zone* zone, intptr_t length) |
| : length_(length), array_(zone->Alloc<ObjectPtr>(length)) {} |
| |
| intptr_t Length() const { return length_; } |
| void SetAt(intptr_t index, const Object& value) const { |
| array_[index] = value.ptr(); |
| } |
| ObjectPtr At(intptr_t index) const { return array_[index]; } |
| |
| private: |
| intptr_t length_ = 0; |
| ObjectPtr* array_ = nullptr; |
| DISALLOW_COPY_AND_ASSIGN(Array); |
| }; |
| |
| using ArrayPtr = Array*; |
| class ArrayHandle : public ZoneAllocated { |
| public: |
| explicit ArrayHandle(ArrayPtr ptr) : ptr_(ptr) {} |
| ArrayHandle() {} |
| |
| void SetFrom(const ArrayHandle& other) { ptr_ = other.ptr_; } |
| void Clear() { ptr_ = nullptr; } |
| bool IsNull() const { return ptr_ == nullptr; } |
| ArrayPtr ptr() { return ptr_; } |
| |
| intptr_t Length() const { return ptr_->Length(); } |
| void SetAt(intptr_t index, const Object& value) const { |
| ptr_->SetAt(index, value); |
| } |
| ObjectPtr At(intptr_t index) const { return ptr_->At(index); } |
| |
| private: |
| ArrayPtr ptr_ = nullptr; |
| DISALLOW_COPY_AND_ASSIGN(ArrayHandle); |
| }; |
| |
| static ArrayHandle& PtrToHandle(ArrayPtr ptr) { |
| return *new ArrayHandle(ptr); |
| } |
| |
| static void SetHandle(ArrayHandle& dst, const ArrayHandle& src) { // NOLINT |
| dst.SetFrom(src); |
| } |
| |
| static void ClearHandle(ArrayHandle& dst) { // NOLINT |
| dst.Clear(); |
| } |
| |
| static ArrayPtr New(Zone* zone, intptr_t length, Heap::Space space) { |
| return new (zone) Array(zone, length); |
| } |
| |
| static bool IsImmutable(const ArrayHandle& handle) { return false; } |
| |
| static ObjectPtr At(ArrayHandle* array, intptr_t index) { |
| return array->At(index); |
| } |
| |
| static void SetAt(ArrayHandle* array, intptr_t index, const Object& value) { |
| array->SetAt(index, value); |
| } |
| }; |
| } // namespace |
| |
| #if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32) |
| |
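| // Relocates the given code objects, temporarily making code pages |
| // writable, and appends the commands the image writer will need to emit |
| // the relocated instructions. |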
| static void RelocateCodeObjects( |
| bool is_vm, |
| GrowableArray<CodePtr>* code_objects, |
| GrowableArray<ImageWriterCommand>* image_writer_commands) { |
| auto thread = Thread::Current(); |
| auto isolate_group = |
| is_vm ? Dart::vm_isolate()->group() : thread->isolate_group(); |
| |
| WritableCodePages writable_code_pages(thread, isolate_group); |
| CodeRelocator::Relocate(thread, code_objects, image_writer_commands, is_vm); |
| } |
| |
| #endif // defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32) |
| |
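| // Initializes the tag word of a freshly deserialized object: sets the |
| // class id, size and canonical bit, and marks the object as old, not |
| // marked and not remembered. |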
| void Deserializer::InitializeHeader(ObjectPtr raw, |
| intptr_t class_id, |
| intptr_t size, |
| bool is_canonical) { |
| ASSERT(Utils::IsAligned(size, kObjectAlignment)); |
| uword tags = 0; |
| tags = UntaggedObject::ClassIdTag::update(class_id, tags); |
| tags = UntaggedObject::SizeTag::update(size, tags); |
| tags = UntaggedObject::CanonicalBit::update(is_canonical, tags); |
| tags = UntaggedObject::OldBit::update(true, tags); |
| tags = UntaggedObject::OldAndNotMarkedBit::update(true, tags); |
| tags = UntaggedObject::OldAndNotRememberedBit::update(true, tags); |
| tags = UntaggedObject::NewBit::update(false, tags); |
| raw->untag()->tags_ = tags; |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| void SerializationCluster::WriteAndMeasureAlloc(Serializer* serializer) { |
| intptr_t start_size = serializer->bytes_written(); |
| intptr_t start_data = serializer->GetDataSize(); |
| intptr_t start_objects = serializer->next_ref_index(); |
| uint64_t cid_and_canonical = |
| (static_cast<uint64_t>(cid_) << 1) | (is_canonical() ? 0x1 : 0x0); |
| serializer->Write<uint64_t>(cid_and_canonical); |
| WriteAlloc(serializer); |
| intptr_t stop_size = serializer->bytes_written(); |
| intptr_t stop_data = serializer->GetDataSize(); |
| intptr_t stop_objects = serializer->next_ref_index(); |
| if (FLAG_print_cluster_information) { |
| OS::PrintErr("Snapshot 0x%" Pp " (%" Pd "), ", start_size, |
| stop_size - start_size); |
| OS::PrintErr("Data 0x%" Pp " (%" Pd "): ", start_data, |
| stop_data - start_data); |
| OS::PrintErr("Alloc %s (%" Pd ")\n", name(), stop_objects - start_objects); |
| } |
| size_ += (stop_size - start_size) + (stop_data - start_data); |
| num_objects_ += (stop_objects - start_objects); |
| if (target_instance_size_ != kSizeVaries) { |
| target_memory_size_ += num_objects_ * target_instance_size_; |
| } |
| } |
| |
| void SerializationCluster::WriteAndMeasureFill(Serializer* serializer) { |
| intptr_t start = serializer->bytes_written(); |
| WriteFill(serializer); |
| intptr_t stop = serializer->bytes_written(); |
| if (FLAG_print_cluster_information) { |
| OS::PrintErr("Snapshot 0x%" Pp " (%" Pd "): Fill %s\n", start, stop - start, |
| name()); |
| } |
| size_ += (stop - start); |
| } |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
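| // Allocates |count| old-space objects of the given fixed size and assigns |
| // a ref to each, recording this cluster's ref range in start_index_ and |
| // stop_index_. |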
| DART_NOINLINE |
| void DeserializationCluster::ReadAllocFixedSize(Deserializer* d, |
| intptr_t instance_size) { |
| start_index_ = d->next_index(); |
| PageSpace* old_space = d->heap()->old_space(); |
| intptr_t count = d->ReadUnsigned(); |
| for (intptr_t i = 0; i < count; i++) { |
| d->AssignRef(old_space->AllocateSnapshot(instance_size)); |
| } |
| stop_index_ = d->next_index(); |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
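| // Computes the unboxed-fields bitmap as seen by the target. If host and |
| // target word sizes match, the host bitmap is reused directly; otherwise |
| // (a 64-bit host targeting 32-bit) each unboxed field is widened to cover |
| // two target words. |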
| static UnboxedFieldBitmap CalculateTargetUnboxedFieldsBitmap( |
| Serializer* s, |
| intptr_t class_id) { |
| const auto unboxed_fields_bitmap_host = |
| s->isolate_group()->shared_class_table()->GetUnboxedFieldsMapAt(class_id); |
| |
| UnboxedFieldBitmap unboxed_fields_bitmap; |
| if (unboxed_fields_bitmap_host.IsEmpty() || |
| kWordSize == compiler::target::kWordSize) { |
| unboxed_fields_bitmap = unboxed_fields_bitmap_host; |
| } else { |
| ASSERT(kWordSize == 8 && compiler::target::kWordSize == 4); |
| // A new bitmap is built if the word sizes in the target and |
| // host are different. |
| unboxed_fields_bitmap.Reset(); |
| intptr_t target_i = 0, host_i = 0; |
| |
| while (host_i < UnboxedFieldBitmap::Length()) { |
| // Each unboxed field has constant length, therefore the number of |
| // words used by it should double when compiling from 64-bit to 32-bit. |
| if (unboxed_fields_bitmap_host.Get(host_i++)) { |
| unboxed_fields_bitmap.Set(target_i++); |
| unboxed_fields_bitmap.Set(target_i++); |
| } else { |
| // For object pointers, the field always occupies one word. |
| target_i++; |
| } |
| } |
| } |
| |
| return unboxed_fields_bitmap; |
| } |
| |
| class ClassSerializationCluster : public SerializationCluster { |
| public: |
| explicit ClassSerializationCluster(intptr_t num_cids) |
| : SerializationCluster("Class", |
| kClassCid, |
| compiler::target::Class::InstanceSize()), |
| predefined_(kNumPredefinedCids), |
| objects_(num_cids) {} |
| ~ClassSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| ClassPtr cls = Class::RawCast(object); |
| intptr_t class_id = cls->untag()->id_; |
| |
| if (class_id == kIllegalCid) { |
| // Classes expected to be dropped by the precompiler should not be traced. |
| s->UnexpectedObject(cls, "Class with illegal cid"); |
| } |
| if (class_id < kNumPredefinedCids) { |
| // These classes are allocated by Object::Init or Object::InitOnce, so the |
| // deserializer must find them in the class table instead of allocating |
| // them. |
| predefined_.Add(cls); |
| } else { |
| objects_.Add(cls); |
| } |
| |
| PushFromTo(cls); |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| intptr_t count = predefined_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| ClassPtr cls = predefined_[i]; |
| s->AssignRef(cls); |
| AutoTraceObject(cls); |
| intptr_t class_id = cls->untag()->id_; |
| s->WriteCid(class_id); |
| } |
| count = objects_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| ClassPtr cls = objects_[i]; |
| s->AssignRef(cls); |
| } |
| } |
| |
| void WriteFill(Serializer* s) { |
| intptr_t count = predefined_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| WriteClass(s, predefined_[i]); |
| } |
| count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| WriteClass(s, objects_[i]); |
| } |
| } |
| |
| private: |
| void WriteClass(Serializer* s, ClassPtr cls) { |
| AutoTraceObjectName(cls, cls->untag()->name()); |
| WriteFromTo(cls); |
| intptr_t class_id = cls->untag()->id_; |
| if (class_id == kIllegalCid) { |
| s->UnexpectedObject(cls, "Class with illegal cid"); |
| } |
| s->WriteCid(class_id); |
| if (s->kind() == Snapshot::kFullCore && |
| RequireCanonicalTypeErasureOfConstants(cls)) { |
| s->UnexpectedObject(cls, "Class with non mode agnostic constants"); |
| } |
| if (s->kind() != Snapshot::kFullAOT) { |
| s->Write<uint32_t>(cls->untag()->kernel_offset_); |
| } |
| s->Write<int32_t>(Class::target_instance_size_in_words(cls)); |
| s->Write<int32_t>(Class::target_next_field_offset_in_words(cls)); |
| s->Write<int32_t>(Class::target_type_arguments_field_offset_in_words(cls)); |
| s->Write<int16_t>(cls->untag()->num_type_arguments_); |
| s->Write<uint16_t>(cls->untag()->num_native_fields_); |
| if (s->kind() != Snapshot::kFullAOT) { |
| s->WriteTokenPosition(cls->untag()->token_pos_); |
| s->WriteTokenPosition(cls->untag()->end_token_pos_); |
| } |
| s->Write<uint32_t>(cls->untag()->state_bits_); |
| |
| // In AOT, the bitmap of unboxed fields should also be serialized. |
| if (FLAG_precompiled_mode && !ClassTable::IsTopLevelCid(class_id)) { |
| s->WriteUnsigned64( |
| CalculateTargetUnboxedFieldsBitmap(s, class_id).Value()); |
| } |
| } |
| |
| GrowableArray<ClassPtr> predefined_; |
| GrowableArray<ClassPtr> objects_; |
| |
| bool RequireCanonicalTypeErasureOfConstants(ClassPtr cls) { |
| // Do not generate a core snapshot containing constants that would require |
| // a canonical erasure of their types if loaded in an isolate running in |
| // unsound nullability mode. |
| if (cls->untag()->host_type_arguments_field_offset_in_words_ == |
| Class::kNoTypeArguments || |
| cls->untag()->constants() == Array::null()) { |
| return false; |
| } |
| Zone* zone = Thread::Current()->zone(); |
| const Class& clazz = Class::Handle(zone, cls); |
| return clazz.RequireCanonicalTypeErasureOfConstants(zone); |
| } |
| }; |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
| class ClassDeserializationCluster : public DeserializationCluster { |
| public: |
| ClassDeserializationCluster() : DeserializationCluster("Class") {} |
| ~ClassDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| predefined_start_index_ = d->next_index(); |
| PageSpace* old_space = d->heap()->old_space(); |
| intptr_t count = d->ReadUnsigned(); |
| ClassTable* table = d->isolate_group()->class_table(); |
| for (intptr_t i = 0; i < count; i++) { |
| intptr_t class_id = d->ReadCid(); |
| ASSERT(table->HasValidClassAt(class_id)); |
| ClassPtr cls = table->At(class_id); |
| ASSERT(cls != nullptr); |
| d->AssignRef(cls); |
| } |
| predefined_stop_index_ = d->next_index(); |
| |
| start_index_ = d->next_index(); |
| count = d->ReadUnsigned(); |
| for (intptr_t i = 0; i < count; i++) { |
| d->AssignRef(old_space->AllocateSnapshot(Class::InstanceSize())); |
| } |
| stop_index_ = d->next_index(); |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| ClassTable* table = d->isolate_group()->class_table(); |
| |
| for (intptr_t id = predefined_start_index_; id < predefined_stop_index_; |
| id++) { |
| ClassPtr cls = static_cast<ClassPtr>(d->Ref(id)); |
| ReadFromTo(cls); |
| intptr_t class_id = d->ReadCid(); |
| cls->untag()->id_ = class_id; |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (d->kind() != Snapshot::kFullAOT) { |
| cls->untag()->kernel_offset_ = d->Read<uint32_t>(); |
| } |
| #endif |
| if (!IsInternalVMdefinedClassId(class_id)) { |
| cls->untag()->host_instance_size_in_words_ = d->Read<int32_t>(); |
| cls->untag()->host_next_field_offset_in_words_ = d->Read<int32_t>(); |
| #if defined(DART_PRECOMPILER) |
| // Only one pair is serialized. The target field only exists when |
| // DART_PRECOMPILER is defined. |
| cls->untag()->target_instance_size_in_words_ = |
| cls->untag()->host_instance_size_in_words_; |
| cls->untag()->target_next_field_offset_in_words_ = |
| cls->untag()->host_next_field_offset_in_words_; |
| #endif // defined(DART_PRECOMPILER) |
| } else { |
| d->Read<int32_t>(); // Skip. |
| d->Read<int32_t>(); // Skip. |
| } |
| cls->untag()->host_type_arguments_field_offset_in_words_ = |
| d->Read<int32_t>(); |
| #if defined(DART_PRECOMPILER) |
| cls->untag()->target_type_arguments_field_offset_in_words_ = |
| cls->untag()->host_type_arguments_field_offset_in_words_; |
| #endif // defined(DART_PRECOMPILER) |
| cls->untag()->num_type_arguments_ = d->Read<int16_t>(); |
| cls->untag()->num_native_fields_ = d->Read<uint16_t>(); |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| ASSERT(d->kind() != Snapshot::kFullAOT); |
| cls->untag()->token_pos_ = d->ReadTokenPosition(); |
| cls->untag()->end_token_pos_ = d->ReadTokenPosition(); |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| cls->untag()->state_bits_ = d->Read<uint32_t>(); |
| |
| if (FLAG_precompiled_mode) { |
| d->ReadUnsigned64(); // Skip unboxed fields bitmap. |
| } |
| } |
| |
| auto shared_class_table = d->isolate_group()->shared_class_table(); |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| ClassPtr cls = static_cast<ClassPtr>(d->Ref(id)); |
| Deserializer::InitializeHeader(cls, kClassCid, Class::InstanceSize()); |
| ReadFromTo(cls); |
| |
| intptr_t class_id = d->ReadCid(); |
| ASSERT(class_id >= kNumPredefinedCids); |
| cls->untag()->id_ = class_id; |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (d->kind() != Snapshot::kFullAOT) { |
| cls->untag()->kernel_offset_ = d->Read<uint32_t>(); |
| } |
| #endif |
| cls->untag()->host_instance_size_in_words_ = d->Read<int32_t>(); |
| cls->untag()->host_next_field_offset_in_words_ = d->Read<int32_t>(); |
| cls->untag()->host_type_arguments_field_offset_in_words_ = |
| d->Read<int32_t>(); |
| #if defined(DART_PRECOMPILER) |
| cls->untag()->target_instance_size_in_words_ = |
| cls->untag()->host_instance_size_in_words_; |
| cls->untag()->target_next_field_offset_in_words_ = |
| cls->untag()->host_next_field_offset_in_words_; |
| cls->untag()->target_type_arguments_field_offset_in_words_ = |
| cls->untag()->host_type_arguments_field_offset_in_words_; |
| #endif // defined(DART_PRECOMPILER) |
| cls->untag()->num_type_arguments_ = d->Read<int16_t>(); |
| cls->untag()->num_native_fields_ = d->Read<uint16_t>(); |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| ASSERT(d->kind() != Snapshot::kFullAOT); |
| cls->untag()->token_pos_ = d->ReadTokenPosition(); |
| cls->untag()->end_token_pos_ = d->ReadTokenPosition(); |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| cls->untag()->state_bits_ = d->Read<uint32_t>(); |
| |
| table->AllocateIndex(class_id); |
| table->SetAt(class_id, cls); |
| |
| if (FLAG_precompiled_mode && !ClassTable::IsTopLevelCid(class_id)) { |
| const UnboxedFieldBitmap unboxed_fields_map(d->ReadUnsigned64()); |
| shared_class_table->SetUnboxedFieldsMapAt(class_id, unboxed_fields_map); |
| } |
| } |
| } |
| |
| private: |
| intptr_t predefined_start_index_; |
| intptr_t predefined_stop_index_; |
| }; |
| |
| // Superclasses for writing out clusters which contain objects grouped into |
| // a canonical set (e.g. String, Type, TypeArguments, etc.). |
| // To save space in the snapshot we avoid writing such canonical sets |
| // explicitly as Array objects and instead use a different encoding: |
| // objects in a cluster representing a canonical set are sorted to appear |
| // in the same order they appear in the Array representing the set, and we |
| // additionally write out an array of values describing the gaps between |
| // objects. |
| // |
| // In some situations not all canonical objects of a given type need to |
| // be added to the resulting canonical set because they are cached in some |
| // special way (see Type::Canonicalize as an example, which caches |
| // declaration types specially). In this case a subclass can set |
| // kAllCanonicalObjectsAreIncludedIntoSet to |false| and override the |
| // IsInCanonicalSet filter. |
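| // |
| // As an illustrative sketch of the gap encoding (the concrete indices |
| // here are hypothetical): if the backing table's first key index is 2 and |
| // the occupied slots are at indices 2, 3 and 6, the objects are written |
| // in that slot order and the gaps array records the run of unused slots |
| // preceding each occupied one, i.e. 0, 0 and 2, which suffices to rebuild |
| // the table at load time. |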
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| template <typename SetType, |
| typename HandleType, |
| typename PointerType, |
| bool kAllCanonicalObjectsAreIncludedIntoSet = true> |
| class CanonicalSetSerializationCluster : public SerializationCluster { |
| protected: |
| CanonicalSetSerializationCluster(intptr_t cid, |
| bool is_canonical, |
| bool represents_canonical_set, |
| const char* name, |
| intptr_t target_instance_size = 0) |
| : SerializationCluster(name, cid, target_instance_size, is_canonical), |
| represents_canonical_set_(represents_canonical_set) {} |
| |
| virtual bool IsInCanonicalSet(Serializer* s, PointerType ptr) { |
| // Subclasses must override this function if |
| // kAllCanonicalObjectsAreIncludedIntoSet is set to |false|. |
| ASSERT(kAllCanonicalObjectsAreIncludedIntoSet); |
| return true; |
| } |
| |
| void ReorderObjects(Serializer* s) { |
| if (!represents_canonical_set_) { |
| return; |
| } |
| |
| // Sort objects before writing them out so that they appear in the same |
| // order as they would appear in the canonical set's backing array. |
| using ZoneCanonicalSet = |
| HashTable<typename SetType::Traits, 0, 0, GrowableArrayStorageTraits>; |
| |
| // Compute required capacity for the hashtable (to avoid overallocating). |
| intptr_t required_capacity = 0; |
| for (auto ptr : objects_) { |
| if (kAllCanonicalObjectsAreIncludedIntoSet || IsInCanonicalSet(s, ptr)) { |
| required_capacity++; |
| } |
| } |
| // Over-allocate capacity so a few inserts can happen at startup without |
| // causing a rehash. |
| const intptr_t kSpareCapacity = 32; |
| required_capacity = static_cast<intptr_t>( |
| static_cast<double>(required_capacity + kSpareCapacity) / |
| HashTables::kMaxLoadFactor); |
| |
| intptr_t num_occupied = 0; |
| |
| // Build canonical set out of objects that should belong to it. |
| // Objects that don't belong to it are copied to the prefix of objects_. |
| ZoneCanonicalSet table( |
| s->zone(), HashTables::New<ZoneCanonicalSet>(required_capacity)); |
| HandleType& element = HandleType::Handle(s->zone()); |
| for (auto ptr : objects_) { |
| if (kAllCanonicalObjectsAreIncludedIntoSet || IsInCanonicalSet(s, ptr)) { |
| element ^= ptr; |
| intptr_t entry = -1; |
| const bool present = table.FindKeyOrDeletedOrUnused(element, &entry); |
| if (!present) { |
| table.InsertKey(entry, element); |
| } else { |
| // Two recursive types with different topology (and hashes) |
| // may be equal. |
| ASSERT(element.IsRecursive()); |
| objects_[num_occupied++] = ptr; |
| } |
| } else { |
| objects_[num_occupied++] = ptr; |
| } |
| } |
| |
| const auto prefix_length = num_occupied; |
| |
| // Compute objects_ order and gaps based on canonical set layout. |
| auto& arr = table.Release(); |
| intptr_t last_occupied = ZoneCanonicalSet::kFirstKeyIndex - 1; |
| for (intptr_t i = ZoneCanonicalSet::kFirstKeyIndex, length = arr.Length(); |
| i < length; i++) { |
| ObjectPtr v = arr.At(i); |
| ASSERT(v != ZoneCanonicalSet::DeletedMarker().ptr()); |
| if (v != ZoneCanonicalSet::UnusedMarker().ptr()) { |
| const intptr_t unused_run_length = (i - 1) - last_occupied; |
| gaps_.Add(unused_run_length); |
| objects_[num_occupied++] = static_cast<PointerType>(v); |
| last_occupied = i; |
| } |
| } |
| ASSERT(num_occupied == objects_.length()); |
| ASSERT(prefix_length == (objects_.length() - gaps_.length())); |
| table_length_ = arr.Length(); |
| } |
| |
| void WriteCanonicalSetLayout(Serializer* s) { |
| if (represents_canonical_set_) { |
| s->WriteUnsigned(table_length_); |
| s->WriteUnsigned(objects_.length() - gaps_.length()); |
| for (auto gap : gaps_) { |
| s->WriteUnsigned(gap); |
| } |
| target_memory_size_ += |
| compiler::target::Array::InstanceSize(table_length_); |
| } |
| } |
| |
| GrowableArray<PointerType> objects_; |
| |
| private: |
| const bool represents_canonical_set_; |
| GrowableArray<intptr_t> gaps_; |
| intptr_t table_length_ = 0; |
| }; |
| #endif |
| |
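| // Deserialization counterpart: rebuilds the canonical set's backing Array |
| // from the table length, element count and gap values emitted by |
| // WriteCanonicalSetLayout, instead of reading an Array object from the |
| // snapshot. |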
| template <typename SetType, bool kAllCanonicalObjectsAreIncludedIntoSet = true> |
| class CanonicalSetDeserializationCluster : public DeserializationCluster { |
| public: |
| CanonicalSetDeserializationCluster(bool is_canonical, |
| bool is_root_unit, |
| const char* name) |
| : DeserializationCluster(name, is_canonical), |
| is_root_unit_(is_root_unit), |
| table_(Array::Handle()) {} |
| |
| void BuildCanonicalSetFromLayout(Deserializer* d) { |
| if (!is_root_unit_ || !is_canonical()) { |
| return; |
| } |
| |
| const auto table_length = d->ReadUnsigned(); |
| first_element_ = d->ReadUnsigned(); |
| const intptr_t count = stop_index_ - (start_index_ + first_element_); |
| auto table = StartDeserialization(d, table_length, count); |
| for (intptr_t i = start_index_ + first_element_; i < stop_index_; i++) { |
| table.FillGap(d->ReadUnsigned()); |
| table.WriteElement(d, d->Ref(i)); |
| } |
| table_ = table.Finish(); |
| } |
| |
| protected: |
| const bool is_root_unit_; |
| intptr_t first_element_; |
| Array& table_; |
| |
| void VerifyCanonicalSet(Deserializer* d, |
| const Array& refs, |
| const Array& current_table) { |
| #if defined(DEBUG) |
| // First check that we are not overwriting a table and losing information. |
| if (!current_table.IsNull()) { |
| SetType current_set(d->zone(), current_table.ptr()); |
| ASSERT(current_set.NumOccupied() == 0); |
| current_set.Release(); |
| } |
| |
| // Now check that the manually created table behaves correctly as a |
| // canonical set. |
| SetType canonical_set(d->zone(), table_.ptr()); |
| Object& key = Object::Handle(); |
| for (intptr_t i = start_index_ + first_element_; i < stop_index_; i++) { |
| key = refs.At(i); |
| ASSERT(canonical_set.GetOrNull(key) != Object::null()); |
| } |
| canonical_set.Release(); |
| #endif // defined(DEBUG) |
| } |
| |
| private: |
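| // Writes elements sequentially into the backing Array of a canonical set, |
| // filling the runs of unused slots between them with the set's unused |
| // marker. |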
| struct DeserializationFinger { |
| ArrayPtr table; |
| intptr_t current_index; |
| ObjectPtr gap_element; |
| |
| void FillGap(int length) { |
| for (intptr_t j = 0; j < length; j++) { |
| table->untag()->data()[current_index + j] = gap_element; |
| } |
| current_index += length; |
| } |
| |
| void WriteElement(Deserializer* d, ObjectPtr object) { |
| table->untag()->data()[current_index++] = object; |
| } |
| |
| ArrayPtr Finish() { |
| if (table != Array::null()) { |
| FillGap(Smi::Value(table->untag()->length()) - current_index); |
| } |
| auto result = table; |
| table = Array::null(); |
| return result; |
| } |
| }; |
| |
| static DeserializationFinger StartDeserialization(Deserializer* d, |
| intptr_t length, |
| intptr_t count) { |
| const intptr_t instance_size = Array::InstanceSize(length); |
| ArrayPtr table = static_cast<ArrayPtr>( |
| d->heap()->old_space()->AllocateSnapshot(instance_size)); |
| Deserializer::InitializeHeader(table, kArrayCid, instance_size); |
| table->untag()->type_arguments_ = TypeArguments::null(); |
| table->untag()->length_ = CompressedSmiPtr(Smi::New(length)); |
| for (intptr_t i = 0; i < SetType::kFirstKeyIndex; i++) { |
| table->untag()->data()[i] = Smi::New(0); |
| } |
| table->untag()->data()[SetType::kOccupiedEntriesIndex] = Smi::New(count); |
| return {table, SetType::kFirstKeyIndex, SetType::UnusedMarker().ptr()}; |
| } |
| }; |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| class TypeParametersSerializationCluster : public SerializationCluster { |
| public: |
| TypeParametersSerializationCluster() |
| : SerializationCluster("TypeParameters", |
| kTypeParametersCid, |
| compiler::target::TypeParameters::InstanceSize()) { |
| } |
| ~TypeParametersSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| TypeParametersPtr type_params = TypeParameters::RawCast(object); |
| objects_.Add(type_params); |
| PushFromTo(type_params); |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| TypeParametersPtr type_params = objects_[i]; |
| s->AssignRef(type_params); |
| } |
| } |
| |
| void WriteFill(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| TypeParametersPtr type_params = objects_[i]; |
| AutoTraceObject(type_params); |
| WriteFromTo(type_params); |
| } |
| } |
| |
| private: |
| GrowableArray<TypeParametersPtr> objects_; |
| }; |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
| class TypeParametersDeserializationCluster : public DeserializationCluster { |
| public: |
| TypeParametersDeserializationCluster() |
| : DeserializationCluster("TypeParameters") {} |
| ~TypeParametersDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| ReadAllocFixedSize(d, TypeParameters::InstanceSize()); |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| ASSERT(!is_canonical()); // Never canonical. |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| TypeParametersPtr type_params = |
| static_cast<TypeParametersPtr>(d->Ref(id)); |
| Deserializer::InitializeHeader(type_params, kTypeParametersCid, |
| TypeParameters::InstanceSize()); |
| ReadFromTo(type_params); |
| } |
| } |
| }; |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| class TypeArgumentsSerializationCluster |
| : public CanonicalSetSerializationCluster<CanonicalTypeArgumentsSet, |
| TypeArguments, |
| TypeArgumentsPtr> { |
| public: |
| TypeArgumentsSerializationCluster(bool is_canonical, |
| bool represents_canonical_set) |
| : CanonicalSetSerializationCluster(kTypeArgumentsCid, |
| is_canonical, |
| represents_canonical_set, |
| "TypeArguments") {} |
| ~TypeArgumentsSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| TypeArgumentsPtr type_args = TypeArguments::RawCast(object); |
| objects_.Add(type_args); |
| |
| s->Push(type_args->untag()->instantiations()); |
| const intptr_t length = Smi::Value(type_args->untag()->length()); |
| for (intptr_t i = 0; i < length; i++) { |
| s->Push(type_args->untag()->element(i)); |
| } |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| s->WriteUnsigned(count); |
| ReorderObjects(s); |
| for (intptr_t i = 0; i < count; i++) { |
| TypeArgumentsPtr type_args = objects_[i]; |
| s->AssignRef(type_args); |
| AutoTraceObject(type_args); |
| const intptr_t length = Smi::Value(type_args->untag()->length()); |
| s->WriteUnsigned(length); |
| target_memory_size_ += |
| compiler::target::TypeArguments::InstanceSize(length); |
| } |
| WriteCanonicalSetLayout(s); |
| } |
| |
| void WriteFill(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| TypeArgumentsPtr type_args = objects_[i]; |
| AutoTraceObject(type_args); |
| const intptr_t length = Smi::Value(type_args->untag()->length()); |
| s->WriteUnsigned(length); |
| intptr_t hash = Smi::Value(type_args->untag()->hash()); |
| s->Write<int32_t>(hash); |
| const intptr_t nullability = |
| Smi::Value(type_args->untag()->nullability()); |
| s->WriteUnsigned(nullability); |
| WriteField(type_args, instantiations()); |
| for (intptr_t j = 0; j < length; j++) { |
| s->WriteElementRef(type_args->untag()->element(j), j); |
| } |
| } |
| } |
| }; |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
| class TypeArgumentsDeserializationCluster |
| : public CanonicalSetDeserializationCluster<CanonicalTypeArgumentsSet> { |
| public: |
| explicit TypeArgumentsDeserializationCluster(bool is_canonical, |
| bool is_root_unit) |
| : CanonicalSetDeserializationCluster(is_canonical, |
| is_root_unit, |
| "TypeArguments") {} |
| ~TypeArgumentsDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| start_index_ = d->next_index(); |
| PageSpace* old_space = d->heap()->old_space(); |
| const intptr_t count = d->ReadUnsigned(); |
| for (intptr_t i = 0; i < count; i++) { |
| const intptr_t length = d->ReadUnsigned(); |
| d->AssignRef( |
| old_space->AllocateSnapshot(TypeArguments::InstanceSize(length))); |
| } |
| stop_index_ = d->next_index(); |
| BuildCanonicalSetFromLayout(d); |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| TypeArgumentsPtr type_args = static_cast<TypeArgumentsPtr>(d->Ref(id)); |
| const intptr_t length = d->ReadUnsigned(); |
| Deserializer::InitializeHeader(type_args, kTypeArgumentsCid, |
| TypeArguments::InstanceSize(length), |
| primary && is_canonical()); |
| type_args->untag()->length_ = Smi::New(length); |
| type_args->untag()->hash_ = Smi::New(d->Read<int32_t>()); |
| type_args->untag()->nullability_ = Smi::New(d->ReadUnsigned()); |
| type_args->untag()->instantiations_ = static_cast<ArrayPtr>(d->ReadRef()); |
| for (intptr_t j = 0; j < length; j++) { |
| type_args->untag()->types()[j] = |
| static_cast<AbstractTypePtr>(d->ReadRef()); |
| } |
| } |
| } |
| |
| void PostLoad(Deserializer* d, const Array& refs, bool primary) { |
| if (!table_.IsNull()) { |
| auto object_store = d->isolate_group()->object_store(); |
| VerifyCanonicalSet( |
| d, refs, Array::Handle(object_store->canonical_type_arguments())); |
| object_store->set_canonical_type_arguments(table_); |
| } else if (!primary && is_canonical()) { |
| TypeArguments& type_arg = TypeArguments::Handle(d->zone()); |
| for (intptr_t i = start_index_; i < stop_index_; i++) { |
| type_arg ^= refs.At(i); |
| type_arg = type_arg.Canonicalize(d->thread(), nullptr); |
| refs.SetAt(i, type_arg); |
| } |
| } |
| } |
| }; |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| class PatchClassSerializationCluster : public SerializationCluster { |
| public: |
| PatchClassSerializationCluster() |
| : SerializationCluster("PatchClass", |
| kPatchClassCid, |
| compiler::target::PatchClass::InstanceSize()) {} |
| ~PatchClassSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| PatchClassPtr cls = PatchClass::RawCast(object); |
| objects_.Add(cls); |
| PushFromTo(cls); |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| PatchClassPtr cls = objects_[i]; |
| s->AssignRef(cls); |
| } |
| } |
| |
| void WriteFill(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| PatchClassPtr cls = objects_[i]; |
| AutoTraceObject(cls); |
| WriteFromTo(cls); |
| if (s->kind() != Snapshot::kFullAOT) { |
| s->Write<int32_t>(cls->untag()->library_kernel_offset_); |
| } |
| } |
| } |
| |
| private: |
| GrowableArray<PatchClassPtr> objects_; |
| }; |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
| class PatchClassDeserializationCluster : public DeserializationCluster { |
| public: |
| PatchClassDeserializationCluster() : DeserializationCluster("PatchClass") {} |
| ~PatchClassDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| ReadAllocFixedSize(d, PatchClass::InstanceSize()); |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| ASSERT(!is_canonical()); // Never canonical. |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| PatchClassPtr cls = static_cast<PatchClassPtr>(d->Ref(id)); |
| Deserializer::InitializeHeader(cls, kPatchClassCid, |
| PatchClass::InstanceSize()); |
| ReadFromTo(cls); |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (d->kind() != Snapshot::kFullAOT) { |
| cls->untag()->library_kernel_offset_ = d->Read<int32_t>(); |
| } |
| #endif |
| } |
| } |
| }; |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| class FunctionSerializationCluster : public SerializationCluster { |
| public: |
| FunctionSerializationCluster() |
| : SerializationCluster("Function", |
| kFunctionCid, |
| compiler::target::Function::InstanceSize()) {} |
| ~FunctionSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| Snapshot::Kind kind = s->kind(); |
| FunctionPtr func = Function::RawCast(object); |
| objects_.Add(func); |
| |
| PushFromTo(func); |
| if (kind == Snapshot::kFullAOT) { |
| s->Push(func->untag()->code()); |
| } else if (kind == Snapshot::kFullJIT) { |
| NOT_IN_PRECOMPILED(s->Push(func->untag()->unoptimized_code())); |
| s->Push(func->untag()->code()); |
| s->Push(func->untag()->ic_data_array()); |
| } |
| if (kind != Snapshot::kFullAOT) { |
| NOT_IN_PRECOMPILED(s->Push(func->untag()->positional_parameter_names())); |
| } |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| FunctionPtr func = objects_[i]; |
| s->AssignRef(func); |
| } |
| } |
| |
| void WriteFill(Serializer* s) { |
| Snapshot::Kind kind = s->kind(); |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| FunctionPtr func = objects_[i]; |
| AutoTraceObjectName(func, MakeDisambiguatedFunctionName(s, func)); |
| WriteFromTo(func); |
| if (kind == Snapshot::kFullAOT) { |
| #if defined(DART_PRECOMPILER) |
| CodePtr code = func->untag()->code(); |
| const auto code_index = s->GetCodeIndex(code); |
| s->WriteUnsigned(code_index); |
| s->AttributePropertyRef(code, "code_"); |
| #else |
| UNREACHABLE(); |
| #endif |
| } else if (s->kind() == Snapshot::kFullJIT) { |
| NOT_IN_PRECOMPILED(WriteCompressedField(func, unoptimized_code)); |
| WriteCompressedField(func, code); |
| WriteCompressedField(func, ic_data_array); |
| } |
| |
| if (kind != Snapshot::kFullAOT) { |
| NOT_IN_PRECOMPILED( |
| WriteCompressedField(func, positional_parameter_names)); |
| s->WriteTokenPosition(func->untag()->token_pos_); |
| s->WriteTokenPosition(func->untag()->end_token_pos_); |
| s->Write<uint32_t>(func->untag()->kernel_offset_); |
| } |
| |
| s->Write<uint32_t>(func->untag()->packed_fields_); |
| s->Write<uint32_t>(func->untag()->kind_tag_); |
| } |
| } |
| |
| static const char* MakeDisambiguatedFunctionName(Serializer* s, |
| FunctionPtr f) { |
| if (s->profile_writer() == nullptr) { |
| return nullptr; |
| } |
| |
| REUSABLE_FUNCTION_HANDLESCOPE(s->thread()); |
| Function& fun = reused_function_handle.Handle(); |
| fun = f; |
| ZoneTextBuffer printer(s->thread()->zone()); |
| fun.PrintName(NameFormattingParams::DisambiguatedUnqualified( |
| Object::NameVisibility::kInternalName), |
| &printer); |
| return printer.buffer(); |
| } |
| |
| private: |
| GrowableArray<FunctionPtr> objects_; |
| }; |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
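| // Decodes a code_index written by Serializer::GetCodeIndex. Discarded |
| // code always yields its entry point together with the UnknownDartCode |
| // stub; for non-discarded Code objects *entry_point is filled in only |
| // when |need_entry_point_for_non_discarded| is true. |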
| template <bool need_entry_point_for_non_discarded> |
| DART_FORCE_INLINE static CodePtr GetCodeAndEntryPointByIndex( |
| const Deserializer* d, |
| intptr_t code_index, |
| uword* entry_point) { |
| code_index -= 1; // 0 is reserved for LazyCompile stub. |
| |
| // In the root unit and in the VM isolate snapshot, code indices are |
| // self-contained: they point into the instruction table and/or into the |
| // code cluster. In non-root units we might also refer to code objects |
| // from the parent unit, which means code_index is biased by |
| // num_base_objects_. |
| const intptr_t base = d->is_non_root_unit() ? d->num_base_objects() : 0; |
| if (code_index < base) { |
| CodePtr code = static_cast<CodePtr>(d->Ref(code_index)); |
| if (need_entry_point_for_non_discarded) { |
| *entry_point = Code::EntryPointOf(code); |
| } |
| return code; |
| } |
| code_index -= base; |
| |
| // At this point code_index is referring to a code object which is either |
| // discarded or exists in the Code cluster. Non-discarded Code objects |
| // are associated with the tail of the instruction table and have the |
| // same order there and in the Code cluster. This means that |
| // subtracting first_entry_with_code yields index into the Code cluster. |
| // This also works for deferred code objects in root unit's snapshot |
| // due to the choice of encoding (see Serializer::GetCodeIndex). |
| const intptr_t first_entry_with_code = |
| d->instructions_table().rodata()->first_entry_with_code; |
| if (code_index < first_entry_with_code) { |
| *entry_point = d->instructions_table().EntryPointAt(code_index); |
| return StubCode::UnknownDartCode().ptr(); |
| } else { |
| const intptr_t cluster_index = code_index - first_entry_with_code; |
| CodePtr code = |
| static_cast<CodePtr>(d->Ref(d->code_start_index() + cluster_index)); |
| if (need_entry_point_for_non_discarded) { |
| *entry_point = Code::EntryPointOf(code); |
| } |
| return code; |
| } |
| } |
| |
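| // A code_index of 0 always denotes the LazyCompile stub. In AOT the |
| // remaining indices are decoded by GetCodeAndEntryPointByIndex above; in |
| // JIT they are 1-based references into this snapshot's code cluster. |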
| CodePtr Deserializer::GetCodeByIndex(intptr_t code_index, |
| uword* entry_point) const { |
| // See Serializer::GetCodeIndex for how code_index is encoded. |
| if (code_index == 0) { |
| return StubCode::LazyCompile().ptr(); |
| } else if (FLAG_precompiled_mode) { |
| return GetCodeAndEntryPointByIndex< |
| /*need_entry_point_for_non_discarded=*/false>(this, code_index, |
| entry_point); |
| } else { |
| // -1 below because 0 is reserved for LazyCompile stub. |
| const intptr_t ref = code_start_index_ + code_index - 1; |
| ASSERT(code_start_index_ <= ref && ref < code_stop_index_); |
| return static_cast<CodePtr>(Ref(ref)); |
| } |
| } |
| |
| intptr_t Deserializer::CodeIndexToClusterIndex(const InstructionsTable& table, |
| intptr_t code_index) { |
| // Note: code indices we are interpreting here originate from the root |
| // loading unit, which means the base is equal to 0. |
| // See comments which clarify the connection between code_index and |
| // index into the Code cluster. |
| ASSERT(FLAG_precompiled_mode); |
| const intptr_t first_entry_with_code = table.rodata()->first_entry_with_code; |
| return code_index - 1 - first_entry_with_code; |
| } |
| |
| uword Deserializer::GetEntryPointByCodeIndex(intptr_t code_index) const { |
| // See Deserializer::GetCodeByIndex which this code repeats. |
| ASSERT(FLAG_precompiled_mode); |
| uword entry_point = 0; |
| GetCodeAndEntryPointByIndex</*need_entry_point_for_non_discarded=*/true>( |
| this, code_index, &entry_point); |
| return entry_point; |
| } |
| |
| class FunctionDeserializationCluster : public DeserializationCluster { |
| public: |
| FunctionDeserializationCluster() : DeserializationCluster("Function") {} |
| ~FunctionDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| ReadAllocFixedSize(d, Function::InstanceSize()); |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| ASSERT(!is_canonical()); // Never canonical. |
| Snapshot::Kind kind = d->kind(); |
| |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| FunctionPtr func = static_cast<FunctionPtr>(d->Ref(id)); |
| Deserializer::InitializeHeader(func, kFunctionCid, |
| Function::InstanceSize()); |
| ReadFromTo(func); |
| |
| #if defined(DEBUG) |
| func->untag()->entry_point_ = 0; |
| func->untag()->unchecked_entry_point_ = 0; |
| #endif |
| |
| if (kind == Snapshot::kFullAOT) { |
| const intptr_t code_index = d->ReadUnsigned(); |
| uword entry_point = 0; |
| CodePtr code = d->GetCodeByIndex(code_index, &entry_point); |
| func->untag()->code_ = code; |
| if (entry_point != 0) { |
| func->untag()->entry_point_ = entry_point; |
| func->untag()->unchecked_entry_point_ = entry_point; |
| } |
| } else if (kind == Snapshot::kFullJIT) { |
| NOT_IN_PRECOMPILED(func->untag()->unoptimized_code_ = |
| static_cast<CodePtr>(d->ReadRef())); |
| func->untag()->code_ = static_cast<CodePtr>(d->ReadRef()); |
| func->untag()->ic_data_array_ = static_cast<ArrayPtr>(d->ReadRef()); |
| } |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (kind != Snapshot::kFullAOT) { |
| func->untag()->positional_parameter_names_ = |
| static_cast<ArrayPtr>(d->ReadRef()); |
| func->untag()->token_pos_ = d->ReadTokenPosition(); |
| func->untag()->end_token_pos_ = d->ReadTokenPosition(); |
| func->untag()->kernel_offset_ = d->Read<uint32_t>(); |
| } |
| func->untag()->unboxed_parameters_info_.Reset(); |
| #endif |
| func->untag()->packed_fields_ = d->Read<uint32_t>(); |
| func->untag()->kind_tag_ = d->Read<uint32_t>(); |
| if (kind == Snapshot::kFullAOT) { |
| // Omit fields used to support de/reoptimization. |
| } else { |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| func->untag()->usage_counter_ = 0; |
| func->untag()->optimized_instruction_count_ = 0; |
| func->untag()->optimized_call_site_count_ = 0; |
| func->untag()->deoptimization_counter_ = 0; |
| func->untag()->state_bits_ = 0; |
| func->untag()->inlining_depth_ = 0; |
| #endif |
| } |
| } |
| } |
| |
| void PostLoad(Deserializer* d, const Array& refs, bool primary) { |
| if (d->kind() == Snapshot::kFullAOT) { |
| Function& func = Function::Handle(d->zone()); |
| for (intptr_t i = start_index_; i < stop_index_; i++) { |
| func ^= refs.At(i); |
| auto const code = func.ptr()->untag()->code(); |
| ASSERT(code->IsCode()); |
| if (!Code::IsUnknownDartCode(code)) { |
| uword entry_point = code->untag()->entry_point_; |
| ASSERT(entry_point != 0); |
| func.ptr()->untag()->entry_point_ = entry_point; |
| uword unchecked_entry_point = code->untag()->unchecked_entry_point_; |
| ASSERT(unchecked_entry_point != 0); |
| func.ptr()->untag()->unchecked_entry_point_ = unchecked_entry_point; |
| } |
| } |
| } else if (d->kind() == Snapshot::kFullJIT) { |
| Function& func = Function::Handle(d->zone()); |
| Code& code = Code::Handle(d->zone()); |
| for (intptr_t i = start_index_; i < stop_index_; i++) { |
| func ^= refs.At(i); |
| code = func.CurrentCode(); |
| if (func.HasCode() && !code.IsDisabled()) { |
| func.SetInstructionsSafe(code); // Set entrypoint. |
| func.SetWasCompiled(true); |
| } else { |
| func.ClearCodeSafe();  // Set code and entrypoint to lazy compile stub. |
| } |
| } |
| } else { |
| Function& func = Function::Handle(d->zone()); |
| for (intptr_t i = start_index_; i < stop_index_; i++) { |
| func ^= refs.At(i); |
| func.ClearCodeSafe(); // Set code and entrypoint to lazy compile stub. |
| } |
| } |
| } |
| }; |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| class ClosureDataSerializationCluster : public SerializationCluster { |
| public: |
| ClosureDataSerializationCluster() |
| : SerializationCluster("ClosureData", |
| kClosureDataCid, |
| compiler::target::ClosureData::InstanceSize()) {} |
| ~ClosureDataSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| ClosureDataPtr data = ClosureData::RawCast(object); |
| objects_.Add(data); |
| |
| if (s->kind() != Snapshot::kFullAOT) { |
| s->Push(data->untag()->context_scope()); |
| } |
| s->Push(data->untag()->parent_function()); |
| s->Push(data->untag()->closure()); |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| ClosureDataPtr data = objects_[i]; |
| s->AssignRef(data); |
| } |
| } |
| |
| void WriteFill(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| ClosureDataPtr data = objects_[i]; |
| AutoTraceObject(data); |
| if (s->kind() != Snapshot::kFullAOT) { |
| WriteCompressedField(data, context_scope); |
| } |
| WriteCompressedField(data, parent_function); |
| WriteCompressedField(data, closure); |
| s->WriteUnsigned( |
| static_cast<intptr_t>(data->untag()->default_type_arguments_kind_)); |
| } |
| } |
| |
| private: |
| GrowableArray<ClosureDataPtr> objects_; |
| }; |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
| class ClosureDataDeserializationCluster : public DeserializationCluster { |
| public: |
| ClosureDataDeserializationCluster() : DeserializationCluster("ClosureData") {} |
| ~ClosureDataDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| ReadAllocFixedSize(d, ClosureData::InstanceSize()); |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| ASSERT(!is_canonical()); // Never canonical. |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| ClosureDataPtr data = static_cast<ClosureDataPtr>(d->Ref(id)); |
| Deserializer::InitializeHeader(data, kClosureDataCid, |
| ClosureData::InstanceSize()); |
| if (d->kind() == Snapshot::kFullAOT) { |
| data->untag()->context_scope_ = ContextScope::null(); |
| } else { |
| data->untag()->context_scope_ = |
| static_cast<ContextScopePtr>(d->ReadRef()); |
| } |
| data->untag()->parent_function_ = static_cast<FunctionPtr>(d->ReadRef()); |
| data->untag()->closure_ = static_cast<ClosurePtr>(d->ReadRef()); |
| data->untag()->default_type_arguments_kind_ = |
| static_cast<ClosureData::DefaultTypeArgumentsKind>(d->ReadUnsigned()); |
| } |
| } |
| }; |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| class FfiTrampolineDataSerializationCluster : public SerializationCluster { |
| public: |
| FfiTrampolineDataSerializationCluster() |
| : SerializationCluster( |
| "FfiTrampolineData", |
| kFfiTrampolineDataCid, |
| compiler::target::FfiTrampolineData::InstanceSize()) {} |
| ~FfiTrampolineDataSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| FfiTrampolineDataPtr data = FfiTrampolineData::RawCast(object); |
| objects_.Add(data); |
| PushFromTo(data); |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| s->AssignRef(objects_[i]); |
| } |
| } |
| |
| void WriteFill(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| FfiTrampolineDataPtr const data = objects_[i]; |
| AutoTraceObject(data); |
| WriteFromTo(data); |
| |
| if (s->kind() == Snapshot::kFullAOT) { |
| s->WriteUnsigned(data->untag()->callback_id_); |
| } else { |
| // FFI callbacks can only be written to AOT snapshots. |
| ASSERT(data->untag()->callback_target() == Object::null()); |
| } |
| } |
| } |
| |
| private: |
| GrowableArray<FfiTrampolineDataPtr> objects_; |
| }; |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
| class FfiTrampolineDataDeserializationCluster : public DeserializationCluster { |
| public: |
| FfiTrampolineDataDeserializationCluster() |
| : DeserializationCluster("FfiTrampolineData") {} |
| ~FfiTrampolineDataDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| ReadAllocFixedSize(d, FfiTrampolineData::InstanceSize()); |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| ASSERT(!is_canonical()); // Never canonical. |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| FfiTrampolineDataPtr data = static_cast<FfiTrampolineDataPtr>(d->Ref(id)); |
| Deserializer::InitializeHeader(data, kFfiTrampolineDataCid, |
| FfiTrampolineData::InstanceSize()); |
| ReadFromTo(data); |
| data->untag()->callback_id_ = |
| d->kind() == Snapshot::kFullAOT ? d->ReadUnsigned() : 0; |
| } |
| } |
| }; |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| class FieldSerializationCluster : public SerializationCluster { |
| public: |
| FieldSerializationCluster() |
| : SerializationCluster("Field", |
| kFieldCid, |
| compiler::target::Field::InstanceSize()) {} |
| ~FieldSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| FieldPtr field = Field::RawCast(object); |
| objects_.Add(field); |
| |
| Snapshot::Kind kind = s->kind(); |
| |
| s->Push(field->untag()->name()); |
| s->Push(field->untag()->owner()); |
| s->Push(field->untag()->type()); |
| // Write out the initializer function. |
| s->Push(field->untag()->initializer_function()); |
| |
| if (kind != Snapshot::kFullAOT) { |
| s->Push(field->untag()->guarded_list_length()); |
| } |
| if (kind == Snapshot::kFullJIT) { |
| s->Push(field->untag()->dependent_code()); |
| } |
| // Write out either the initial static value or field offset. |
| if (Field::StaticBit::decode(field->untag()->kind_bits_)) { |
| const intptr_t field_id = |
| Smi::Value(field->untag()->host_offset_or_field_id()); |
| s->Push(s->initial_field_table()->At(field_id)); |
| } else { |
| s->Push(Smi::New(Field::TargetOffsetOf(field))); |
| } |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| FieldPtr field = objects_[i]; |
| s->AssignRef(field); |
| } |
| } |
| |
| void WriteFill(Serializer* s) { |
| Snapshot::Kind kind = s->kind(); |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| FieldPtr field = objects_[i]; |
| AutoTraceObjectName(field, field->untag()->name()); |
| |
| WriteCompressedField(field, name); |
| WriteCompressedField(field, owner); |
| WriteCompressedField(field, type); |
| // Write out the initializer function and initial value if not in AOT. |
| WriteCompressedField(field, initializer_function); |
| if (kind != Snapshot::kFullAOT) { |
| WriteCompressedField(field, guarded_list_length); |
| } |
| if (kind == Snapshot::kFullJIT) { |
| WriteCompressedField(field, dependent_code); |
| } |
| |
| if (kind != Snapshot::kFullAOT) { |
| s->WriteTokenPosition(field->untag()->token_pos_); |
| s->WriteTokenPosition(field->untag()->end_token_pos_); |
| s->WriteCid(field->untag()->guarded_cid_); |
| s->WriteCid(field->untag()->is_nullable_); |
| s->Write<int8_t>(field->untag()->static_type_exactness_state_); |
| s->Write<uint32_t>(field->untag()->kernel_offset_); |
| } |
| s->Write<uint16_t>(field->untag()->kind_bits_); |
| |
| // Write out either the initial static value or field offset. |
| if (Field::StaticBit::decode(field->untag()->kind_bits_)) { |
| const intptr_t field_id = |
| Smi::Value(field->untag()->host_offset_or_field_id()); |
| WriteFieldValue("static value", s->initial_field_table()->At(field_id)); |
| s->WriteUnsigned(field_id); |
| } else { |
| WriteFieldValue("offset", Smi::New(Field::TargetOffsetOf(field))); |
| } |
| } |
| } |
| |
| private: |
| GrowableArray<FieldPtr> objects_; |
| }; |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
| class FieldDeserializationCluster : public DeserializationCluster { |
| public: |
| FieldDeserializationCluster() : DeserializationCluster("Field") {} |
| ~FieldDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| ReadAllocFixedSize(d, Field::InstanceSize()); |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| ASSERT(!is_canonical()); // Never canonical. |
| Snapshot::Kind kind = d->kind(); |
| |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| FieldPtr field = static_cast<FieldPtr>(d->Ref(id)); |
| Deserializer::InitializeHeader(field, kFieldCid, Field::InstanceSize()); |
| ReadFromTo(field); |
| if (kind != Snapshot::kFullAOT) { |
| field->untag()->guarded_list_length_ = |
| static_cast<SmiPtr>(d->ReadRef()); |
| } |
| if (kind == Snapshot::kFullJIT) { |
| field->untag()->dependent_code_ = static_cast<ArrayPtr>(d->ReadRef()); |
| } |
| if (kind != Snapshot::kFullAOT) { |
| field->untag()->token_pos_ = d->ReadTokenPosition(); |
| field->untag()->end_token_pos_ = d->ReadTokenPosition(); |
| field->untag()->guarded_cid_ = d->ReadCid(); |
| field->untag()->is_nullable_ = d->ReadCid(); |
| const int8_t static_type_exactness_state = d->Read<int8_t>(); |
| #if defined(TARGET_ARCH_X64) |
| field->untag()->static_type_exactness_state_ = |
| static_type_exactness_state; |
| #else |
| // We might produce core snapshots using an X64 VM and then consume |
| // them in an IA32 or ARM VM, in which case we simply ignore the |
| // static type exactness state written into the snapshot because |
| // non-X64 builds don't have this feature enabled. |
| // TODO(dartbug.com/34170) Support other architectures. |
| USE(static_type_exactness_state); |
| field->untag()->static_type_exactness_state_ = |
| StaticTypeExactnessState::NotTracking().Encode(); |
| #endif // defined(TARGET_ARCH_X64) |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| field->untag()->kernel_offset_ = d->Read<uint32_t>(); |
| #endif |
| } |
| field->untag()->kind_bits_ = d->Read<uint16_t>(); |
| |
| ObjectPtr value_or_offset = d->ReadRef(); |
| if (Field::StaticBit::decode(field->untag()->kind_bits_)) { |
| const intptr_t field_id = d->ReadUnsigned(); |
| d->initial_field_table()->SetAt( |
| field_id, static_cast<InstancePtr>(value_or_offset)); |
| field->untag()->host_offset_or_field_id_ = Smi::New(field_id); |
| } else { |
| field->untag()->host_offset_or_field_id_ = |
| Smi::RawCast(value_or_offset); |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| field->untag()->target_offset_ = |
| Smi::Value(field->untag()->host_offset_or_field_id()); |
| #endif // !defined(DART_PRECOMPILED_RUNTIME) |
| } |
| } |
| } |
| |
| void PostLoad(Deserializer* d, const Array& refs, bool primary) { |
| Field& field = Field::Handle(d->zone()); |
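| // Without field guards, reset all guarding state so each field is treated |
| // as unguarded; otherwise just recompute the in-object list length |
| // offset, which is derived rather than serialized. |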
| if (!IsolateGroup::Current()->use_field_guards()) { |
| for (intptr_t i = start_index_; i < stop_index_; i++) { |
| field ^= refs.At(i); |
| field.set_guarded_cid_unsafe(kDynamicCid); |
| field.set_is_nullable_unsafe(true); |
| field.set_guarded_list_length_unsafe(Field::kNoFixedLength); |
| field.set_guarded_list_length_in_object_offset_unsafe( |
| Field::kUnknownLengthOffset); |
| field.set_static_type_exactness_state_unsafe( |
| StaticTypeExactnessState::NotTracking()); |
| } |
| } else { |
| for (intptr_t i = start_index_; i < stop_index_; i++) { |
| field ^= refs.At(i); |
| field.InitializeGuardedListLengthInObjectOffset(/*unsafe=*/true); |
| } |
| } |
| } |
| }; |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| class ScriptSerializationCluster : public SerializationCluster { |
| public: |
| ScriptSerializationCluster() |
| : SerializationCluster("Script", |
| kScriptCid, |
| compiler::target::Script::InstanceSize()) {} |
| ~ScriptSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| ScriptPtr script = Script::RawCast(object); |
| objects_.Add(script); |
| PushFromTo(script); |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| ScriptPtr script = objects_[i]; |
| s->AssignRef(script); |
| } |
| } |
| |
| void WriteFill(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| ScriptPtr script = objects_[i]; |
| AutoTraceObjectName(script, script->untag()->url()); |
| WriteFromTo(script); |
| if (s->kind() != Snapshot::kFullAOT) { |
| // Clear the cached max position so the snapshot does not differ |
| // depending on whether caching was triggered before serialization. |
| int32_t written_flags = |
| UntaggedScript::CachedMaxPositionBitField::update( |
| 0, script->untag()->flags_and_max_position_); |
| written_flags = UntaggedScript::HasCachedMaxPositionBit::update( |
| false, written_flags); |
| s->Write<int32_t>(written_flags); |
| } |
| s->Write<int32_t>(script->untag()->kernel_script_index_); |
| } |
| } |
| |
| private: |
| GrowableArray<ScriptPtr> objects_; |
| }; |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
| class ScriptDeserializationCluster : public DeserializationCluster { |
| public: |
| ScriptDeserializationCluster() : DeserializationCluster("Script") {} |
| ~ScriptDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| ReadAllocFixedSize(d, Script::InstanceSize()); |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| ASSERT(!is_canonical()); // Never canonical. |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| ScriptPtr script = static_cast<ScriptPtr>(d->Ref(id)); |
| Deserializer::InitializeHeader(script, kScriptCid, |
| Script::InstanceSize()); |
| ReadFromTo(script); |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| script->untag()->flags_and_max_position_ = d->Read<int32_t>(); |
| #endif |
| script->untag()->kernel_script_index_ = d->Read<int32_t>(); |
| script->untag()->load_timestamp_ = 0; |
| } |
| } |
| }; |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| class LibrarySerializationCluster : public SerializationCluster { |
| public: |
| LibrarySerializationCluster() |
| : SerializationCluster("Library", |
| kLibraryCid, |
| compiler::target::Library::InstanceSize()) {} |
| ~LibrarySerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| LibraryPtr lib = Library::RawCast(object); |
| objects_.Add(lib); |
| PushFromTo(lib); |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| LibraryPtr lib = objects_[i]; |
| s->AssignRef(lib); |
| } |
| } |
| |
| void WriteFill(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| LibraryPtr lib = objects_[i]; |
| AutoTraceObjectName(lib, lib->untag()->url()); |
| WriteFromTo(lib); |
| s->Write<int32_t>(lib->untag()->index_); |
| s->Write<uint16_t>(lib->untag()->num_imports_); |
| s->Write<int8_t>(lib->untag()->load_state_); |
| s->Write<uint8_t>(lib->untag()->flags_); |
| if (s->kind() != Snapshot::kFullAOT) { |
| s->Write<uint32_t>(lib->untag()->kernel_offset_); |
| } |
| } |
| } |
| |
| private: |
| GrowableArray<LibraryPtr> objects_; |
| }; |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
| class LibraryDeserializationCluster : public DeserializationCluster { |
| public: |
| LibraryDeserializationCluster() : DeserializationCluster("Library") {} |
| ~LibraryDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| ReadAllocFixedSize(d, Library::InstanceSize()); |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| ASSERT(!is_canonical()); // Never canonical. |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| LibraryPtr lib = static_cast<LibraryPtr>(d->Ref(id)); |
| Deserializer::InitializeHeader(lib, kLibraryCid, Library::InstanceSize()); |
| ReadFromTo(lib); |
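| // Native resolvers are process-local state and are never serialized. |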
| lib->untag()->native_entry_resolver_ = nullptr; |
| lib->untag()->native_entry_symbol_resolver_ = nullptr; |
| lib->untag()->index_ = d->Read<int32_t>(); |
| lib->untag()->num_imports_ = d->Read<uint16_t>(); |
| lib->untag()->load_state_ = d->Read<int8_t>(); |
| lib->untag()->flags_ = |
| UntaggedLibrary::InFullSnapshotBit::update(true, d->Read<uint8_t>()); |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (d->kind() != Snapshot::kFullAOT) { |
| lib->untag()->kernel_offset_ = d->Read<uint32_t>(); |
| } |
| #endif |
| } |
| } |
| }; |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| class NamespaceSerializationCluster : public SerializationCluster { |
| public: |
| NamespaceSerializationCluster() |
| : SerializationCluster("Namespace", |
| kNamespaceCid, |
| compiler::target::Namespace::InstanceSize()) {} |
| ~NamespaceSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| NamespacePtr ns = Namespace::RawCast(object); |
| objects_.Add(ns); |
| PushFromTo(ns); |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| NamespacePtr ns = objects_[i]; |
| s->AssignRef(ns); |
| } |
| } |
| |
| void WriteFill(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| NamespacePtr ns = objects_[i]; |
| AutoTraceObject(ns); |
| WriteFromTo(ns); |
| } |
| } |
| |
| private: |
| GrowableArray<NamespacePtr> objects_; |
| }; |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
| class NamespaceDeserializationCluster : public DeserializationCluster { |
| public: |
| NamespaceDeserializationCluster() : DeserializationCluster("Namespace") {} |
| ~NamespaceDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| ReadAllocFixedSize(d, Namespace::InstanceSize()); |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| ASSERT(!is_canonical()); // Never canonical. |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| NamespacePtr ns = static_cast<NamespacePtr>(d->Ref(id)); |
| Deserializer::InitializeHeader(ns, kNamespaceCid, |
| Namespace::InstanceSize()); |
| ReadFromTo(ns); |
| } |
| } |
| }; |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| // KernelProgramInfo objects are not written into a full AOT snapshot. |
| class KernelProgramInfoSerializationCluster : public SerializationCluster { |
| public: |
| KernelProgramInfoSerializationCluster() |
| : SerializationCluster( |
| "KernelProgramInfo", |
| kKernelProgramInfoCid, |
| compiler::target::KernelProgramInfo::InstanceSize()) {} |
| ~KernelProgramInfoSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| KernelProgramInfoPtr info = KernelProgramInfo::RawCast(object); |
| objects_.Add(info); |
| PushFromTo(info); |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| KernelProgramInfoPtr info = objects_[i]; |
| s->AssignRef(info); |
| } |
| } |
| |
| void WriteFill(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| KernelProgramInfoPtr info = objects_[i]; |
| AutoTraceObject(info); |
| WriteFromTo(info); |
| s->Write<uint32_t>(info->untag()->kernel_binary_version_); |
| } |
| } |
| |
| private: |
| GrowableArray<KernelProgramInfoPtr> objects_; |
| }; |
| |
| // Since KernelProgramInfo objects are not written into full AOT snapshots, |
| // one will never need to read them from a full AOT snapshot. |
| class KernelProgramInfoDeserializationCluster : public DeserializationCluster { |
| public: |
| KernelProgramInfoDeserializationCluster() |
| : DeserializationCluster("KernelProgramInfo") {} |
| ~KernelProgramInfoDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| ReadAllocFixedSize(d, KernelProgramInfo::InstanceSize()); |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| ASSERT(!is_canonical()); // Never canonical. |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| KernelProgramInfoPtr info = static_cast<KernelProgramInfoPtr>(d->Ref(id)); |
| Deserializer::InitializeHeader(info, kKernelProgramInfoCid, |
| KernelProgramInfo::InstanceSize()); |
| ReadFromTo(info); |
| info->untag()->kernel_binary_version_ = d->Read<uint32_t>(); |
| } |
| } |
| |
| void PostLoad(Deserializer* d, const Array& refs, bool primary) { |
| Array& array = Array::Handle(d->zone()); |
| KernelProgramInfo& info = KernelProgramInfo::Handle(d->zone()); |
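| // The libraries and classes caches are not serialized; start each |
| // KernelProgramInfo with fresh, empty hash tables. |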
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| info ^= refs.At(id); |
| array = HashTables::New<UnorderedHashMap<SmiTraits>>(16, Heap::kOld); |
| info.set_libraries_cache(array); |
| array = HashTables::New<UnorderedHashMap<SmiTraits>>(16, Heap::kOld); |
| info.set_classes_cache(array); |
| } |
| } |
| }; |
| |
| class CodeSerializationCluster : public SerializationCluster { |
| public: |
| explicit CodeSerializationCluster(Heap* heap) |
| : SerializationCluster("Code", kCodeCid), array_(Array::Handle()) {} |
| ~CodeSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| CodePtr code = Code::RawCast(object); |
| |
| const bool is_deferred = !s->InCurrentLoadingUnitOrRoot(code); |
| if (is_deferred) { |
| s->RecordDeferredCode(code); |
| } else { |
| objects_.Add(code); |
| } |
| |
| // Even if this code object is itself deferred we still need to scan |
| // the pool for references to other code objects (which might reside |
| // in the current loading unit). |
| ObjectPoolPtr pool = code->untag()->object_pool_; |
| if (s->kind() == Snapshot::kFullAOT) { |
| TracePool(s, pool, /*only_code=*/is_deferred); |
| } else { |
| if (s->InCurrentLoadingUnitOrRoot(pool)) { |
| s->Push(pool); |
| } else { |
| TracePool(s, pool, /*only_code=*/true); |
| } |
| } |
| |
| if (s->kind() == Snapshot::kFullJIT) { |
| s->Push(code->untag()->deopt_info_array_); |
| s->Push(code->untag()->static_calls_target_table_); |
| s->Push(code->untag()->compressed_stackmaps_); |
| } else if (s->kind() == Snapshot::kFullAOT) { |
| // Note: we don't trace compressed_stackmaps_ because we will emit a |
| // separate mapping table into RO data, which is not a real heap |
| // object. |
| #if defined(DART_PRECOMPILER) |
| auto const calls_array = code->untag()->static_calls_target_table_; |
| if (calls_array != Array::null()) { |
| // Some Code entries in the static calls target table may only be |
| // accessible via here, so push the Code objects. |
| array_ = calls_array; |
| for (auto entry : StaticCallsTable(array_)) { |
| auto kind = Code::KindField::decode( |
| Smi::Value(entry.Get<Code::kSCallTableKindAndOffset>())); |
| switch (kind) { |
| case Code::kCallViaCode: |
| // Code object in the pool. |
| continue; |
| case Code::kPcRelativeTTSCall: |
| // TTS will be reachable through type object which itself is |
| // in the pool. |
| continue; |
| case Code::kPcRelativeCall: |
| case Code::kPcRelativeTailCall: |
| auto destination = entry.Get<Code::kSCallTableCodeOrTypeTarget>(); |
| ASSERT(destination->IsHeapObject() && destination->IsCode()); |
| s->Push(destination); |
| } |
| } |
| } |
| #else |
| UNREACHABLE(); |
| #endif |
| } |
| |
| if (Code::IsDiscarded(code)) { |
| ASSERT(s->kind() == Snapshot::kFullAOT && FLAG_dwarf_stack_traces_mode && |
| !FLAG_retain_code_objects); |
| // Only the object pool, static call table entries and the compressed |
| // stack maps should be pushed. |
| return; |
| } |
| |
| s->Push(code->untag()->owner_); |
| s->Push(code->untag()->exception_handlers_); |
| s->Push(code->untag()->pc_descriptors_); |
| s->Push(code->untag()->catch_entry_); |
| if (!FLAG_precompiled_mode || !FLAG_dwarf_stack_traces_mode) { |
| s->Push(code->untag()->inlined_id_to_function_); |
| if (s->InCurrentLoadingUnitOrRoot(code->untag()->code_source_map_)) { |
| s->Push(code->untag()->code_source_map_); |
| } |
| } |
| #if !defined(PRODUCT) |
| s->Push(code->untag()->return_address_metadata_); |
| if (FLAG_code_comments) { |
| s->Push(code->untag()->comments_); |
| } |
| #endif |
| } |
| |
| void TracePool(Serializer* s, ObjectPoolPtr pool, bool only_code) { |
| if (pool == ObjectPool::null()) { |
| return; |
| } |
| |
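| // Only tagged-object entries can reference other heap objects; |
| // immediate and native-function entries carry no references. |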
| const intptr_t length = pool->untag()->length_; |
| uint8_t* entry_bits = pool->untag()->entry_bits(); |
| for (intptr_t i = 0; i < length; i++) { |
| auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]); |
| if (entry_type == ObjectPool::EntryType::kTaggedObject) { |
| const ObjectPtr target = pool->untag()->data()[i].raw_obj_; |
| if (!only_code || target->IsCode()) { |
| s->Push(target); |
| } |
| } |
| } |
| } |
| |
| struct CodeOrderInfo { |
| CodePtr code; |
| intptr_t not_discarded; // 1 if this code was not discarded, 0 otherwise. |
| intptr_t instructions_id; |
| }; |
| |
| // We sort code objects so that Code objects with the same instructions |
| // are grouped together, and all instructions without associated Code |
| // objects are grouped together at the beginning of the code section. |
| // The InstructionsTable encoding assumes that all instructions with |
| // non-discarded Code objects are grouped at the end. |
| // |
| // Note that in AOT mode we expect all Code objects pointing to the same |
| // instructions to be deduplicated, because in bare instructions mode |
| // there is no way to identify which specific Code object (out of those |
| // which point to the particular instructions range) actually |
| // corresponds to a particular frame. |
| static int CompareCodeOrderInfo(CodeOrderInfo const* a, |
| CodeOrderInfo const* b) { |
| if (a->not_discarded < b->not_discarded) return -1; |
| if (a->not_discarded > b->not_discarded) return 1; |
| if (a->instructions_id < b->instructions_id) return -1; |
| if (a->instructions_id > b->instructions_id) return 1; |
| return 0; |
| } |
| |
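| // Assign a per-Instructions id so that Code objects sharing the same |
| // instructions sort adjacently. Discarded Code objects sort first |
| // because their not_discarded key is 0. |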
| static void Insert(Serializer* s, |
| GrowableArray<CodeOrderInfo>* order_list, |
| IntMap<intptr_t>* order_map, |
| CodePtr code) { |
| InstructionsPtr instr = code->untag()->instructions_; |
| intptr_t key = static_cast<intptr_t>(instr); |
| intptr_t instructions_id = 0; |
| |
| if (order_map->HasKey(key)) { |
| // We are expected to merge code objects which point to the same |
| // instructions in the precompiled mode. |
| RELEASE_ASSERT(!FLAG_precompiled_mode); |
| instructions_id = order_map->Lookup(key); |
| } else { |
| instructions_id = order_map->Length() + 1; |
| order_map->Insert(key, instructions_id); |
| } |
| CodeOrderInfo info; |
| info.code = code; |
| info.instructions_id = instructions_id; |
| info.not_discarded = Code::IsDiscarded(code) ? 0 : 1; |
| order_list->Add(info); |
| } |
| |
| static void Sort(Serializer* s, GrowableArray<CodePtr>* codes) { |
| GrowableArray<CodeOrderInfo> order_list; |
| IntMap<intptr_t> order_map; |
| for (intptr_t i = 0; i < codes->length(); i++) { |
| Insert(s, &order_list, &order_map, (*codes)[i]); |
| } |
| order_list.Sort(CompareCodeOrderInfo); |
| ASSERT(order_list.length() == codes->length()); |
| for (intptr_t i = 0; i < order_list.length(); i++) { |
| (*codes)[i] = order_list[i].code; |
| } |
| } |
| |
| static void Sort(Serializer* s, GrowableArray<Code*>* codes) { |
| GrowableArray<CodeOrderInfo> order_list; |
| IntMap<intptr_t> order_map; |
| for (intptr_t i = 0; i < codes->length(); i++) { |
| Insert(s, &order_list, &order_map, (*codes)[i]->ptr()); |
| } |
| order_list.Sort(CompareCodeOrderInfo); |
| ASSERT(order_list.length() == codes->length()); |
| for (intptr_t i = 0; i < order_list.length(); i++) { |
| *(*codes)[i] = order_list[i].code; |
| } |
| } |
| |
| intptr_t NonDiscardedCodeCount() { |
| intptr_t count = 0; |
| for (auto code : objects_) { |
| if (!Code::IsDiscarded(code)) { |
| count++; |
| } |
| } |
| return count; |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t non_discarded_count = NonDiscardedCodeCount(); |
| const intptr_t count = objects_.length(); |
| ASSERT(count == non_discarded_count || (s->kind() == Snapshot::kFullAOT)); |
| |
| first_ref_ = s->next_ref_index(); |
| s->WriteUnsigned(non_discarded_count); |
| for (auto code : objects_) { |
| if (!Code::IsDiscarded(code)) { |
| WriteAlloc(s, code); |
| } else { |
| // Mark discarded code unreachable, so that we can later assign |
| // artificial references to it. |
| s->heap()->SetObjectId(code, kUnreachableReference); |
| } |
| } |
| |
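| // Deferred Code objects (those in other loading units) get the |
| // contiguous range of ref indices that follows the eager ones. |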
| s->WriteUnsigned(deferred_objects_.length()); |
| first_deferred_ref_ = s->next_ref_index(); |
| for (auto code : deferred_objects_) { |
| ASSERT(!Code::IsDiscarded(code)); |
| WriteAlloc(s, code); |
| } |
| last_ref_ = s->next_ref_index() - 1; |
| } |
| |
| void WriteAlloc(Serializer* s, CodePtr code) { |
| ASSERT(!Code::IsDiscarded(code)); |
| s->AssignRef(code); |
| AutoTraceObjectName(code, MakeDisambiguatedCodeName(s, code)); |
| const int32_t state_bits = code->untag()->state_bits_; |
| s->Write<int32_t>(state_bits); |
| target_memory_size_ += compiler::target::Code::InstanceSize(0); |
| } |
| |
| void WriteFill(Serializer* s) { |
| Snapshot::Kind kind = s->kind(); |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| CodePtr code = objects_[i]; |
| #if defined(DART_PRECOMPILER) |
| if (FLAG_write_v8_snapshot_profile_to != nullptr && |
| Code::IsDiscarded(code)) { |
| s->CreateArtificialNodeIfNeeded(code); |
| } |
| #endif |
| // Note: for discarded code this function will not write anything out; |
| // it is only called to emit information into the snapshot profile. |
| WriteFill(s, kind, code, /*deferred=*/false); |
| } |
| const intptr_t deferred_count = deferred_objects_.length(); |
| for (intptr_t i = 0; i < deferred_count; i++) { |
| CodePtr code = deferred_objects_[i]; |
| WriteFill(s, kind, code, /*deferred=*/true); |
| } |
| } |
| |
| void WriteFill(Serializer* s, |
| Snapshot::Kind kind, |
| CodePtr code, |
| bool deferred) { |
| const intptr_t bytes_written = s->bytes_written(); |
| AutoTraceObjectName(code, MakeDisambiguatedCodeName(s, code)); |
| |
| intptr_t pointer_offsets_length = |
| Code::PtrOffBits::decode(code->untag()->state_bits_); |
| if (pointer_offsets_length != 0) { |
| FATAL("Cannot serialize code with embedded pointers"); |
| } |
| if (kind == Snapshot::kFullAOT && Code::IsDisabled(code)) { |
| // Disabled code is fatal in AOT since we cannot recompile. |
| s->UnexpectedObject(code, "Disabled code"); |
| } |
| |
| s->WriteInstructions(code->untag()->instructions_, |
| code->untag()->unchecked_offset_, code, deferred); |
| if (kind == Snapshot::kFullJIT) { |
| // TODO(rmacnak): Fix references to disabled code before serializing. |
| // For now, we may write the FixCallersTarget or equivalent stub. This |
| // will cause a fixup if this code is called. |
| const uint32_t active_unchecked_offset = |
| code->untag()->unchecked_entry_point_ - code->untag()->entry_point_; |
| s->WriteInstructions(code->untag()->active_instructions_, |
| active_unchecked_offset, code, deferred); |
| } |
| |
| #if defined(DART_PRECOMPILER) |
| if (FLAG_write_v8_snapshot_profile_to != nullptr) { |
| // If we are writing V8 snapshot profile then attribute references going |
| // through the object pool and static calls to the code object itself. |
| if (kind == Snapshot::kFullAOT && |
| code->untag()->object_pool_ != ObjectPool::null()) { |
| ObjectPoolPtr pool = code->untag()->object_pool_; |
| // Non-empty per-code object pools should not be reachable in this mode. |
| ASSERT(!s->HasRef(pool) || pool == Object::empty_object_pool().ptr()); |
| s->CreateArtificialNodeIfNeeded(pool); |
| s->AttributePropertyRef(pool, "object_pool_"); |
| } |
| if (kind != Snapshot::kFullJIT && |
| code->untag()->static_calls_target_table_ != Array::null()) { |
| auto const table = code->untag()->static_calls_target_table_; |
| // Non-empty static call target tables shouldn't be reachable in this |
| // mode. |
| ASSERT(!s->HasRef(table) || table == Object::empty_array().ptr()); |
| s->CreateArtificialNodeIfNeeded(table); |
| s->AttributePropertyRef(table, "static_calls_target_table_"); |
| } |
| } |
| #endif // defined(DART_PRECOMPILER) |
| |
| if (Code::IsDiscarded(code)) { |
| // No bytes should be written to represent this code. |
| ASSERT(s->bytes_written() == bytes_written); |
| // Only write instructions, compressed stackmaps and state bits |
| // for the discarded Code objects. |
| ASSERT(kind == Snapshot::kFullAOT && FLAG_dwarf_stack_traces_mode && |
| !FLAG_retain_code_objects); |
| #if defined(DART_PRECOMPILER) |
| if (FLAG_write_v8_snapshot_profile_to != nullptr) { |
| // Keep the owner as a (possibly artificial) node for snapshot analysis. |
| const auto& owner = code->untag()->owner_; |
| s->CreateArtificialNodeIfNeeded(owner); |
| s->AttributePropertyRef(owner, "owner_"); |
| } |
| #endif |
| return; |
| } |
| |
| // No need to write the object pool out if we are producing a full AOT |
| // snapshot with bare instructions. |
| if (kind != Snapshot::kFullAOT) { |
| if (s->InCurrentLoadingUnitOrRoot(code->untag()->object_pool_)) { |
| WriteField(code, object_pool_); |
| } else { |
| WriteFieldValue(object_pool_, ObjectPool::null()); |
| } |
| } |
| WriteField(code, owner_); |
| WriteField(code, exception_handlers_); |
| WriteField(code, pc_descriptors_); |
| WriteField(code, catch_entry_); |
| if (s->kind() == Snapshot::kFullJIT) { |
| WriteField(code, compressed_stackmaps_); |
| } |
| if (FLAG_precompiled_mode && FLAG_dwarf_stack_traces_mode) { |
| WriteFieldValue(inlined_id_to_function_, Array::null()); |
| WriteFieldValue(code_source_map_, CodeSourceMap::null()); |
| } else { |
| WriteField(code, inlined_id_to_function_); |
| if (s->InCurrentLoadingUnitOrRoot(code->untag()->code_source_map_)) { |
| WriteField(code, code_source_map_); |
| } else { |
| WriteFieldValue(code_source_map_, CodeSourceMap::null()); |
| } |
| } |
| if (kind == Snapshot::kFullJIT) { |
| WriteField(code, deopt_info_array_); |
| WriteField(code, static_calls_target_table_); |
| } |
| |
| #if !defined(PRODUCT) |
| WriteField(code, return_address_metadata_); |
| if (FLAG_code_comments) { |
| WriteField(code, comments_); |
| } |
| #endif |
| } |
| |
| GrowableArray<CodePtr>* objects() { return &objects_; } |
| GrowableArray<CodePtr>* deferred_objects() { return &deferred_objects_; } |
| |
| static const char* MakeDisambiguatedCodeName(Serializer* s, CodePtr c) { |
| if (s->profile_writer() == nullptr) { |
| return nullptr; |
| } |
| |
| REUSABLE_CODE_HANDLESCOPE(s->thread()); |
| Code& code = reused_code_handle.Handle(); |
| code = c; |
| return code.QualifiedName( |
| NameFormattingParams::DisambiguatedWithoutClassName( |
| Object::NameVisibility::kInternalName)); |
| } |
| |
| intptr_t first_ref() const { return first_ref_; } |
| intptr_t first_deferred_ref() const { return first_deferred_ref_; } |
| intptr_t last_ref() const { return last_ref_; } |
| |
| private: |
| intptr_t first_ref_; |
| intptr_t first_deferred_ref_; |
| intptr_t last_ref_; |
| GrowableArray<CodePtr> objects_; |
| GrowableArray<CodePtr> deferred_objects_; |
| Array& array_; |
| }; |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
| class CodeDeserializationCluster : public DeserializationCluster { |
| public: |
| CodeDeserializationCluster() : DeserializationCluster("Code") {} |
| ~CodeDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| PageSpace* old_space = d->heap()->old_space(); |
| start_index_ = d->next_index(); |
| d->set_code_start_index(start_index_); |
| const intptr_t count = d->ReadUnsigned(); |
| for (intptr_t i = 0; i < count; i++) { |
| ReadAllocOneCode(d, old_space); |
| } |
| stop_index_ = d->next_index(); |
| d->set_code_stop_index(stop_index_); |
| deferred_start_index_ = d->next_index(); |
| const intptr_t deferred_count = d->ReadUnsigned(); |
| for (intptr_t i = 0; i < deferred_count; i++) { |
| ReadAllocOneCode(d, old_space); |
| } |
| deferred_stop_index_ = d->next_index(); |
| } |
| |
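| // Discarded Code objects have no allocation entries in the snapshot, |
| // so every Code allocated here must be non-discarded. |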
| void ReadAllocOneCode(Deserializer* d, PageSpace* old_space) { |
| const int32_t state_bits = d->Read<int32_t>(); |
| ASSERT(!Code::DiscardedBit::decode(state_bits)); |
| auto code = static_cast<CodePtr>( |
| old_space->AllocateSnapshot(Code::InstanceSize(0))); |
| d->AssignRef(code); |
| code->untag()->state_bits_ = state_bits; |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| ASSERT(!is_canonical()); // Never canonical. |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| ReadFill(d, id, false); |
| } |
| for (intptr_t id = deferred_start_index_; id < deferred_stop_index_; id++) { |
| ReadFill(d, id, true); |
| } |
| } |
| |
| void ReadFill(Deserializer* d, intptr_t id, bool deferred) { |
| auto const code = static_cast<CodePtr>(d->Ref(id)); |
| |
| ASSERT(!Code::IsUnknownDartCode(code)); |
| |
| Deserializer::InitializeHeader(code, kCodeCid, Code::InstanceSize(0)); |
| ASSERT(!Code::IsDiscarded(code)); |
| |
| d->ReadInstructions(code, deferred); |
| |
| // There is a single global pool if this is a full AOT snapshot with |
| // bare instructions, so no per-Code pool was serialized. |
| if (d->kind() != Snapshot::kFullAOT) { |
| code->untag()->object_pool_ = static_cast<ObjectPoolPtr>(d->ReadRef()); |
| } else { |
| code->untag()->object_pool_ = ObjectPool::null(); |
| } |
| code->untag()->owner_ = d->ReadRef(); |
| code->untag()->exception_handlers_ = |
| static_cast<ExceptionHandlersPtr>(d->ReadRef()); |
| code->untag()->pc_descriptors_ = |
| static_cast<PcDescriptorsPtr>(d->ReadRef()); |
| code->untag()->catch_entry_ = d->ReadRef(); |
| if (d->kind() == Snapshot::kFullJIT) { |
| code->untag()->compressed_stackmaps_ = |
| static_cast<CompressedStackMapsPtr>(d->ReadRef()); |
| } else if (d->kind() == Snapshot::kFullAOT) { |
| code->untag()->compressed_stackmaps_ = CompressedStackMaps::null(); |
| } |
| code->untag()->inlined_id_to_function_ = |
| static_cast<ArrayPtr>(d->ReadRef()); |
| code->untag()->code_source_map_ = |
| static_cast<CodeSourceMapPtr>(d->ReadRef()); |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| if (d->kind() == Snapshot::kFullJIT) { |
| code->untag()->deopt_info_array_ = static_cast<ArrayPtr>(d->ReadRef()); |
| code->untag()->static_calls_target_table_ = |
| static_cast<ArrayPtr>(d->ReadRef()); |
| } |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
| #if !defined(PRODUCT) |
| code->untag()->return_address_metadata_ = d->ReadRef(); |
| code->untag()->var_descriptors_ = LocalVarDescriptors::null(); |
| code->untag()->comments_ = FLAG_code_comments |
| ? static_cast<ArrayPtr>(d->ReadRef()) |
| : Array::null(); |
| code->untag()->compile_timestamp_ = 0; |
| #endif |
| } |
| |
| void PostLoad(Deserializer* d, const Array& refs, bool primary) { |
| d->EndInstructions(); |
| |
| #if !defined(PRODUCT) |
| if (!CodeObservers::AreActive() && !FLAG_support_disassembler) return; |
| #endif |
| Code& code = Code::Handle(d->zone()); |
| #if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER) |
| Object& owner = Object::Handle(d->zone()); |
| #endif |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| code ^= refs.At(id); |
| #if !defined(DART_PRECOMPILED_RUNTIME) && !defined(PRODUCT) |
| if (CodeObservers::AreActive()) { |
| Code::NotifyCodeObservers(code, code.is_optimized()); |
| } |
| #endif |
| #if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER) |
| owner = code.owner(); |
| if (owner.IsFunction()) { |
| if ((FLAG_disassemble || |
| (code.is_optimized() && FLAG_disassemble_optimized)) && |
| compiler::PrintFilter::ShouldPrint(Function::Cast(owner))) { |
| Disassembler::DisassembleCode(Function::Cast(owner), code, |
| code.is_optimized()); |
| } |
| } else if (FLAG_disassemble_stubs) { |
| Disassembler::DisassembleStub(code.Name(), code); |
| } |
| #endif // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER) |
| } |
| } |
| |
| private: |
| intptr_t deferred_start_index_; |
| intptr_t deferred_stop_index_; |
| }; |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| class ObjectPoolSerializationCluster : public SerializationCluster { |
| public: |
| ObjectPoolSerializationCluster() |
| : SerializationCluster("ObjectPool", kObjectPoolCid) {} |
| ~ObjectPoolSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| ObjectPoolPtr pool = ObjectPool::RawCast(object); |
| objects_.Add(pool); |
| |
| if (s->kind() != Snapshot::kFullAOT) { |
| const intptr_t length = pool->untag()->length_; |
| uint8_t* entry_bits = pool->untag()->entry_bits(); |
| for (intptr_t i = 0; i < length; i++) { |
| auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]); |
| if (entry_type == ObjectPool::EntryType::kTaggedObject) { |
| s->Push(pool->untag()->data()[i].raw_obj_); |
| } |
| } |
| } |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| ObjectPoolPtr pool = objects_[i]; |
| s->AssignRef(pool); |
| AutoTraceObject(pool); |
| const intptr_t length = pool->untag()->length_; |
| s->WriteUnsigned(length); |
| target_memory_size_ += compiler::target::ObjectPool::InstanceSize(length); |
| } |
| } |
| |
| void WriteFill(Serializer* s) { |
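| // In a full AOT snapshot the pool is written weakly: tagged entries |
| // whose targets were not otherwise serialized are replaced with null. |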
| bool weak = s->kind() == Snapshot::kFullAOT; |
| |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| ObjectPoolPtr pool = objects_[i]; |
| AutoTraceObject(pool); |
| const intptr_t length = pool->untag()->length_; |
| s->WriteUnsigned(length); |
| uint8_t* entry_bits = pool->untag()->entry_bits(); |
| for (intptr_t j = 0; j < length; j++) { |
| UntaggedObjectPool::Entry& entry = pool->untag()->data()[j]; |
| uint8_t bits = entry_bits[j]; |
| ObjectPool::EntryType type = ObjectPool::TypeBits::decode(bits); |
| if (weak && (type == ObjectPool::EntryType::kTaggedObject)) { |
| // By default, every switchable call site will put (ic_data, code) |
| // into the object pool. The [code] is initialized (at AOT |
| // compile-time) to be [StubCode::SwitchableCallMiss] or |
| // [StubCode::MegamorphicCall]. |
| // |
| // In bare instructions mode we reduce the extra indirection via the |
| // [code] object and store instead (ic_data, entrypoint) in the object |
| // pool. |
| // |
| // Since the actual [entrypoint] is only known at AOT runtime we |
| // switch all existing entries for these stubs to entrypoints |
| // encoded as EntryType::kSwitchableCallMissEntryPoint and |
| // EntryType::kMegamorphicCallEntryPoint. |
| if (entry.raw_obj_ == StubCode::SwitchableCallMiss().ptr()) { |
| type = ObjectPool::EntryType::kSwitchableCallMissEntryPoint; |
| bits = ObjectPool::EncodeBits(type, |
| ObjectPool::Patchability::kPatchable); |
| } else if (entry.raw_obj_ == StubCode::MegamorphicCall().ptr()) { |
| type = ObjectPool::EntryType::kMegamorphicCallEntryPoint; |
| bits = ObjectPool::EncodeBits(type, |
| ObjectPool::Patchability::kPatchable); |
| } |
| } |
| s->Write<uint8_t>(bits); |
| switch (type) { |
| case ObjectPool::EntryType::kTaggedObject: { |
| if ((entry.raw_obj_ == StubCode::CallNoScopeNative().ptr()) || |
| (entry.raw_obj_ == StubCode::CallAutoScopeNative().ptr())) { |
| // Natives can run while precompiling, becoming linked and |
| // switching their stub. Reset to the initial stub used for |
| // lazy-linking. |
| s->WriteElementRef(StubCode::CallBootstrapNative().ptr(), j); |
| break; |
| } |
| if (weak && !s->HasRef(entry.raw_obj_)) { |
| // Any value will do, but null has the shortest id. |
| s->WriteElementRef(Object::null(), j); |
| } else { |
| s->WriteElementRef(entry.raw_obj_, j); |
| } |
| break; |
| } |
| case ObjectPool::EntryType::kImmediate: { |
| s->Write<intptr_t>(entry.raw_value_); |
| break; |
| } |
| case ObjectPool::EntryType::kNativeFunction: { |
| // Write nothing. Will initialize with the lazy link entry. |
| break; |
| } |
| case ObjectPool::EntryType::kSwitchableCallMissEntryPoint: |
| case ObjectPool::EntryType::kMegamorphicCallEntryPoint: |
| // Write nothing. Entry point is initialized during |
| // snapshot deserialization. |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| } |
| } |
| } |
| |
| private: |
| GrowableArray<ObjectPoolPtr> objects_; |
| }; |
| #endif // !DART_PRECOMPILED_RUNTIME |
| |
| class ObjectPoolDeserializationCluster : public DeserializationCluster { |
| public: |
| ObjectPoolDeserializationCluster() : DeserializationCluster("ObjectPool") {} |
| ~ObjectPoolDeserializationCluster() {} |
| |
| void ReadAlloc(Deserializer* d) { |
| start_index_ = d->next_index(); |
| PageSpace* old_space = d->heap()->old_space(); |
| const intptr_t count = d->ReadUnsigned(); |
| for (intptr_t i = 0; i < count; i++) { |
| const intptr_t length = d->ReadUnsigned(); |
| d->AssignRef( |
| old_space->AllocateSnapshot(ObjectPool::InstanceSize(length))); |
| } |
| stop_index_ = d->next_index(); |
| } |
| |
| void ReadFill(Deserializer* d, bool primary) { |
| ASSERT(!is_canonical()); // Never canonical. |
| fill_position_ = d->position(); |
| #if defined(DART_PRECOMPILED_RUNTIME) |
| const uint8_t immediate_bits = |
| ObjectPool::EncodeBits(ObjectPool::EntryType::kImmediate, |
| ObjectPool::Patchability::kPatchable); |
| const uword switchable_call_miss_entry_point = |
| StubCode::SwitchableCallMiss().MonomorphicEntryPoint(); |
| const uword megamorphic_call_entry_point = |
| StubCode::MegamorphicCall().MonomorphicEntryPoint(); |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| const intptr_t length = d->ReadUnsigned(); |
| ObjectPoolPtr pool = static_cast<ObjectPoolPtr>(d->Ref(id)); |
| Deserializer::InitializeHeader(pool, kObjectPoolCid, |
| ObjectPool::InstanceSize(length)); |
| pool->untag()->length_ = length; |
| for (intptr_t j = 0; j < length; j++) { |
| const uint8_t entry_bits = d->Read<uint8_t>(); |
| pool->untag()->entry_bits()[j] = entry_bits; |
| UntaggedObjectPool::Entry& entry = pool->untag()->data()[j]; |
| switch (ObjectPool::TypeBits::decode(entry_bits)) { |
| case ObjectPool::EntryType::kTaggedObject: |
| entry.raw_obj_ = d->ReadRef(); |
| break; |
| case ObjectPool::EntryType::kImmediate: |
| entry.raw_value_ = d->Read<intptr_t>(); |
| break; |
| case ObjectPool::EntryType::kNativeFunction: { |
| // Read nothing. Initialize with the lazy link entry. |
| uword new_entry = NativeEntry::LinkNativeCallEntry(); |
| entry.raw_value_ = static_cast<intptr_t>(new_entry); |
| break; |
| } |
| #if defined(DART_PRECOMPILED_RUNTIME) |
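| // Entry-point markers written by the serializer are rewritten into |
| // plain immediates holding the stubs' monomorphic entry points. |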
| case ObjectPool::EntryType::kSwitchableCallMissEntryPoint: |
| pool->untag()->entry_bits()[j] = immediate_bits; |
| entry.raw_value_ = |
| static_cast<intptr_t>(switchable_call_miss_entry_point); |
| break; |
| case ObjectPool::EntryType::kMegamorphicCallEntryPoint: |
| pool->untag()->entry_bits()[j] = immediate_bits; |
| entry.raw_value_ = |
| static_cast<intptr_t>(megamorphic_call_entry_point); |
| break; |
| #endif // defined(DART_PRECOMPILED_RUNTIME) |
| default: |
| UNREACHABLE(); |
| } |
| } |
| } |
| } |
| |
| void PostLoad(Deserializer* d, const Array& refs, bool primary) { |
| if (d->is_non_root_unit()) { |
| // If this is a non-root unit, some pool entries that should be |
| // canonical may have been replaced with other objects during |
| // canonicalization. |
| |
| intptr_t restore_position = d->position(); |
| d->set_position(fill_position_); |
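| // Replay the pool portion of the stream: tagged entries are re-read |
| // from refs, which now hold the canonicalized objects. |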
| |
| auto Z = d->zone(); |
| ObjectPool& pool = ObjectPool::Handle(Z); |
| Object& entry = Object::Handle(Z); |
| for (intptr_t id = start_index_; id < stop_index_; id++) { |
| pool ^= refs.At(id); |
| const intptr_t length = d->ReadUnsigned(); |
| for (intptr_t j = 0; j < length; j++) { |
| const uint8_t entry_bits = d->Read<uint8_t>(); |
| switch (ObjectPool::TypeBits::decode(entry_bits)) { |
| case ObjectPool::EntryType::kTaggedObject: |
| entry = refs.At(d->ReadUnsigned()); |
| pool.SetObjectAt(j, entry); |
| break; |
| case ObjectPool::EntryType::kImmediate: |
| d->Read<intptr_t>(); |
| break; |
| case ObjectPool::EntryType::kNativeFunction: { |
| // Read nothing. |
| break; |
| } |
| default: |
| UNREACHABLE(); |
| } |
| } |
| } |
| |
| d->set_position(restore_position); |
| } |
| } |
| |
| private: |
| intptr_t fill_position_ = 0; |
| }; |
| |
| #if defined(DART_PRECOMPILER) |
| class WeakSerializationReferenceSerializationCluster |
| : public SerializationCluster { |
| public: |
| WeakSerializationReferenceSerializationCluster() |
| : SerializationCluster( |
| "WeakSerializationReference", |
| kWeakSerializationReferenceCid, |
| compiler::target::WeakSerializationReference::InstanceSize()) {} |
| ~WeakSerializationReferenceSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| ASSERT(s->kind() == Snapshot::kFullAOT); |
| objects_.Add(WeakSerializationReference::RawCast(object)); |
| } |
| |
| void RetraceEphemerons(Serializer* s) { |
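| // WSRs behave like ephemerons: if a target was not reached through |
| // any strong path, its replacement must be serialized in its place. |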
| for (intptr_t i = 0; i < objects_.length(); i++) { |
| WeakSerializationReferencePtr weak = objects_[i]; |
| if (!s->IsReachable(weak->untag()->target())) { |
| s->Push(weak->untag()->replacement()); |
| } |
| } |
| } |
| |
| intptr_t Count(Serializer* s) { return objects_.length(); } |
| |
| void CreateArtificialTargetNodesIfNeeded(Serializer* s) { |
| for (intptr_t i = 0; i < objects_.length(); i++) { |
| WeakSerializationReferencePtr weak = objects_[i]; |
| s->CreateArtificialNodeIfNeeded(weak->untag()->target()); |
| } |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| UNREACHABLE(); // No WSRs are serialized, and so this cluster is not added. |
| } |
| |
| void WriteFill(Serializer* s) { |
| UNREACHABLE(); // No WSRs are serialized, and so this cluster is not added. |
| } |
| |
| private: |
| GrowableArray<WeakSerializationReferencePtr> objects_; |
| }; |
| #endif |
| |
| #if !defined(DART_PRECOMPILED_RUNTIME) |
| class PcDescriptorsSerializationCluster : public SerializationCluster { |
| public: |
| PcDescriptorsSerializationCluster() |
| : SerializationCluster("PcDescriptors", kPcDescriptorsCid) {} |
| ~PcDescriptorsSerializationCluster() {} |
| |
| void Trace(Serializer* s, ObjectPtr object) { |
| PcDescriptorsPtr desc = PcDescriptors::RawCast(object); |
| objects_.Add(desc); |
| } |
| |
| void WriteAlloc(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| s->WriteUnsigned(count); |
| for (intptr_t i = 0; i < count; i++) { |
| PcDescriptorsPtr desc = objects_[i]; |
| s->AssignRef(desc); |
| AutoTraceObject(desc); |
| const intptr_t length = desc->untag()->length_; |
| s->WriteUnsigned(length); |
| target_memory_size_ += |
| compiler::target::PcDescriptors::InstanceSize(length); |
| } |
| } |
| |
| void WriteFill(Serializer* s) { |
| const intptr_t count = objects_.length(); |
| for (intptr_t i = 0; i < count; i++) { |
| PcDescriptorsPtr desc = objects_[i]; |
| AutoTraceObject
|