// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include <memory>
#include <utility>
#include "vm/app_snapshot.h"
#include "platform/assert.h"
#include "vm/bootstrap.h"
#include "vm/bss_relocs.h"
#include "vm/canonical_tables.h"
#include "vm/class_id.h"
#include "vm/code_observers.h"
#include "vm/compiler/api/print_filter.h"
#include "vm/compiler/assembler/disassembler.h"
#include "vm/dart.h"
#include "vm/dart_entry.h"
#include "vm/dispatch_table.h"
#include "vm/flag_list.h"
#include "vm/growable_array.h"
#include "vm/heap/heap.h"
#include "vm/image_snapshot.h"
#include "vm/native_entry.h"
#include "vm/object.h"
#include "vm/object_store.h"
#include "vm/program_visitor.h"
#include "vm/raw_object_fields.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
#include "vm/timeline.h"
#include "vm/v8_snapshot_writer.h"
#include "vm/version.h"
#include "vm/zone_text_buffer.h"
#if !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/compiler/backend/code_statistics.h"
#include "vm/compiler/backend/il_printer.h"
#include "vm/compiler/relocation.h"
#endif // !defined(DART_PRECOMPILED_RUNTIME)
namespace dart {
#if !defined(DART_PRECOMPILED_RUNTIME)
DEFINE_FLAG(bool,
print_cluster_information,
false,
"Print information about clusters written to snapshot");
#endif
#if defined(DART_PRECOMPILER)
DEFINE_FLAG(charp,
write_v8_snapshot_profile_to,
nullptr,
"Write a snapshot profile in V8 format to a file.");
DEFINE_FLAG(bool,
print_array_optimization_candidates,
false,
"Print information about how many array are candidates for Smi and "
"ROData optimizations.");
#endif // defined(DART_PRECOMPILER)
// Forward declarations.
class Serializer;
class Deserializer;
namespace {
// Serialized clusters are identified by their CID, so to insert custom
// clusters we need to assign them a CID that is otherwise never serialized.
static constexpr intptr_t kDeltaEncodedTypedDataCid = kNativePointer;
// StorageTraits implementation which allows HashTable to be backed by
// zone-allocated memory. Used to compute cluster order for canonical
// clusters.
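//
// Illustrative usage (mirroring ReorderObjects below; |zone| and |capacity|
// are hypothetical locals):
//   using ZoneCanonicalSet =
//       HashTable<typename SetType::Traits, 0, 0, GrowableArrayStorageTraits>;
//   ZoneCanonicalSet table(zone, HashTables::New<ZoneCanonicalSet>(capacity));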
struct GrowableArrayStorageTraits {
class Array : public ZoneAllocated {
public:
explicit Array(Zone* zone, intptr_t length)
: length_(length), array_(zone->Alloc<ObjectPtr>(length)) {}
intptr_t Length() const { return length_; }
void SetAt(intptr_t index, const Object& value) const {
array_[index] = value.ptr();
}
ObjectPtr At(intptr_t index) const { return array_[index]; }
private:
intptr_t length_ = 0;
ObjectPtr* array_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(Array);
};
using ArrayPtr = Array*;
class ArrayHandle : public ZoneAllocated {
public:
explicit ArrayHandle(ArrayPtr ptr) : ptr_(ptr) {}
ArrayHandle() {}
void SetFrom(const ArrayHandle& other) { ptr_ = other.ptr_; }
void Clear() { ptr_ = nullptr; }
bool IsNull() const { return ptr_ == nullptr; }
ArrayPtr ptr() { return ptr_; }
intptr_t Length() const { return ptr_->Length(); }
void SetAt(intptr_t index, const Object& value) const {
ptr_->SetAt(index, value);
}
ObjectPtr At(intptr_t index) const { return ptr_->At(index); }
private:
ArrayPtr ptr_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(ArrayHandle);
};
static ArrayHandle& PtrToHandle(ArrayPtr ptr) {
return *new ArrayHandle(ptr);
}
static void SetHandle(ArrayHandle& dst, const ArrayHandle& src) { // NOLINT
dst.SetFrom(src);
}
static void ClearHandle(ArrayHandle& dst) { // NOLINT
dst.Clear();
}
static ArrayPtr New(Zone* zone, intptr_t length, Heap::Space space) {
return new (zone) Array(zone, length);
}
static bool IsImmutable(const ArrayHandle& handle) { return false; }
static ObjectPtr At(ArrayHandle* array, intptr_t index) {
return array->At(index);
}
static void SetAt(ArrayHandle* array, intptr_t index, const Object& value) {
array->SetAt(index, value);
}
};
} // namespace
#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
static void RelocateCodeObjects(
bool is_vm,
GrowableArray<CodePtr>* code_objects,
GrowableArray<ImageWriterCommand>* image_writer_commands) {
auto thread = Thread::Current();
auto isolate_group =
is_vm ? Dart::vm_isolate()->group() : thread->isolate_group();
WritableCodePages writable_code_pages(thread, isolate_group);
CodeRelocator::Relocate(thread, code_objects, image_writer_commands, is_vm);
}
#endif // defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
class SerializationCluster : public ZoneAllocated {
public:
static constexpr intptr_t kSizeVaries = -1;
explicit SerializationCluster(const char* name,
intptr_t cid,
intptr_t target_instance_size = kSizeVaries,
bool is_canonical = false)
: name_(name),
cid_(cid),
target_instance_size_(target_instance_size),
is_canonical_(is_canonical),
is_immutable_(Object::ShouldHaveImmutabilityBitSet(cid)) {
ASSERT(target_instance_size == kSizeVaries || target_instance_size >= 0);
}
virtual ~SerializationCluster() {}
// Add [object] to the cluster and push its outgoing references.
virtual void Trace(Serializer* serializer, ObjectPtr object) = 0;
// Write the cluster type and information needed to allocate the cluster's
// objects. For fixed-size objects, this is just the object count. For
// variable-size objects, this is the object count and the length of each
// object.
virtual void WriteAlloc(Serializer* serializer) = 0;
// Write the byte and reference data of the cluster's objects.
virtual void WriteFill(Serializer* serializer) = 0;
void WriteAndMeasureAlloc(Serializer* serializer);
void WriteAndMeasureFill(Serializer* serializer);
const char* name() const { return name_; }
intptr_t cid() const { return cid_; }
bool is_canonical() const { return is_canonical_; }
bool is_immutable() const { return is_immutable_; }
intptr_t size() const { return size_; }
intptr_t num_objects() const { return num_objects_; }
// Returns the number of bytes needed for deserialized objects in
// this cluster. Printed in --print_snapshot_sizes_verbose statistics.
//
// In order to calculate this size, clusters of fixed-size objects
// can pass the instance size as the [target_instance_size] constructor
// parameter. Otherwise clusters should count [target_memory_size] in
// their [WriteAlloc] methods.
intptr_t target_memory_size() const { return target_memory_size_; }
protected:
const char* const name_;
const intptr_t cid_;
const intptr_t target_instance_size_;
const bool is_canonical_;
const bool is_immutable_;
intptr_t size_ = 0;
intptr_t num_objects_ = 0;
intptr_t target_memory_size_ = 0;
};
class DeserializationCluster : public ZoneAllocated {
public:
explicit DeserializationCluster(const char* name,
bool is_canonical = false,
bool is_immutable = false)
: name_(name),
is_canonical_(is_canonical),
is_immutable_(is_immutable),
start_index_(-1),
stop_index_(-1) {}
virtual ~DeserializationCluster() {}
// Allocate memory for all objects in the cluster and write their addresses
// into the ref array. Do not touch this memory.
virtual void ReadAlloc(Deserializer* deserializer) = 0;
// Initialize the cluster's objects. Do not touch the memory of other objects.
virtual void ReadFill(Deserializer* deserializer) = 0;
// Complete any action that requires the full graph to be deserialized, such
// as rehashing.
virtual void PostLoad(Deserializer* deserializer, const Array& refs) {
// We only need to worry about how canonical values are handled during
// deserialization if there may be multiple loading units, which only
// happens in the precompiled runtime.
#if defined(DART_PRECOMPILED_RUNTIME)
if (is_canonical()) {
FATAL("%s needs canonicalization but doesn't define PostLoad", name());
}
#endif
}
const char* name() const { return name_; }
bool is_canonical() const { return is_canonical_; }
protected:
void ReadAllocFixedSize(Deserializer* deserializer, intptr_t instance_size);
const char* const name_;
const bool is_canonical_;
const bool is_immutable_;
// The range of the ref array that belongs to this cluster.
intptr_t start_index_;
intptr_t stop_index_;
};
class SerializationRoots {
public:
virtual ~SerializationRoots() {}
virtual void AddBaseObjects(Serializer* serializer) = 0;
virtual void PushRoots(Serializer* serializer) = 0;
virtual void WriteRoots(Serializer* serializer) = 0;
virtual const CompressedStackMaps& canonicalized_stack_map_entries() const;
};
class DeserializationRoots {
public:
virtual ~DeserializationRoots() {}
virtual void AddBaseObjects(Deserializer* deserializer) = 0;
virtual void ReadRoots(Deserializer* deserializer) = 0;
virtual void PostLoad(Deserializer* deserializer, const Array& refs) = 0;
};
// Reference value for objects that either are not reachable from the roots or
// should never have a reference in the snapshot (because they are dropped,
// for example). Should be the default value for Heap::GetObjectId.
static constexpr intptr_t kUnreachableReference = 0;
COMPILE_ASSERT(kUnreachableReference == WeakTable::kNoValue);
static constexpr intptr_t kFirstReference = 1;
// Reference value for traced objects that have not been allocated their final
// reference ID.
static constexpr intptr_t kUnallocatedReference = -1;
static constexpr bool IsAllocatedReference(intptr_t ref) {
return ref > kUnreachableReference;
}
static constexpr bool IsArtificialReference(intptr_t ref) {
return ref < kUnallocatedReference;
}
static constexpr bool IsReachableReference(intptr_t ref) {
return ref == kUnallocatedReference || IsAllocatedReference(ref);
}
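// Exposition-only consistency checks: reference values partition into
// unreachable (0), unallocated (-1), artificial (< -1) and allocated (> 0).
COMPILE_ASSERT(!IsReachableReference(kUnreachableReference));
COMPILE_ASSERT(IsReachableReference(kUnallocatedReference));
COMPILE_ASSERT(IsAllocatedReference(kFirstReference));
COMPILE_ASSERT(IsArtificialReference(kUnallocatedReference - 1));
COMPILE_ASSERT(!IsArtificialReference(kUnallocatedReference));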
class CodeSerializationCluster;
class Serializer : public ThreadStackResource {
public:
Serializer(Thread* thread,
Snapshot::Kind kind,
NonStreamingWriteStream* stream,
ImageWriter* image_writer_,
bool vm_,
V8SnapshotProfileWriter* profile_writer = nullptr);
~Serializer();
void AddBaseObject(ObjectPtr base_object,
const char* type = nullptr,
const char* name = nullptr);
intptr_t AssignRef(ObjectPtr object);
intptr_t AssignArtificialRef(ObjectPtr object = nullptr);
intptr_t GetCodeIndex(CodePtr code);
void Push(ObjectPtr object, intptr_t cid_override = kIllegalCid);
void PushWeak(ObjectPtr object);
void AddUntracedRef() { num_written_objects_++; }
void Trace(ObjectPtr object, intptr_t cid_override);
void UnexpectedObject(ObjectPtr object, const char* message);
#if defined(SNAPSHOT_BACKTRACE)
ObjectPtr ParentOf(ObjectPtr object) const;
ObjectPtr ParentOf(const Object& object) const;
#endif
SerializationCluster* NewClusterForClass(intptr_t cid, bool is_canonical);
void ReserveHeader() {
// Make room for recording snapshot buffer size.
stream_->SetPosition(Snapshot::kHeaderSize);
}
void FillHeader(Snapshot::Kind kind) {
Snapshot* header = reinterpret_cast<Snapshot*>(stream_->buffer());
header->set_magic();
header->set_length(stream_->bytes_written());
header->set_kind(kind);
}
void WriteVersionAndFeatures(bool is_vm_snapshot);
ZoneGrowableArray<Object*>* Serialize(SerializationRoots* roots);
void PrintSnapshotSizes();
NonStreamingWriteStream* stream() { return stream_; }
intptr_t bytes_written() { return stream_->bytes_written(); }
intptr_t bytes_heap_allocated() { return bytes_heap_allocated_; }
class WritingObjectScope : ValueObject {
public:
WritingObjectScope(Serializer* serializer,
const char* type,
ObjectPtr object,
StringPtr name)
: WritingObjectScope(
serializer,
ReserveId(serializer,
type,
object,
String::ToCString(serializer->thread(), name)),
object) {}
WritingObjectScope(Serializer* serializer,
const char* type,
ObjectPtr object,
const char* name)
: WritingObjectScope(serializer,
ReserveId(serializer, type, object, name),
object) {}
WritingObjectScope(Serializer* serializer,
const V8SnapshotProfileWriter::ObjectId& id,
ObjectPtr object = nullptr);
WritingObjectScope(Serializer* serializer, ObjectPtr object)
: WritingObjectScope(serializer,
serializer->GetProfileId(object),
object) {}
~WritingObjectScope();
private:
static V8SnapshotProfileWriter::ObjectId ReserveId(Serializer* serializer,
const char* type,
ObjectPtr object,
const char* name);
private:
Serializer* const serializer_;
const ObjectPtr old_object_;
const V8SnapshotProfileWriter::ObjectId old_id_;
const classid_t old_cid_;
};
// Writes raw data to the stream (basic type).
// sizeof(T) must be in {1,2,4,8}.
template <typename T>
void Write(T value) {
BaseWriteStream::Raw<sizeof(T), T>::Write(stream_, value);
}
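// For example, Write<int32_t>(cid) always emits exactly four bytes, while
// WriteUnsigned below uses a compact variable-length encoding for
// non-negative values.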
void WriteRefId(intptr_t value) { stream_->WriteRefId(value); }
void WriteUnsigned(intptr_t value) { stream_->WriteUnsigned(value); }
void WriteUnsigned64(uint64_t value) { stream_->WriteUnsigned(value); }
void WriteWordWith32BitWrites(uword value) {
stream_->WriteWordWith32BitWrites(value);
}
void WriteBytes(const void* addr, intptr_t len) {
stream_->WriteBytes(addr, len);
}
void Align(intptr_t alignment, intptr_t offset = 0) {
stream_->Align(alignment, offset);
}
V8SnapshotProfileWriter::ObjectId GetProfileId(ObjectPtr object) const;
V8SnapshotProfileWriter::ObjectId GetProfileId(intptr_t ref) const;
void WriteRootRef(ObjectPtr object, const char* name = nullptr) {
intptr_t id = RefId(object);
WriteRefId(id);
if (profile_writer_ != nullptr) {
profile_writer_->AddRoot(GetProfileId(object), name);
}
}
// Record a reference from the currently written object to the given object
// and return reference id for the given object.
void AttributeReference(ObjectPtr object,
const V8SnapshotProfileWriter::Reference& reference);
void AttributeElementRef(ObjectPtr object, intptr_t index) {
AttributeReference(object,
V8SnapshotProfileWriter::Reference::Element(index));
}
void WriteElementRef(ObjectPtr object, intptr_t index) {
AttributeElementRef(object, index);
WriteRefId(RefId(object));
}
void AttributePropertyRef(ObjectPtr object, const char* property) {
AttributeReference(object,
V8SnapshotProfileWriter::Reference::Property(property));
}
void WritePropertyRef(ObjectPtr object, const char* property) {
AttributePropertyRef(object, property);
WriteRefId(RefId(object));
}
void WriteOffsetRef(ObjectPtr object, intptr_t offset) {
intptr_t id = RefId(object);
WriteRefId(id);
if (profile_writer_ != nullptr) {
if (auto const property = offsets_table_->FieldNameForOffset(
object_currently_writing_.cid_, offset)) {
AttributePropertyRef(object, property);
} else {
AttributeElementRef(object, offset);
}
}
}
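// Writes every compressed pointer field of [obj] between untag()->from()
// and untag()->to_snapshot(kind()) as a reference, attributing each field
// to its byte offset within the object for the V8 snapshot profile.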
template <typename T, typename... P>
void WriteFromTo(T obj, P&&... args) {
auto* from = obj->untag()->from();
auto* to = obj->untag()->to_snapshot(kind(), args...);
WriteRange(obj, from, to);
}
template <typename T>
DART_NOINLINE void WriteRange(ObjectPtr obj, T from, T to) {
for (auto* p = from; p <= to; p++) {
WriteOffsetRef(
p->Decompress(obj->heap_base()),
reinterpret_cast<uword>(p) - reinterpret_cast<uword>(obj->untag()));
}
}
template <typename T, typename... P>
void PushFromTo(T obj, P&&... args) {
auto* from = obj->untag()->from();
auto* to = obj->untag()->to_snapshot(kind(), args...);
PushRange(obj, from, to);
}
template <typename T>
DART_NOINLINE void PushRange(ObjectPtr obj, T from, T to) {
for (auto* p = from; p <= to; p++) {
Push(p->Decompress(obj->heap_base()));
}
}
void WriteTokenPosition(TokenPosition pos) { Write(pos.Serialize()); }
void WriteCid(intptr_t cid) {
COMPILE_ASSERT(UntaggedObject::kClassIdTagSize <= 32);
Write<int32_t>(cid);
}
// Sorts Code objects and reorders instructions before writing the snapshot.
// Builds a binary search table for stack maps.
void PrepareInstructions(const CompressedStackMaps& canonical_smap);
void WriteInstructions(InstructionsPtr instr,
uint32_t unchecked_offset,
CodePtr code,
bool deferred);
uint32_t GetDataOffset(ObjectPtr object) const;
void TraceDataOffset(uint32_t offset);
intptr_t GetDataSize() const;
void WriteDispatchTable(const Array& entries);
Heap* heap() const { return heap_; }
Zone* zone() const { return zone_; }
Snapshot::Kind kind() const { return kind_; }
intptr_t next_ref_index() const { return next_ref_index_; }
void DumpCombinedCodeStatistics();
V8SnapshotProfileWriter* profile_writer() const { return profile_writer_; }
// If the given [obj] was not included in the snapshot and does not yet have
// an artificial node created for it, creates an artificial node in the
// profile representing this object.
// Returns true if [obj] has an artificial profile node associated with it.
bool CreateArtificialNodeIfNeeded(ObjectPtr obj);
bool InCurrentLoadingUnitOrRoot(ObjectPtr obj);
void RecordDeferredCode(CodePtr ptr);
GrowableArray<LoadingUnitSerializationData*>* loading_units() const {
return loading_units_;
}
void set_loading_units(GrowableArray<LoadingUnitSerializationData*>* units) {
loading_units_ = units;
}
intptr_t current_loading_unit_id() const { return current_loading_unit_id_; }
void set_current_loading_unit_id(intptr_t id) {
current_loading_unit_id_ = id;
}
// Returns the reference ID for the object. Fails for objects that have not
// been allocated a reference ID yet, so should be used only after all
// WriteAlloc calls.
intptr_t RefId(ObjectPtr object) const;
// Same as RefId, but allows artificial and unreachable references. Still
// fails for unallocated references.
intptr_t UnsafeRefId(ObjectPtr object) const;
// Whether the object is reachable.
bool IsReachable(ObjectPtr object) const {
return IsReachableReference(heap_->GetObjectId(object));
}
// Whether the object has an allocated reference.
bool HasRef(ObjectPtr object) const {
return IsAllocatedReference(heap_->GetObjectId(object));
}
// Whether the object only appears in the V8 snapshot profile.
bool HasArtificialRef(ObjectPtr object) const {
return IsArtificialReference(heap_->GetObjectId(object));
}
// Whether a node for the object has already been added to the V8 snapshot
// profile.
bool HasProfileNode(ObjectPtr object) const {
ASSERT(profile_writer_ != nullptr);
return profile_writer_->HasId(GetProfileId(object));
}
bool IsWritten(ObjectPtr object) const {
return heap_->GetObjectId(object) > num_base_objects_;
}
private:
const char* ReadOnlyObjectType(intptr_t cid);
void FlushProfile();
Heap* heap_;
Zone* zone_;
Snapshot::Kind kind_;
NonStreamingWriteStream* stream_;
ImageWriter* image_writer_;
SerializationCluster** canonical_clusters_by_cid_;
SerializationCluster** clusters_by_cid_;
CodeSerializationCluster* code_cluster_ = nullptr;
struct StackEntry {
ObjectPtr obj;
intptr_t cid_override;
};
GrowableArray<StackEntry> stack_;
intptr_t num_cids_;
intptr_t num_tlc_cids_;
intptr_t num_base_objects_;
intptr_t num_written_objects_;
intptr_t next_ref_index_;
intptr_t dispatch_table_size_ = 0;
intptr_t bytes_heap_allocated_ = 0;
intptr_t instructions_table_len_ = 0;
intptr_t instructions_table_rodata_offset_ = 0;
// True if writing VM snapshot, false for Isolate snapshot.
bool vm_;
V8SnapshotProfileWriter* profile_writer_ = nullptr;
struct ProfilingObject {
ObjectPtr object_ = nullptr;
// Unless within a WritingObjectScope, any bytes written are attributed to
// the artificial root.
V8SnapshotProfileWriter::ObjectId id_ =
V8SnapshotProfileWriter::kArtificialRootId;
intptr_t last_stream_position_ = 0;
intptr_t cid_ = -1;
} object_currently_writing_;
OffsetsTable* offsets_table_ = nullptr;
#if defined(SNAPSHOT_BACKTRACE)
ObjectPtr current_parent_;
GrowableArray<Object*> parent_pairs_;
#endif
#if defined(DART_PRECOMPILER)
IntMap<intptr_t> deduped_instructions_sources_;
IntMap<intptr_t> code_index_;
#endif
intptr_t current_loading_unit_id_ = 0;
GrowableArray<LoadingUnitSerializationData*>* loading_units_ = nullptr;
ZoneGrowableArray<Object*>* objects_ = new ZoneGrowableArray<Object*>();
DISALLOW_IMPLICIT_CONSTRUCTORS(Serializer);
};
#define AutoTraceObject(obj) \
Serializer::WritingObjectScope scope_##__COUNTER__(s, name(), obj, nullptr)
#define AutoTraceObjectName(obj, str) \
Serializer::WritingObjectScope scope_##__COUNTER__(s, name(), obj, str)
#define WriteFieldValue(field, value) s->WritePropertyRef(value, #field);
#define WriteFromTo(obj, ...) s->WriteFromTo(obj, ##__VA_ARGS__);
#define PushFromTo(obj, ...) s->PushFromTo(obj, ##__VA_ARGS__);
#define WriteField(obj, field) s->WritePropertyRef(obj->untag()->field, #field)
#define WriteCompressedField(obj, name) \
s->WritePropertyRef(obj->untag()->name(), #name "_")
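// These macros assume a local Serializer* named |s|. The resulting
// WritingObjectScope attributes all bytes written while it is live to the
// given object in the V8 snapshot profile.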
class Deserializer : public ThreadStackResource {
public:
Deserializer(Thread* thread,
Snapshot::Kind kind,
const uint8_t* buffer,
intptr_t size,
const uint8_t* data_buffer,
const uint8_t* instructions_buffer,
bool is_non_root_unit,
intptr_t offset = 0);
~Deserializer();
// Verifies the image alignment.
//
// Returns ApiError::null() on success and an ApiError with an appropriate
// message otherwise.
ApiErrorPtr VerifyImageAlignment();
ObjectPtr Allocate(intptr_t size);
static void InitializeHeader(ObjectPtr raw,
intptr_t cid,
intptr_t size,
bool is_canonical = false) {
InitializeHeader(raw, cid, size, is_canonical,
ShouldHaveImmutabilityBitSetCid(cid));
}
static void InitializeHeader(ObjectPtr raw,
intptr_t cid,
intptr_t size,
bool is_canonical,
bool is_immutable);
// Reads raw data (for basic types).
// sizeof(T) must be in {1,2,4,8}.
template <typename T>
T Read() {
return ReadStream::Raw<sizeof(T), T>::Read(&stream_);
}
intptr_t ReadRefId() { return stream_.ReadRefId(); }
intptr_t ReadUnsigned() { return stream_.ReadUnsigned(); }
uint64_t ReadUnsigned64() { return stream_.ReadUnsigned<uint64_t>(); }
void ReadBytes(uint8_t* addr, intptr_t len) { stream_.ReadBytes(addr, len); }
uword ReadWordWith32BitReads() { return stream_.ReadWordWith32BitReads(); }
intptr_t position() const { return stream_.Position(); }
void set_position(intptr_t p) { stream_.SetPosition(p); }
const uint8_t* AddressOfCurrentPosition() const {
return stream_.AddressOfCurrentPosition();
}
void Advance(intptr_t value) { stream_.Advance(value); }
void Align(intptr_t alignment, intptr_t offset = 0) {
stream_.Align(alignment, offset);
}
void AddBaseObject(ObjectPtr base_object) { AssignRef(base_object); }
void AssignRef(ObjectPtr object) {
ASSERT(next_ref_index_ <= num_objects_);
refs_->untag()->data()[next_ref_index_] = object;
next_ref_index_++;
}
ObjectPtr Ref(intptr_t index) const {
ASSERT(index > 0);
ASSERT(index <= num_objects_);
return refs_->untag()->element(index);
}
CodePtr GetCodeByIndex(intptr_t code_index, uword* entry_point) const;
uword GetEntryPointByCodeIndex(intptr_t code_index) const;
// If |code_index| corresponds to a non-discarded Code object, returns the
// index within the code cluster that corresponds to this Code object.
// Otherwise, if |code_index| corresponds to discarded Code, returns -1.
static intptr_t CodeIndexToClusterIndex(const InstructionsTable& table,
intptr_t code_index);
ObjectPtr ReadRef() { return Ref(ReadRefId()); }
TokenPosition ReadTokenPosition() {
return TokenPosition::Deserialize(Read<int32_t>());
}
intptr_t ReadCid() {
COMPILE_ASSERT(UntaggedObject::kClassIdTagSize <= 32);
return Read<int32_t>();
}
void ReadInstructions(CodePtr code, bool deferred);
void EndInstructions();
ObjectPtr GetObjectAt(uint32_t offset) const;
void Deserialize(DeserializationRoots* roots);
DeserializationCluster* ReadCluster();
void ReadDispatchTable() {
ReadDispatchTable(&stream_, /*deferred=*/false, InstructionsTable::Handle(),
-1, -1);
}
void ReadDispatchTable(ReadStream* stream,
bool deferred,
const InstructionsTable& root_instruction_table,
intptr_t deferred_code_start_index,
intptr_t deferred_code_end_index);
intptr_t next_index() const { return next_ref_index_; }
Heap* heap() const { return heap_; }
Zone* zone() const { return zone_; }
Snapshot::Kind kind() const {
#if defined(DART_PRECOMPILED_RUNTIME)
return Snapshot::kFullAOT;
#else
return kind_;
#endif
}
bool is_non_root_unit() const { return is_non_root_unit_; }
void set_code_start_index(intptr_t value) { code_start_index_ = value; }
intptr_t code_start_index() const { return code_start_index_; }
void set_code_stop_index(intptr_t value) { code_stop_index_ = value; }
intptr_t code_stop_index() const { return code_stop_index_; }
const InstructionsTable& instructions_table() const {
return instructions_table_;
}
intptr_t num_base_objects() const { return num_base_objects_; }
// This serves to make the snapshot cursor, ref table, and null be locals
// during ReadFill, which allows the C++ compiler to see they are not aliased
// and can be kept in registers.
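// A typical ReadFill therefore begins with
//   Deserializer::Local d(d_);
// and routes all reads and ref lookups through |d|; the destructor writes
// the stream cursor back into the Deserializer.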
class Local : public ReadStream {
public:
explicit Local(Deserializer* d)
: ReadStream(d->stream_.buffer_, d->stream_.current_, d->stream_.end_),
d_(d),
refs_(d->refs_),
null_(Object::null()) {
#if defined(DEBUG)
// Can't mix use of Deserializer::Read*.
d->stream_.current_ = nullptr;
#endif
}
~Local() { d_->stream_.current_ = current_; }
ObjectPtr Ref(intptr_t index) const {
ASSERT(index > 0);
ASSERT(index <= d_->num_objects_);
return refs_->untag()->element(index);
}
template <typename T>
T Read() {
return ReadStream::Raw<sizeof(T), T>::Read(this);
}
uint64_t ReadUnsigned64() { return ReadUnsigned<uint64_t>(); }
ObjectPtr ReadRef() { return Ref(ReadRefId()); }
TokenPosition ReadTokenPosition() {
return TokenPosition::Deserialize(Read<int32_t>());
}
intptr_t ReadCid() {
COMPILE_ASSERT(UntaggedObject::kClassIdTagSize <= 32);
return Read<int32_t>();
}
template <typename T, typename... P>
void ReadFromTo(T obj, P&&... params) {
auto* from = obj->untag()->from();
auto* to_snapshot = obj->untag()->to_snapshot(d_->kind(), params...);
auto* to = obj->untag()->to(params...);
for (auto* p = from; p <= to_snapshot; p++) {
*p = ReadRef();
}
// This is necessary because, unlike Object::Allocate, the clustered
// deserializer allocates objects without null-initializing them. Instead,
// each deserialization cluster is responsible for initializing every
// field, ensuring that every field is written to exactly once.
for (auto* p = to_snapshot + 1; p <= to; p++) {
*p = null_;
}
}
private:
Deserializer* const d_;
const ArrayPtr refs_;
const ObjectPtr null_;
};
private:
Heap* heap_;
PageSpace* old_space_;
FreeList* freelist_;
Zone* zone_;
Snapshot::Kind kind_;
ReadStream stream_;
ImageReader* image_reader_;
intptr_t num_base_objects_;
intptr_t num_objects_;
intptr_t num_clusters_;
ArrayPtr refs_;
intptr_t next_ref_index_;
intptr_t code_start_index_ = 0;
intptr_t code_stop_index_ = 0;
intptr_t instructions_index_ = 0;
DeserializationCluster** clusters_;
const bool is_non_root_unit_;
InstructionsTable& instructions_table_;
};
DART_FORCE_INLINE
ObjectPtr Deserializer::Allocate(intptr_t size) {
return UntaggedObject::FromAddr(
old_space_->AllocateSnapshotLocked(freelist_, size));
}
void Deserializer::InitializeHeader(ObjectPtr raw,
intptr_t class_id,
intptr_t size,
bool is_canonical,
bool is_immutable) {
ASSERT(Utils::IsAligned(size, kObjectAlignment));
uword tags = 0;
tags = UntaggedObject::ClassIdTag::update(class_id, tags);
tags = UntaggedObject::SizeTag::update(size, tags);
tags = UntaggedObject::CanonicalBit::update(is_canonical, tags);
tags = UntaggedObject::AlwaysSetBit::update(true, tags);
tags = UntaggedObject::NotMarkedBit::update(true, tags);
tags = UntaggedObject::OldAndNotRememberedBit::update(true, tags);
tags = UntaggedObject::NewOrEvacuationCandidateBit::update(false, tags);
tags = UntaggedObject::ImmutableBit::update(is_immutable, tags);
raw->untag()->tags_ = tags;
}
#if !defined(DART_PRECOMPILED_RUNTIME)
void SerializationCluster::WriteAndMeasureAlloc(Serializer* serializer) {
intptr_t start_size = serializer->bytes_written();
intptr_t start_data = serializer->GetDataSize();
intptr_t start_objects = serializer->next_ref_index();
uint32_t tags = UntaggedObject::ClassIdTag::encode(cid_) |
UntaggedObject::CanonicalBit::encode(is_canonical()) |
UntaggedObject::ImmutableBit::encode(is_immutable());
serializer->Write<uint32_t>(tags);
WriteAlloc(serializer);
intptr_t stop_size = serializer->bytes_written();
intptr_t stop_data = serializer->GetDataSize();
intptr_t stop_objects = serializer->next_ref_index();
if (FLAG_print_cluster_information) {
OS::PrintErr("Snapshot 0x%" Pp " (%" Pd "), ", start_size,
stop_size - start_size);
OS::PrintErr("Data 0x%" Pp " (%" Pd "): ", start_data,
stop_data - start_data);
OS::PrintErr("Alloc %s (%" Pd ")\n", name(), stop_objects - start_objects);
}
size_ += (stop_size - start_size) + (stop_data - start_data);
num_objects_ += (stop_objects - start_objects);
if (target_instance_size_ != kSizeVaries) {
target_memory_size_ += num_objects_ * target_instance_size_;
}
}
void SerializationCluster::WriteAndMeasureFill(Serializer* serializer) {
intptr_t start = serializer->bytes_written();
WriteFill(serializer);
intptr_t stop = serializer->bytes_written();
if (FLAG_print_cluster_information) {
OS::PrintErr("Snapshot 0x%" Pp " (%" Pd "): Fill %s\n", start, stop - start,
name());
}
size_ += (stop - start);
}
#endif // !DART_PRECOMPILED_RUNTIME
DART_NOINLINE
void DeserializationCluster::ReadAllocFixedSize(Deserializer* d,
intptr_t instance_size) {
start_index_ = d->next_index();
intptr_t count = d->ReadUnsigned();
for (intptr_t i = 0; i < count; i++) {
d->AssignRef(d->Allocate(instance_size));
}
stop_index_ = d->next_index();
}
#if !defined(DART_PRECOMPILED_RUNTIME)
static UnboxedFieldBitmap CalculateTargetUnboxedFieldsBitmap(
Serializer* s,
intptr_t class_id) {
const auto unboxed_fields_bitmap_host =
s->isolate_group()->class_table()->GetUnboxedFieldsMapAt(class_id);
UnboxedFieldBitmap unboxed_fields_bitmap;
if (unboxed_fields_bitmap_host.IsEmpty() ||
kWordSize == compiler::target::kWordSize) {
unboxed_fields_bitmap = unboxed_fields_bitmap_host;
} else {
ASSERT(kWordSize == 8 && compiler::target::kWordSize == 4);
// A new bitmap is built if the word sizes in the target and
// host are different.
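// For example (illustrative): a host bitmap 0b101, i.e. unboxed fields at
// 64-bit word offsets 0 and 2, becomes the target bitmap 0b11011: each
// unboxed field expands to two 32-bit words while each pointer field stays
// one word.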
unboxed_fields_bitmap.Reset();
intptr_t target_i = 0, host_i = 0;
while (host_i < UnboxedFieldBitmap::Length()) {
// Each unboxed field has constant length, therefore the number of
// words used by it should double when compiling from 64-bit to 32-bit.
if (unboxed_fields_bitmap_host.Get(host_i++)) {
unboxed_fields_bitmap.Set(target_i++);
unboxed_fields_bitmap.Set(target_i++);
} else {
// For object pointers, the field is always one word in length.
target_i++;
}
}
}
return unboxed_fields_bitmap;
}
class ClassSerializationCluster : public SerializationCluster {
public:
explicit ClassSerializationCluster(intptr_t num_cids)
: SerializationCluster("Class",
kClassCid,
compiler::target::Class::InstanceSize()),
predefined_(kNumPredefinedCids),
objects_(num_cids) {}
~ClassSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
ClassPtr cls = Class::RawCast(object);
intptr_t class_id = cls->untag()->id_;
if (class_id == kIllegalCid) {
// Classes expected to be dropped by the precompiler should not be traced.
s->UnexpectedObject(cls, "Class with illegal cid");
}
if (class_id < kNumPredefinedCids) {
// These classes are allocated by Object::Init or Object::InitOnce, so the
// deserializer must find them in the class table instead of allocating
// them.
predefined_.Add(cls);
} else {
objects_.Add(cls);
}
PushFromTo(cls);
}
void WriteAlloc(Serializer* s) {
intptr_t count = predefined_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
ClassPtr cls = predefined_[i];
s->AssignRef(cls);
AutoTraceObject(cls);
intptr_t class_id = cls->untag()->id_;
s->WriteCid(class_id);
}
count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
ClassPtr cls = objects_[i];
s->AssignRef(cls);
}
}
void WriteFill(Serializer* s) {
intptr_t count = predefined_.length();
for (intptr_t i = 0; i < count; i++) {
WriteClass(s, predefined_[i]);
}
count = objects_.length();
for (intptr_t i = 0; i < count; i++) {
WriteClass(s, objects_[i]);
}
}
private:
void WriteClass(Serializer* s, ClassPtr cls) {
AutoTraceObjectName(cls, cls->untag()->name());
WriteFromTo(cls);
intptr_t class_id = cls->untag()->id_;
if (class_id == kIllegalCid) {
s->UnexpectedObject(cls, "Class with illegal cid");
}
s->WriteCid(class_id);
if (s->kind() != Snapshot::kFullAOT) {
s->Write<uint32_t>(cls->untag()->kernel_offset_);
}
s->Write<int32_t>(Class::target_instance_size_in_words(cls));
s->Write<int32_t>(Class::target_next_field_offset_in_words(cls));
s->Write<int32_t>(Class::target_type_arguments_field_offset_in_words(cls));
s->Write<int16_t>(cls->untag()->num_type_arguments_);
s->Write<uint16_t>(cls->untag()->num_native_fields_);
if (s->kind() != Snapshot::kFullAOT) {
s->WriteTokenPosition(cls->untag()->token_pos_);
s->WriteTokenPosition(cls->untag()->end_token_pos_);
s->WriteCid(cls->untag()->implementor_cid_);
}
s->Write<uint32_t>(cls->untag()->state_bits_);
if (!ClassTable::IsTopLevelCid(class_id)) {
const auto unboxed_fields_map =
CalculateTargetUnboxedFieldsBitmap(s, class_id);
s->WriteUnsigned64(unboxed_fields_map.Value());
}
}
GrowableArray<ClassPtr> predefined_;
GrowableArray<ClassPtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
class ClassDeserializationCluster : public DeserializationCluster {
public:
ClassDeserializationCluster() : DeserializationCluster("Class") {}
~ClassDeserializationCluster() {}
void ReadAlloc(Deserializer* d) override {
predefined_start_index_ = d->next_index();
intptr_t count = d->ReadUnsigned();
ClassTable* table = d->isolate_group()->class_table();
for (intptr_t i = 0; i < count; i++) {
intptr_t class_id = d->ReadCid();
ASSERT(table->HasValidClassAt(class_id));
ClassPtr cls = table->At(class_id);
ASSERT(cls != nullptr);
d->AssignRef(cls);
}
predefined_stop_index_ = d->next_index();
start_index_ = d->next_index();
count = d->ReadUnsigned();
for (intptr_t i = 0; i < count; i++) {
d->AssignRef(d->Allocate(Class::InstanceSize()));
}
stop_index_ = d->next_index();
}
void ReadFill(Deserializer* d_) override {
Deserializer::Local d(d_);
for (intptr_t id = predefined_start_index_; id < predefined_stop_index_;
id++) {
ClassPtr cls = static_cast<ClassPtr>(d.Ref(id));
d.ReadFromTo(cls);
intptr_t class_id = d.ReadCid();
cls->untag()->id_ = class_id;
#if !defined(DART_PRECOMPILED_RUNTIME)
ASSERT(d_->kind() != Snapshot::kFullAOT);
cls->untag()->kernel_offset_ = d.Read<uint32_t>();
#endif
if (!IsInternalVMdefinedClassId(class_id)) {
cls->untag()->host_instance_size_in_words_ = d.Read<int32_t>();
cls->untag()->host_next_field_offset_in_words_ = d.Read<int32_t>();
#if defined(DART_PRECOMPILER)
// Only one pair is serialized. The target field only exists when
// DART_PRECOMPILER is defined.
cls->untag()->target_instance_size_in_words_ =
cls->untag()->host_instance_size_in_words_;
cls->untag()->target_next_field_offset_in_words_ =
cls->untag()->host_next_field_offset_in_words_;
#endif // defined(DART_PRECOMPILER)
} else {
d.Read<int32_t>(); // Skip.
d.Read<int32_t>(); // Skip.
}
cls->untag()->host_type_arguments_field_offset_in_words_ =
d.Read<int32_t>();
#if defined(DART_PRECOMPILER)
cls->untag()->target_type_arguments_field_offset_in_words_ =
cls->untag()->host_type_arguments_field_offset_in_words_;
#endif // defined(DART_PRECOMPILER)
cls->untag()->num_type_arguments_ = d.Read<int16_t>();
cls->untag()->num_native_fields_ = d.Read<uint16_t>();
#if !defined(DART_PRECOMPILED_RUNTIME)
ASSERT(d_->kind() != Snapshot::kFullAOT);
cls->untag()->token_pos_ = d.ReadTokenPosition();
cls->untag()->end_token_pos_ = d.ReadTokenPosition();
cls->untag()->implementor_cid_ = d.ReadCid();
#endif // !defined(DART_PRECOMPILED_RUNTIME)
cls->untag()->state_bits_ = d.Read<uint32_t>();
d.ReadUnsigned64(); // Skip unboxed fields bitmap.
}
ClassTable* table = d_->isolate_group()->class_table();
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
ClassPtr cls = static_cast<ClassPtr>(d.Ref(id));
Deserializer::InitializeHeader(cls, kClassCid, Class::InstanceSize());
d.ReadFromTo(cls);
intptr_t class_id = d.ReadCid();
ASSERT(class_id >= kNumPredefinedCids);
cls->untag()->id_ = class_id;
#if !defined(DART_PRECOMPILED_RUNTIME)
ASSERT(d_->kind() != Snapshot::kFullAOT);
cls->untag()->kernel_offset_ = d.Read<uint32_t>();
#endif
cls->untag()->host_instance_size_in_words_ = d.Read<int32_t>();
cls->untag()->host_next_field_offset_in_words_ = d.Read<int32_t>();
cls->untag()->host_type_arguments_field_offset_in_words_ =
d.Read<int32_t>();
#if defined(DART_PRECOMPILER)
cls->untag()->target_instance_size_in_words_ =
cls->untag()->host_instance_size_in_words_;
cls->untag()->target_next_field_offset_in_words_ =
cls->untag()->host_next_field_offset_in_words_;
cls->untag()->target_type_arguments_field_offset_in_words_ =
cls->untag()->host_type_arguments_field_offset_in_words_;
#endif // defined(DART_PRECOMPILER)
cls->untag()->num_type_arguments_ = d.Read<int16_t>();
cls->untag()->num_native_fields_ = d.Read<uint16_t>();
#if !defined(DART_PRECOMPILED_RUNTIME)
ASSERT(d_->kind() != Snapshot::kFullAOT);
cls->untag()->token_pos_ = d.ReadTokenPosition();
cls->untag()->end_token_pos_ = d.ReadTokenPosition();
cls->untag()->implementor_cid_ = d.ReadCid();
#endif // !defined(DART_PRECOMPILED_RUNTIME)
cls->untag()->state_bits_ = d.Read<uint32_t>();
table->AllocateIndex(class_id);
table->SetAt(class_id, cls);
if (!ClassTable::IsTopLevelCid(class_id)) {
const UnboxedFieldBitmap unboxed_fields_map(d.ReadUnsigned64());
table->SetUnboxedFieldsMapAt(class_id, unboxed_fields_map);
}
}
}
private:
intptr_t predefined_start_index_;
intptr_t predefined_stop_index_;
};
// Superclasses for writing out clusters which contain objects grouped into
// a canonical set (e.g. String, Type, TypeArguments, etc.).
// To save space in the snapshot we avoid writing such canonical sets
// explicitly as Array objects and instead utilize a different encoding:
// objects in a cluster representing a canonical set are sorted to appear in
// the same order they appear in the Array representing the set, and we
// additionally write out an array of values describing the gaps between
// objects.
//
// In some situations not all canonical objects of the same type need to
// be added to the resulting canonical set, because they are cached in some
// special way (see Type::Canonicalize as an example, which caches declaration
// types specially). In this case a subclass can set
// kAllCanonicalObjectsAreIncludedIntoSet to |false| and override the
// IsInCanonicalSet filter.
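// For example (illustrative): with kFirstKeyIndex == 2 and occupied table
// slots at indices {3, 6, 7}, the cluster writes the three objects in slot
// order along with the gap list [1, 2, 0], i.e. the number of unused slots
// preceding each object. The deserializer replays these gaps to reconstruct
// an identical table without rehashing (see
// CanonicalSetDeserializationCluster::BuildCanonicalSetFromLayout).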
#if !defined(DART_PRECOMPILED_RUNTIME)
template <typename SetType,
typename HandleType,
typename PointerType,
bool kAllCanonicalObjectsAreIncludedIntoSet = true>
class CanonicalSetSerializationCluster : public SerializationCluster {
protected:
CanonicalSetSerializationCluster(intptr_t cid,
bool is_canonical,
bool represents_canonical_set,
const char* name,
intptr_t target_instance_size = 0)
: SerializationCluster(name, cid, target_instance_size, is_canonical),
represents_canonical_set_(represents_canonical_set) {}
virtual bool IsInCanonicalSet(Serializer* s, PointerType ptr) {
// Must override this function if kAllCanonicalObjectsAreIncludedIntoSet
// is set to |false|.
ASSERT(kAllCanonicalObjectsAreIncludedIntoSet);
return true;
}
void ReorderObjects(Serializer* s) {
if (!represents_canonical_set_) {
return;
}
// Sort objects before writing them out so that they appear in the same
// order as they would appear in a CanonicalStringSet.
using ZoneCanonicalSet =
HashTable<typename SetType::Traits, 0, 0, GrowableArrayStorageTraits>;
// Compute required capacity for the hashtable (to avoid overallocating).
intptr_t required_capacity = 0;
for (auto ptr : objects_) {
if (kAllCanonicalObjectsAreIncludedIntoSet || IsInCanonicalSet(s, ptr)) {
required_capacity++;
}
}
// Over-allocate capacity so a few inserts can happen at startup without
// causing a rehash.
const intptr_t kSpareCapacity = 32;
required_capacity = static_cast<intptr_t>(
static_cast<double>(required_capacity + kSpareCapacity) /
HashTables::kMaxLoadFactor);
intptr_t num_occupied = 0;
// Build canonical set out of objects that should belong to it.
// Objects that don't belong to it are copied to the prefix of objects_.
ZoneCanonicalSet table(
s->zone(), HashTables::New<ZoneCanonicalSet>(required_capacity));
HandleType& element = HandleType::Handle(s->zone());
for (auto ptr : objects_) {
if (kAllCanonicalObjectsAreIncludedIntoSet || IsInCanonicalSet(s, ptr)) {
element ^= ptr;
intptr_t entry = -1;
const bool present = table.FindKeyOrDeletedOrUnused(element, &entry);
ASSERT(!present);
table.InsertKey(entry, element);
} else {
objects_[num_occupied++] = ptr;
}
}
const auto prefix_length = num_occupied;
// Compute objects_ order and gaps based on canonical set layout.
auto& arr = table.Release();
intptr_t last_occupied = ZoneCanonicalSet::kFirstKeyIndex - 1;
for (intptr_t i = ZoneCanonicalSet::kFirstKeyIndex, length = arr.Length();
i < length; i++) {
ObjectPtr v = arr.At(i);
ASSERT(v != ZoneCanonicalSet::DeletedMarker().ptr());
if (v != ZoneCanonicalSet::UnusedMarker().ptr()) {
const intptr_t unused_run_length = (i - 1) - last_occupied;
gaps_.Add(unused_run_length);
objects_[num_occupied++] = static_cast<PointerType>(v);
last_occupied = i;
}
}
ASSERT(num_occupied == objects_.length());
ASSERT(prefix_length == (objects_.length() - gaps_.length()));
table_length_ = arr.Length();
}
void WriteCanonicalSetLayout(Serializer* s) {
if (represents_canonical_set_) {
s->WriteUnsigned(table_length_);
s->WriteUnsigned(objects_.length() - gaps_.length());
for (auto gap : gaps_) {
s->WriteUnsigned(gap);
}
target_memory_size_ +=
compiler::target::Array::InstanceSize(table_length_);
}
}
GrowableArray<PointerType> objects_;
private:
const bool represents_canonical_set_;
GrowableArray<intptr_t> gaps_;
intptr_t table_length_ = 0;
};
#endif
template <typename SetType, bool kAllCanonicalObjectsAreIncludedIntoSet = true>
class CanonicalSetDeserializationCluster : public DeserializationCluster {
public:
CanonicalSetDeserializationCluster(bool is_canonical,
bool is_root_unit,
const char* name)
: DeserializationCluster(name, is_canonical),
is_root_unit_(is_root_unit),
table_(SetType::ArrayHandle::Handle()) {}
void BuildCanonicalSetFromLayout(Deserializer* d) {
if (!is_root_unit_ || !is_canonical()) {
return;
}
const auto table_length = d->ReadUnsigned();
first_element_ = d->ReadUnsigned();
const intptr_t count = stop_index_ - (start_index_ + first_element_);
auto table = StartDeserialization(d, table_length, count);
for (intptr_t i = start_index_ + first_element_; i < stop_index_; i++) {
table.FillGap(d->ReadUnsigned());
table.WriteElement(d, d->Ref(i));
}
table_ = table.Finish();
}
protected:
const bool is_root_unit_;
intptr_t first_element_;
typename SetType::ArrayHandle& table_;
void VerifyCanonicalSet(Deserializer* d,
const Array& refs,
const typename SetType::ArrayHandle& current_table) {
#if defined(DEBUG)
// First check that we are not overwriting a table and losing information.
if (!current_table.IsNull()) {
SetType current_set(d->zone(), current_table.ptr());
ASSERT(current_set.NumOccupied() == 0);
current_set.Release();
}
// Now check that the manually created table behaves correctly as a
// canonical set.
SetType canonical_set(d->zone(), table_.ptr());
Object& key = Object::Handle();
for (intptr_t i = start_index_ + first_element_; i < stop_index_; i++) {
key = refs.At(i);
ASSERT(canonical_set.GetOrNull(key) != Object::null());
}
canonical_set.Release();
#endif // defined(DEBUG)
}
private:
struct DeserializationFinger {
typename SetType::ArrayPtr table;
intptr_t current_index;
ObjectPtr gap_element;
void FillGap(int length) {
for (intptr_t j = 0; j < length; j++) {
table->untag()->data()[current_index + j] = gap_element;
}
current_index += length;
}
void WriteElement(Deserializer* d, ObjectPtr object) {
table->untag()->data()[current_index++] = object;
}
typename SetType::ArrayPtr Finish() {
if (table != SetType::ArrayHandle::null()) {
FillGap(Smi::Value(table->untag()->length()) - current_index);
}
auto result = table;
table = SetType::ArrayHandle::null();
return result;
}
};
static DeserializationFinger StartDeserialization(Deserializer* d,
intptr_t length,
intptr_t count) {
const intptr_t instance_size = SetType::ArrayHandle::InstanceSize(length);
typename SetType::ArrayPtr table =
static_cast<typename SetType::ArrayPtr>(d->Allocate(instance_size));
Deserializer::InitializeHeader(table, SetType::Storage::ArrayCid,
instance_size);
if ((SetType::Storage::ArrayCid == kArrayCid) &&
Array::UseCardMarkingForAllocation(length)) {
table->untag()->SetCardRememberedBitUnsynchronized();
Page::Of(table)->AllocateCardTable();
}
InitTypeArgsOrNext(table);
table->untag()->length_ = Smi::New(length);
for (intptr_t i = 0; i < SetType::kFirstKeyIndex; i++) {
table->untag()->data()[i] = Smi::New(0);
}
table->untag()->data()[SetType::kOccupiedEntriesIndex] = Smi::New(count);
return {table, SetType::kFirstKeyIndex, SetType::UnusedMarker().ptr()};
}
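// Overload-based dispatch: canonical sets backed by Array must initialize
// the type_arguments_ slot, while sets backed by WeakArray initialize
// next_seen_by_gc_ instead.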
static void InitTypeArgsOrNext(ArrayPtr table) {
table->untag()->type_arguments_ = TypeArguments::null();
}
static void InitTypeArgsOrNext(WeakArrayPtr table) {
table->untag()->next_seen_by_gc_ = WeakArray::null();
}
};
#if !defined(DART_PRECOMPILED_RUNTIME)
class TypeParametersSerializationCluster : public SerializationCluster {
public:
TypeParametersSerializationCluster()
: SerializationCluster("TypeParameters",
kTypeParametersCid,
compiler::target::TypeParameters::InstanceSize()) {
}
~TypeParametersSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
TypeParametersPtr type_params = TypeParameters::RawCast(object);
objects_.Add(type_params);
PushFromTo(type_params);
}
void WriteAlloc(Serializer* s) {
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
TypeParametersPtr type_params = objects_[i];
s->AssignRef(type_params);
}
}
void WriteFill(Serializer* s) {
const intptr_t count = objects_.length();
for (intptr_t i = 0; i < count; i++) {
TypeParametersPtr type_params = objects_[i];
AutoTraceObject(type_params);
WriteFromTo(type_params);
}
}
private:
GrowableArray<TypeParametersPtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
class TypeParametersDeserializationCluster : public DeserializationCluster {
public:
TypeParametersDeserializationCluster()
: DeserializationCluster("TypeParameters") {}
~TypeParametersDeserializationCluster() {}
void ReadAlloc(Deserializer* d) override {
ReadAllocFixedSize(d, TypeParameters::InstanceSize());
}
void ReadFill(Deserializer* d_) override {
Deserializer::Local d(d_);
ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
TypeParametersPtr type_params = static_cast<TypeParametersPtr>(d.Ref(id));
Deserializer::InitializeHeader(type_params, kTypeParametersCid,
TypeParameters::InstanceSize());
d.ReadFromTo(type_params);
}
}
};
#if !defined(DART_PRECOMPILED_RUNTIME)
class TypeArgumentsSerializationCluster
: public CanonicalSetSerializationCluster<CanonicalTypeArgumentsSet,
TypeArguments,
TypeArgumentsPtr> {
public:
TypeArgumentsSerializationCluster(bool is_canonical,
bool represents_canonical_set)
: CanonicalSetSerializationCluster(kTypeArgumentsCid,
is_canonical,
represents_canonical_set,
"TypeArguments") {}
~TypeArgumentsSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
TypeArgumentsPtr type_args = TypeArguments::RawCast(object);
objects_.Add(type_args);
s->Push(type_args->untag()->instantiations());
const intptr_t length = Smi::Value(type_args->untag()->length());
for (intptr_t i = 0; i < length; i++) {
s->Push(type_args->untag()->element(i));
}
}
void WriteAlloc(Serializer* s) {
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
ReorderObjects(s);
for (intptr_t i = 0; i < count; i++) {
TypeArgumentsPtr type_args = objects_[i];
s->AssignRef(type_args);
AutoTraceObject(type_args);
const intptr_t length = Smi::Value(type_args->untag()->length());
s->WriteUnsigned(length);
target_memory_size_ +=
compiler::target::TypeArguments::InstanceSize(length);
}
WriteCanonicalSetLayout(s);
}
void WriteFill(Serializer* s) {
const intptr_t count = objects_.length();
for (intptr_t i = 0; i < count; i++) {
TypeArgumentsPtr type_args = objects_[i];
AutoTraceObject(type_args);
const intptr_t length = Smi::Value(type_args->untag()->length());
s->WriteUnsigned(length);
intptr_t hash = Smi::Value(type_args->untag()->hash());
s->Write<int32_t>(hash);
const intptr_t nullability =
Smi::Value(type_args->untag()->nullability());
s->WriteUnsigned(nullability);
WriteField(type_args, instantiations());
for (intptr_t j = 0; j < length; j++) {
s->WriteElementRef(type_args->untag()->element(j), j);
}
}
}
};
#endif // !DART_PRECOMPILED_RUNTIME
class TypeArgumentsDeserializationCluster
: public CanonicalSetDeserializationCluster<CanonicalTypeArgumentsSet> {
public:
explicit TypeArgumentsDeserializationCluster(bool is_canonical,
bool is_root_unit)
: CanonicalSetDeserializationCluster(is_canonical,
is_root_unit,
"TypeArguments") {}
~TypeArgumentsDeserializationCluster() {}
void ReadAlloc(Deserializer* d) override {
start_index_ = d->next_index();
const intptr_t count = d->ReadUnsigned();
for (intptr_t i = 0; i < count; i++) {
const intptr_t length = d->ReadUnsigned();
d->AssignRef(d->Allocate(TypeArguments::InstanceSize(length)));
}
stop_index_ = d->next_index();
BuildCanonicalSetFromLayout(d);
}
void ReadFill(Deserializer* d_) override {
Deserializer::Local d(d_);
const bool mark_canonical = is_root_unit_ && is_canonical();
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
TypeArgumentsPtr type_args = static_cast<TypeArgumentsPtr>(d.Ref(id));
const intptr_t length = d.ReadUnsigned();
Deserializer::InitializeHeader(type_args, kTypeArgumentsCid,
TypeArguments::InstanceSize(length),
mark_canonical);
type_args->untag()->length_ = Smi::New(length);
type_args->untag()->hash_ = Smi::New(d.Read<int32_t>());
type_args->untag()->nullability_ = Smi::New(d.ReadUnsigned());
type_args->untag()->instantiations_ = static_cast<ArrayPtr>(d.ReadRef());
for (intptr_t j = 0; j < length; j++) {
type_args->untag()->types()[j] =
static_cast<AbstractTypePtr>(d.ReadRef());
}
}
}
void PostLoad(Deserializer* d, const Array& refs) override {
if (!table_.IsNull()) {
auto object_store = d->isolate_group()->object_store();
VerifyCanonicalSet(
d, refs, Array::Handle(object_store->canonical_type_arguments()));
object_store->set_canonical_type_arguments(table_);
} else if (!is_root_unit_ && is_canonical()) {
TypeArguments& type_arg = TypeArguments::Handle(d->zone());
for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
type_arg ^= refs.At(i);
type_arg = type_arg.Canonicalize(d->thread());
refs.SetAt(i, type_arg);
}
}
}
};
#if !defined(DART_PRECOMPILED_RUNTIME)
class PatchClassSerializationCluster : public SerializationCluster {
public:
PatchClassSerializationCluster()
: SerializationCluster("PatchClass",
kPatchClassCid,
compiler::target::PatchClass::InstanceSize()) {}
~PatchClassSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
PatchClassPtr cls = PatchClass::RawCast(object);
objects_.Add(cls);
PushFromTo(cls);
}
void WriteAlloc(Serializer* s) {
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
PatchClassPtr cls = objects_[i];
s->AssignRef(cls);
}
}
void WriteFill(Serializer* s) {
const intptr_t count = objects_.length();
for (intptr_t i = 0; i < count; i++) {
PatchClassPtr cls = objects_[i];
AutoTraceObject(cls);
WriteFromTo(cls);
if (s->kind() != Snapshot::kFullAOT) {
s->Write<int32_t>(cls->untag()->kernel_library_index_);
}
}
}
private:
GrowableArray<PatchClassPtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
class PatchClassDeserializationCluster : public DeserializationCluster {
public:
PatchClassDeserializationCluster() : DeserializationCluster("PatchClass") {}
~PatchClassDeserializationCluster() {}
void ReadAlloc(Deserializer* d) override {
ReadAllocFixedSize(d, PatchClass::InstanceSize());
}
void ReadFill(Deserializer* d_) override {
Deserializer::Local d(d_);
ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
PatchClassPtr cls = static_cast<PatchClassPtr>(d.Ref(id));
Deserializer::InitializeHeader(cls, kPatchClassCid,
PatchClass::InstanceSize());
d.ReadFromTo(cls);
#if !defined(DART_PRECOMPILED_RUNTIME)
ASSERT(d_->kind() != Snapshot::kFullAOT);
cls->untag()->kernel_library_index_ = d.Read<int32_t>();
#endif
}
}
};
#if !defined(DART_PRECOMPILED_RUNTIME)
class FunctionSerializationCluster : public SerializationCluster {
public:
FunctionSerializationCluster()
: SerializationCluster("Function",
kFunctionCid,
compiler::target::Function::InstanceSize()) {}
~FunctionSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
Snapshot::Kind kind = s->kind();
FunctionPtr func = Function::RawCast(object);
objects_.Add(func);
PushFromTo(func);
if (kind == Snapshot::kFullAOT) {
s->Push(func->untag()->code());
} else if (kind == Snapshot::kFullJIT) {
NOT_IN_PRECOMPILED(s->Push(func->untag()->unoptimized_code()));
s->Push(func->untag()->code());
s->Push(func->untag()->ic_data_array_or_bytecode());
}
if (kind != Snapshot::kFullAOT) {
NOT_IN_PRECOMPILED(s->Push(func->untag()->positional_parameter_names()));
}
}
void WriteAlloc(Serializer* s) {
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
FunctionPtr func = objects_[i];
s->AssignRef(func);
}
}
void WriteFill(Serializer* s) {
Snapshot::Kind kind = s->kind();
const intptr_t count = objects_.length();
for (intptr_t i = 0; i < count; i++) {
FunctionPtr func = objects_[i];
AutoTraceObjectName(func, MakeDisambiguatedFunctionName(s, func));
WriteFromTo(func);
if (kind == Snapshot::kFullAOT) {
#if defined(DART_PRECOMPILER)
CodePtr code = func->untag()->code();
const auto code_index = s->GetCodeIndex(code);
s->WriteUnsigned(code_index);
s->AttributePropertyRef(code, "code_");
#else
UNREACHABLE();
#endif
} else if (s->kind() == Snapshot::kFullJIT) {
NOT_IN_PRECOMPILED(WriteCompressedField(func, unoptimized_code));
WriteCompressedField(func, code);
WriteCompressedField(func, ic_data_array_or_bytecode);
}
if (kind != Snapshot::kFullAOT) {
NOT_IN_PRECOMPILED(
WriteCompressedField(func, positional_parameter_names));
}
#if defined(DART_PRECOMPILER) && !defined(PRODUCT)
TokenPosition token_pos = func->untag()->token_pos_;
if (kind == Snapshot::kFullAOT) {
// We use the token_pos property to store the line number
// in AOT snapshots.
intptr_t line = -1;
const Function& function = Function::Handle(func);
const Script& script = Script::Handle(function.script());
if (!script.IsNull()) {
script.GetTokenLocation(token_pos, &line, nullptr);
}
token_pos = line == -1 ? TokenPosition::kNoSource
: TokenPosition::Deserialize(line);
}
s->WriteTokenPosition(token_pos);
#else
if (kind != Snapshot::kFullAOT) {
s->WriteTokenPosition(func->untag()->token_pos_);
}
#endif
if (kind != Snapshot::kFullAOT) {
s->WriteTokenPosition(func->untag()->end_token_pos_);
s->Write<uint32_t>(func->untag()->kernel_offset_);
s->Write<bool>(func->untag()->is_optimizable_);
}
s->Write<uint32_t>(func->untag()->kind_tag_);
}
}
static const char* MakeDisambiguatedFunctionName(Serializer* s,
FunctionPtr f) {
if (s->profile_writer() == nullptr) {
return nullptr;
}
REUSABLE_FUNCTION_HANDLESCOPE(s->thread());
Function& fun = reused_function_handle.Handle();
fun = f;
ZoneTextBuffer printer(s->thread()->zone());
fun.PrintName(NameFormattingParams::DisambiguatedUnqualified(
Object::NameVisibility::kInternalName),
&printer);
return printer.buffer();
}
private:
GrowableArray<FunctionPtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
template <bool need_entry_point_for_non_discarded>
DART_FORCE_INLINE static CodePtr GetCodeAndEntryPointByIndex(
const Deserializer* d,
intptr_t code_index,
uword* entry_point) {
code_index -= 1; // 0 is reserved for LazyCompile stub.
// In the root unit and VM isolate snapshot, code indices are self-contained:
// they point into the instructions table and/or into the code cluster.
// In non-root units we might also refer to code objects from the
// parent unit, which means code_index is biased by num_base_objects_.
const intptr_t base = d->is_non_root_unit() ? d->num_base_objects() : 0;
if (code_index < base) {
CodePtr code = static_cast<CodePtr>(d->Ref(code_index));
if (need_entry_point_for_non_discarded) {
*entry_point = Code::EntryPointOf(code);
}
return code;
}
code_index -= base;
// At this point code_index is referring to a code object which is either
// discarded or exists in the Code cluster. Non-discarded Code objects
// are associated with the tail of the instruction table and have the
// same order there and in the Code cluster. This means that
// subtracting first_entry_with_code yields index into the Code cluster.
// This also works for deferred code objects in root unit's snapshot
// due to the choice of encoding (see Serializer::GetCodeIndex).
const intptr_t first_entry_with_code =
d->instructions_table().rodata()->first_entry_with_code;
if (code_index < first_entry_with_code) {
*entry_point = d->instructions_table().EntryPointAt(code_index);
return StubCode::UnknownDartCode().ptr();
} else {
const intptr_t cluster_index = code_index - first_entry_with_code;
CodePtr code =
static_cast<CodePtr>(d->Ref(d->code_start_index() + cluster_index));
if (need_entry_point_for_non_discarded) {
*entry_point = Code::EntryPointOf(code);
}
return code;
}
}
CodePtr Deserializer::GetCodeByIndex(intptr_t code_index,
uword* entry_point) const {
// See Serializer::GetCodeIndex for how code_index is encoded.
if (code_index == 0) {
return StubCode::LazyCompile().ptr();
} else if (FLAG_precompiled_mode) {
return GetCodeAndEntryPointByIndex<
/*need_entry_point_for_non_discarded=*/false>(this, code_index,
entry_point);
} else {
// -1 below because 0 is reserved for LazyCompile stub.
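// (E.g. code_index 1 denotes the first object in the Code cluster,
// i.e. Ref(code_start_index_).)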
const intptr_t ref = code_start_index_ + code_index - 1;
ASSERT(code_start_index_ <= ref && ref < code_stop_index_);
return static_cast<CodePtr>(Ref(ref));
}
}
intptr_t Deserializer::CodeIndexToClusterIndex(const InstructionsTable& table,
intptr_t code_index) {
// Note: the code indices we are interpreting here originate from the
// root loading unit, which means the base is equal to 0.
// See the comments in GetCodeAndEntryPointByIndex, which clarify the
// connection between code_index and the index into the Code cluster.
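// (Illustrative example: with first_entry_with_code == 3, code_index 5
// maps to cluster index 5 - 1 - 3 = 1, i.e. the second object in the
// Code cluster.)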
ASSERT(FLAG_precompiled_mode);
const intptr_t first_entry_with_code = table.rodata()->first_entry_with_code;
return code_index - 1 - first_entry_with_code;
}
uword Deserializer::GetEntryPointByCodeIndex(intptr_t code_index) const {
// See Deserializer::GetCodeByIndex which this code repeats.
ASSERT(FLAG_precompiled_mode);
uword entry_point = 0;
GetCodeAndEntryPointByIndex</*need_entry_point_for_non_discarded=*/true>(
this, code_index, &entry_point);
return entry_point;
}
class FunctionDeserializationCluster : public DeserializationCluster {
public:
FunctionDeserializationCluster() : DeserializationCluster("Function") {}
~FunctionDeserializationCluster() {}
void ReadAlloc(Deserializer* d) override {
ReadAllocFixedSize(d, Function::InstanceSize());
}
void ReadFill(Deserializer* d_) override {
Deserializer::Local d(d_);
ASSERT(!is_canonical()); // Never canonical.
Snapshot::Kind kind = d_->kind();
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
FunctionPtr func = static_cast<FunctionPtr>(d.Ref(id));
Deserializer::InitializeHeader(func, kFunctionCid,
Function::InstanceSize());
d.ReadFromTo(func);
#if defined(DEBUG)
func->untag()->entry_point_ = 0;
func->untag()->unchecked_entry_point_ = 0;
#endif
#if defined(DART_PRECOMPILED_RUNTIME)
ASSERT(kind == Snapshot::kFullAOT);
const intptr_t code_index = d.ReadUnsigned();
uword entry_point = 0;
CodePtr code = d_->GetCodeByIndex(code_index, &entry_point);
func->untag()->code_ = code;
if (entry_point != 0) {
func->untag()->entry_point_ = entry_point;
func->untag()->unchecked_entry_point_ = entry_point;
}
#else
ASSERT(kind != Snapshot::kFullAOT);
if (kind == Snapshot::kFullJIT) {
func->untag()->unoptimized_code_ = static_cast<CodePtr>(d.ReadRef());
func->untag()->code_ = static_cast<CodePtr>(d.ReadRef());
func->untag()->ic_data_array_or_bytecode_ = d.ReadRef();
}
#endif
#if !defined(DART_PRECOMPILED_RUNTIME)
ASSERT(kind != Snapshot::kFullAOT);
func->untag()->positional_parameter_names_ =
static_cast<ArrayPtr>(d.ReadRef());
#endif
#if !defined(DART_PRECOMPILED_RUNTIME) || \
(defined(DART_PRECOMPILED_RUNTIME) && !defined(PRODUCT))
func->untag()->token_pos_ = d.ReadTokenPosition();
#endif
#if !defined(DART_PRECOMPILED_RUNTIME)
func->untag()->end_token_pos_ = d.ReadTokenPosition();
func->untag()->kernel_offset_ = d.Read<uint32_t>();
func->untag()->unboxed_parameters_info_.Reset();
func->untag()->is_optimizable_ = d.Read<bool>();
#endif
func->untag()->kind_tag_ = d.Read<uint32_t>();
#if !defined(DART_PRECOMPILED_RUNTIME)
func->untag()->usage_counter_ = 0;
func->untag()->optimized_instruction_count_ = 0;
func->untag()->optimized_call_site_count_ = 0;
func->untag()->deoptimization_counter_ = 0;
func->untag()->state_bits_ = 0;
func->untag()->inlining_depth_ = 0;
#endif
}
}
void PostLoad(Deserializer* d, const Array& refs) override {
if (d->kind() == Snapshot::kFullAOT) {
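// Cache each function's entry points from its Code object so that
// calls do not have to go through the Code object. Functions whose
// code was discarded already had their entry points filled in from
// the instructions table during ReadFill.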
Function& func = Function::Handle(d->zone());
for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
func ^= refs.At(i);
auto const code = func.ptr()->untag()->code();
ASSERT(code->IsCode());
if (!Code::IsUnknownDartCode(code)) {
uword entry_point = code->untag()->entry_point_;
ASSERT(entry_point != 0);
func.ptr()->untag()->entry_point_ = entry_point;
uword unchecked_entry_point = code->untag()->unchecked_entry_point_;
ASSERT(unchecked_entry_point != 0);
func.ptr()->untag()->unchecked_entry_point_ = unchecked_entry_point;
}
}
} else if (d->kind() == Snapshot::kFullJIT) {
Function& func = Function::Handle(d->zone());
Code& code = Code::Handle(d->zone());
for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
func ^= refs.At(i);
code = func.CurrentCode();
if (func.HasCode() && !code.IsDisabled()) {
func.SetInstructionsSafe(code); // Set entrypoint.
func.SetWasCompiled(true);
} else {
func.ClearCodeSafe(); // Set code and entrypoint to lazy compile stub.
}
}
} else {
Function& func = Function::Handle(d->zone());
for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
func ^= refs.At(i);
func.ClearCodeSafe(); // Set code and entrypoint to lazy compile stub.
}
}
}
};
#if !defined(DART_PRECOMPILED_RUNTIME)
class ClosureDataSerializationCluster : public SerializationCluster {
public:
ClosureDataSerializationCluster()
: SerializationCluster("ClosureData",
kClosureDataCid,
compiler::target::ClosureData::InstanceSize()) {}
~ClosureDataSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
ClosureDataPtr data = ClosureData::RawCast(object);
objects_.Add(data);
if (s->kind() != Snapshot::kFullAOT) {
s->Push(data->untag()->context_scope());
}
s->Push(data->untag()->parent_function());
s->Push(data->untag()->closure());
}
void WriteAlloc(Serializer* s) {
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
ClosureDataPtr data = objects_[i];
s->AssignRef(data);
}
}
void WriteFill(Serializer* s) {
const intptr_t count = objects_.length();
for (intptr_t i = 0; i < count; i++) {
ClosureDataPtr data = objects_[i];
AutoTraceObject(data);
if (s->kind() != Snapshot::kFullAOT) {
WriteCompressedField(data, context_scope);
}
WriteCompressedField(data, parent_function);
WriteCompressedField(data, closure);
s->WriteUnsigned(static_cast<uint32_t>(data->untag()->packed_fields_));
}
}
private:
GrowableArray<ClosureDataPtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
class ClosureDataDeserializationCluster : public DeserializationCluster {
public:
ClosureDataDeserializationCluster() : DeserializationCluster("ClosureData") {}
~ClosureDataDeserializationCluster() {}
void ReadAlloc(Deserializer* d) override {
ReadAllocFixedSize(d, ClosureData::InstanceSize());
}
void ReadFill(Deserializer* d_) override {
Deserializer::Local d(d_);
ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
ClosureDataPtr data = static_cast<ClosureDataPtr>(d.Ref(id));
Deserializer::InitializeHeader(data, kClosureDataCid,
ClosureData::InstanceSize());
if (d_->kind() == Snapshot::kFullAOT) {
data->untag()->context_scope_ = ContextScope::null();
} else {
data->untag()->context_scope_ =
static_cast<ContextScopePtr>(d.ReadRef());
}
data->untag()->parent_function_ = static_cast<FunctionPtr>(d.ReadRef());
data->untag()->closure_ = static_cast<ClosurePtr>(d.ReadRef());
data->untag()->packed_fields_ = d.ReadUnsigned<uint32_t>();
}
}
};
#if !defined(DART_PRECOMPILED_RUNTIME)
class FfiTrampolineDataSerializationCluster : public SerializationCluster {
public:
FfiTrampolineDataSerializationCluster()
: SerializationCluster(
"FfiTrampolineData",
kFfiTrampolineDataCid,
compiler::target::FfiTrampolineData::InstanceSize()) {}
~FfiTrampolineDataSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
FfiTrampolineDataPtr data = FfiTrampolineData::RawCast(object);
objects_.Add(data);
PushFromTo(data);
}
void WriteAlloc(Serializer* s) {
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
s->AssignRef(objects_[i]);
}
}
void WriteFill(Serializer* s) {
const intptr_t count = objects_.length();
for (intptr_t i = 0; i < count; i++) {
FfiTrampolineDataPtr const data = objects_[i];
AutoTraceObject(data);
WriteFromTo(data);
s->Write<int32_t>(data->untag()->callback_id_);
s->Write<uint8_t>(data->untag()->ffi_function_kind_);
}
}
private:
GrowableArray<FfiTrampolineDataPtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
class FfiTrampolineDataDeserializationCluster : public DeserializationCluster {
public:
FfiTrampolineDataDeserializationCluster()
: DeserializationCluster("FfiTrampolineData") {}
~FfiTrampolineDataDeserializationCluster() {}
void ReadAlloc(Deserializer* d) override {
ReadAllocFixedSize(d, FfiTrampolineData::InstanceSize());
}
void ReadFill(Deserializer* d_) override {
Deserializer::Local d(d_);
ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
FfiTrampolineDataPtr data = static_cast<FfiTrampolineDataPtr>(d.Ref(id));
Deserializer::InitializeHeader(data, kFfiTrampolineDataCid,
FfiTrampolineData::InstanceSize());
d.ReadFromTo(data);
data->untag()->callback_id_ = d.Read<int32_t>();
data->untag()->ffi_function_kind_ = d.Read<uint8_t>();
}
}
};
#if !defined(DART_PRECOMPILED_RUNTIME)
class FieldSerializationCluster : public SerializationCluster {
public:
FieldSerializationCluster()
: SerializationCluster("Field",
kFieldCid,
compiler::target::Field::InstanceSize()) {}
~FieldSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
FieldPtr field = Field::RawCast(object);
objects_.Add(field);
Snapshot::Kind kind = s->kind();
s->Push(field->untag()->name());
s->Push(field->untag()->owner());
s->Push(field->untag()->type());
// Write out the initializer function.
s->Push(field->untag()->initializer_function());
if (kind != Snapshot::kFullAOT) {
s->Push(field->untag()->guarded_list_length());
}
if (kind == Snapshot::kFullJIT) {
s->Push(field->untag()->dependent_code());
}
// Push either the static field id or the instance field offset.
if (Field::StaticBit::decode(field->untag()->kind_bits_)) {
s->Push(field->untag()->host_offset_or_field_id());
} else {
s->Push(Smi::New(Field::TargetOffsetOf(field)));
}
}
void WriteAlloc(Serializer* s) {
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
FieldPtr field = objects_[i];
s->AssignRef(field);
}
}
void WriteFill(Serializer* s) {
Snapshot::Kind kind = s->kind();
const intptr_t count = objects_.length();
for (intptr_t i = 0; i < count; i++) {
FieldPtr field = objects_[i];
AutoTraceObjectName(field, field->untag()->name());
WriteCompressedField(field, name);
WriteCompressedField(field, owner);
WriteCompressedField(field, type);
// Write out the initializer function.
WriteCompressedField(field, initializer_function);
if (kind != Snapshot::kFullAOT) {
WriteCompressedField(field, guarded_list_length);
}
if (kind == Snapshot::kFullJIT) {
WriteCompressedField(field, dependent_code);
}
if (kind != Snapshot::kFullAOT) {
s->WriteTokenPosition(field->untag()->token_pos_);
s->WriteTokenPosition(field->untag()->end_token_pos_);
s->WriteCid(field->untag()->guarded_cid_);
s->WriteCid(field->untag()->is_nullable_);
s->Write<int8_t>(field->untag()->static_type_exactness_state_);
s->Write<uint32_t>(field->untag()->kernel_offset_);
}
s->Write<uint16_t>(field->untag()->kind_bits_);
// Write out either the static field id or the instance field offset.
if (Field::StaticBit::decode(field->untag()->kind_bits_)) {
WriteFieldValue("id", field->untag()->host_offset_or_field_id());
} else {
WriteFieldValue("offset", Smi::New(Field::TargetOffsetOf(field)));
}
}
}
private:
GrowableArray<FieldPtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
class FieldDeserializationCluster : public DeserializationCluster {
public:
FieldDeserializationCluster() : DeserializationCluster("Field") {}
~FieldDeserializationCluster() {}
void ReadAlloc(Deserializer* d) override {
ReadAllocFixedSize(d, Field::InstanceSize());
}
void ReadFill(Deserializer* d_) override {
Deserializer::Local d(d_);
ASSERT(!is_canonical()); // Never canonical.
#if !defined(DART_PRECOMPILED_RUNTIME)
Snapshot::Kind kind = d_->kind();
#endif
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
FieldPtr field = static_cast<FieldPtr>(d.Ref(id));
Deserializer::InitializeHeader(field, kFieldCid, Field::InstanceSize());
d.ReadFromTo(field);
#if !defined(DART_PRECOMPILED_RUNTIME)
ASSERT(d_->kind() != Snapshot::kFullAOT);
field->untag()->guarded_list_length_ = static_cast<SmiPtr>(d.ReadRef());
if (kind == Snapshot::kFullJIT) {
field->untag()->dependent_code_ =
static_cast<WeakArrayPtr>(d.ReadRef());
}
field->untag()->token_pos_ = d.ReadTokenPosition();
field->untag()->end_token_pos_ = d.ReadTokenPosition();
field->untag()->guarded_cid_ = d.ReadCid();
field->untag()->is_nullable_ = d.ReadCid();
const int8_t static_type_exactness_state = d.Read<int8_t>();
#if defined(TARGET_ARCH_X64)
field->untag()->static_type_exactness_state_ =
static_type_exactness_state;
#else
// We might produce core snapshots using an X64 VM and then consume
// them in an IA32 or ARM VM, in which case we need to simply ignore
// the static type exactness state written into the snapshot, because
// non-X64 builds don't have this feature enabled.
// TODO(dartbug.com/34170) Support other architectures.
USE(static_type_exactness_state);
field->untag()->static_type_exactness_state_ =
StaticTypeExactnessState::NotTracking().Encode();
#endif // defined(TARGET_ARCH_X64)
field->untag()->kernel_offset_ = d.Read<uint32_t>();
#endif
field->untag()->kind_bits_ = d.Read<uint16_t>();
field->untag()->host_offset_or_field_id_ =
static_cast<SmiPtr>(d.ReadRef());
#if !defined(DART_PRECOMPILED_RUNTIME)
field->untag()->target_offset_ =
Smi::Value(field->untag()->host_offset_or_field_id());
#endif // !defined(DART_PRECOMPILED_RUNTIME)
}
}
void PostLoad(Deserializer* d, const Array& refs) override {
Field& field = Field::Handle(d->zone());
if (!IsolateGroup::Current()->use_field_guards()) {
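// Field guards are disabled, so reset all guard state to its most
// conservative form: any cid, nullable, and no known list length.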
for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
field ^= refs.At(i);
field.set_guarded_cid_unsafe(kDynamicCid);
field.set_is_nullable_unsafe(true);
field.set_guarded_list_length_unsafe(Field::kNoFixedLength);
field.set_guarded_list_length_in_object_offset_unsafe(
Field::kUnknownLengthOffset);
field.set_static_type_exactness_state_unsafe(
StaticTypeExactnessState::NotTracking());
}
} else {
for (intptr_t i = start_index_, n = stop_index_; i < n; i++) {
field ^= refs.At(i);
field.InitializeGuardedListLengthInObjectOffset(/*unsafe=*/true);
}
}
}
};
#if !defined(DART_PRECOMPILED_RUNTIME)
class ScriptSerializationCluster : public SerializationCluster {
public:
ScriptSerializationCluster()
: SerializationCluster("Script",
kScriptCid,
compiler::target::Script::InstanceSize()) {}
~ScriptSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
ScriptPtr script = Script::RawCast(object);
objects_.Add(script);
auto* from = script->untag()->from();
auto* to = script->untag()->to_snapshot(s->kind());
for (auto* p = from; p <= to; p++) {
const intptr_t offset =
reinterpret_cast<uword>(p) - reinterpret_cast<uword>(script->untag());
const ObjectPtr obj = p->Decompress(script->heap_base());
if (offset == Script::line_starts_offset()) {
// Line starts are delta encoded.
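// (Illustrative sketch: because line-start offsets are sorted, a list
// such as [0, 12, 47] can be stored as the deltas [0, 12, 35], keeping
// the values small for variable-length encoding.)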
s->Push(obj, kDeltaEncodedTypedDataCid);
} else {
s->Push(obj);
}
}
}
void WriteAlloc(Serializer* s) {
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
ScriptPtr script = objects_[i];
s->AssignRef(script);
}
}
void WriteFill(Serializer* s) {
const intptr_t count = objects_.length();
for (intptr_t i = 0; i < count; i++) {
ScriptPtr script = objects_[i];
AutoTraceObjectName(script, script->untag()->url());
WriteFromTo(script);
if (s->kind() != Snapshot::kFullAOT) {
// Clear out the max position cache in snapshots to ensure the
// snapshot does not differ depending on whether caching was
// triggered or not.
int32_t written_flags =
UntaggedScript::CachedMaxPositionBitField::update(
0, script->untag()->flags_and_max_position_);
written_flags = UntaggedScript::HasCachedMaxPositionBit::update(
false, written_flags);
s->Write<int32_t>(written_flags);
}
s->Write<int32_t>(script->untag()->kernel_script_index_);
}
}
private:
GrowableArray<ScriptPtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
class ScriptDeserializationCluster : public DeserializationCluster {
public:
ScriptDeserializationCluster() : DeserializationCluster("Script") {}
~ScriptDeserializationCluster() {}
void ReadAlloc(Deserializer* d) override {
ReadAllocFixedSize(d, Script::InstanceSize());
}
void ReadFill(Deserializer* d_) override {
Deserializer::Local d(d_);
ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
ScriptPtr script = static_cast<ScriptPtr>(d.Ref(id));
Deserializer::InitializeHeader(script, kScriptCid,
Script::InstanceSize());
d.ReadFromTo(script);
#if !defined(DART_PRECOMPILED_RUNTIME)
script->untag()->flags_and_max_position_ = d.Read<int32_t>();
#endif
script->untag()->kernel_script_index_ = d.Read<int32_t>();
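// The load timestamp is runtime-local state, so it is simply reset.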
script->untag()->load_timestamp_ = 0;
}
}
};
#if !defined(DART_PRECOMPILED_RUNTIME)
class LibrarySerializationCluster : public SerializationCluster {
public:
LibrarySerializationCluster()
: SerializationCluster("Library",
kLibraryCid,
compiler::target::Library::InstanceSize()) {}
~LibrarySerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
LibraryPtr lib = Library::RawCast(object);
objects_.Add(lib);
PushFromTo(lib);
}
void WriteAlloc(Serializer* s) {
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
LibraryPtr lib = objects_[i];
s->AssignRef(lib);
}
}
void WriteFill(Serializer* s) {
const intptr_t count = objects_.length();
for (intptr_t i = 0; i < count; i++) {
LibraryPtr lib = objects_[i];
AutoTraceObjectName(lib, lib->untag()->url());
WriteFromTo(lib);
s->Write<int32_t>(lib->untag()->index_);
s->Write<uint16_t>(lib->untag()->num_imports_);
s->Write<int8_t>(lib->untag()->load_state_);
s->Write<uint8_t>(lib->untag()->flags_);
if (s->kind() != Snapshot::kFullAOT) {
s->Write<uint32_t>(lib->untag()->kernel_library_index_);
}
}
}
private:
GrowableArray<LibraryPtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
class LibraryDeserializationCluster : public DeserializationCluster {
public:
LibraryDeserializationCluster() : DeserializationCluster("Library") {}
~LibraryDeserializationCluster() {}
void ReadAlloc(Deserializer* d) override {
ReadAllocFixedSize(d, Library::InstanceSize());
}
void ReadFill(Deserializer* d_) override {
Deserializer::Local d(d_);
ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
LibraryPtr lib = static_cast<LibraryPtr>(d.Ref(id));
Deserializer::InitializeHeader(lib, kLibraryCid, Library::InstanceSize());
d.ReadFromTo(lib);
lib->untag()->native_entry_resolver_ = nullptr;
lib->untag()->native_entry_symbol_resolver_ = nullptr;
lib->untag()->ffi_native_resolver_ = nullptr;
lib->untag()->index_ = d.Read<int32_t>();
lib->untag()->num_imports_ = d.Read<uint16_t>();
lib->untag()->load_state_ = d.Read<int8_t>();
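// Mark the library as coming from a full snapshot, preserving the
// remaining serialized flag bits.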
lib->untag()->flags_ =
UntaggedLibrary::InFullSnapshotBit::update(true, d.Read<uint8_t>());
#if !defined(DART_PRECOMPILED_RUNTIME)
ASSERT(d_->kind() != Snapshot::kFullAOT);
lib->untag()->kernel_library_index_ = d.Read<uint32_t>();
#endif
}
}
};
#if !defined(DART_PRECOMPILED_RUNTIME)
class NamespaceSerializationCluster : public SerializationCluster {
public:
NamespaceSerializationCluster()
: SerializationCluster("Namespace",
kNamespaceCid,
compiler::target::Namespace::InstanceSize()) {}
~NamespaceSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
NamespacePtr ns = Namespace::RawCast(object);
objects_.Add(ns);
PushFromTo(ns);
}
void WriteAlloc(Serializer* s) {
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
NamespacePtr ns = objects_[i];
s->AssignRef(ns);
}
}
void WriteFill(Serializer* s) {
const intptr_t count = objects_.length();
for (intptr_t i = 0; i < count; i++) {
NamespacePtr ns = objects_[i];
AutoTraceObject(ns);
WriteFromTo(ns);
}
}
private:
GrowableArray<NamespacePtr> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
class NamespaceDeserializationCluster : public DeserializationCluster {
public:
NamespaceDeserializationCluster() : DeserializationCluster("Namespace") {}
~NamespaceDeserializationCluster() {}
void ReadAlloc(Deserializer* d) override {
ReadAllocFixedSize(d, Namespace::InstanceSize());
}
void ReadFill(Deserializer* d_) override {
Deserializer::Local d(d_);
ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
NamespacePtr ns = static_cast<NamespacePtr>(d.Ref(id));
Deserializer::InitializeHeader(ns, kNamespaceCid,
Namespace::InstanceSize());
d.ReadFromTo(ns);
}
}
};
#if !defined(DART_PRECOMPILED_RUNTIME)
// KernelProgramInfo objects are not written into a full AOT snapshot.
class KernelProgramInfoSerializationCluster : public SerializationCluster {
public:
KernelProgramInfoSerializationCluster()
: SerializationCluster(
"KernelProgramInfo",
kKernelProgramInfoCid,
compiler::target::KernelProgramInfo::InstanceSize()) {}
~KernelProgramInfoSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
KernelProgramInfoPtr info = KernelProgramInfo::RawCast(object);
objects_.Add(info);
PushFromTo(info);
}
void WriteAlloc(Serializer* s) {
const intptr_t count = objects_.length();
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
KernelProgramInfoPtr info = objects_[i];
s->AssignRef(info);
}
}
void WriteFill(Serializer* s) {
const intptr_t count = objects_.length();
for (intptr_t i = 0; i < count; i++) {
KernelProgramInfoPtr info = objects_[i];
AutoTraceObject(info);
WriteFromTo(info);
}
}
private:
GrowableArray<KernelProgramInfoPtr> objects_;
};
// Since KernelProgramInfo objects are not written into full AOT snapshots,
// one will never need to read them from a full AOT snapshot.
class KernelProgramInfoDeserializationCluster : public DeserializationCluster {
public:
KernelProgramInfoDeserializationCluster()
: DeserializationCluster("KernelProgramInfo") {}
~KernelProgramInfoDeserializationCluster() {}
void ReadAlloc(Deserializer* d) override {
ReadAllocFixedSize(d, KernelProgramInfo::InstanceSize());
}
void ReadFill(Deserializer* d_) override {
Deserializer::Local d(d_);
ASSERT(!is_canonical()); // Never canonical.
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
KernelProgramInfoPtr info = static_cast<KernelProgramInfoPtr>(d.Ref(id));
Deserializer::InitializeHeader(info, kKernelProgramInfoCid,
KernelProgramInfo::InstanceSize());
d.ReadFromTo(info);
}
}
void PostLoad(Deserializer* d, const Array& refs) override {
Array& array = Array::Handle(d->zone());
KernelProgramInfo& info = KernelProgramInfo::Handle(d->zone());
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
info ^= refs.At(id);
array = HashTables::New<UnorderedHashMap<SmiTraits>>(16, Heap::kOld);
info.set_libraries_cache(array);
array = HashTables::New<UnorderedHashMap<SmiTraits>>(16, Heap::kOld);
info.set_classes_cache(array);
}
}
};
class CodeSerializationCluster : public SerializationCluster {
public:
explicit CodeSerializationCluster(Heap* heap)
: SerializationCluster("Code", kCodeCid), array_(Array::Handle()) {}
~CodeSerializationCluster() {}
void Trace(Serializer* s, ObjectPtr object) {
CodePtr code = Code::RawCast(object);
const bool is_deferred = !s->InCurrentLoadingUnitOrRoot(code);
if (is_deferred) {
s->RecordDeferredCode(code);
} else {
objects_.Add(code);
}
// Even if this code object is itself deferred, we still need to scan
// the pool for references to other code objects (which might reside
// in the current loading unit).
ObjectPoolPtr pool = code->untag()->object_pool_;
if (s->kind() == Snapshot::kFullAOT) {
TracePool(s, pool, /*only_call_targets=*/is_deferred);
} else {
if (s->InCurrentLoadingUnitOrRoot(pool)) {
s->Push(pool);
} else {
TracePool(s, pool, /*only_call_targets=*/true);
}
}
if (s->kind() == Snapshot::kFullJIT) {
s->Push(code->untag()->deopt_info_array_);
s->Push(code->untag()->static_calls_target_table_);
s->Push(code->untag()->compressed_stackmaps_);
} else if (s->kind() == Snapshot::kFullAOT) {
// Note: we don't trace compressed_stackmaps_ because we are going to
// emit a separate mapping table into RO data, which is not going to
// be a real heap object.
#if defined(DART_PRECOMPILER)
auto const calls_array = code->untag()->static_calls_target_table_;
if (calls_array != Array::null()) {
// Some Code entries in the static calls target table may only be
// reachable through this table, so push those Code objects.
array_ = calls_array;
for (auto entry : StaticCallsTable(array_)) {
auto kind = Code::KindField::decode(
Smi::Value(entry.Get<Code::kSCallTableKindAndOffset>()));
switch (kind) {
case Code::kCallViaCode:
// Code object in the pool.
continue;
case Code::kPcRelativeTTSCall:
// TTS will be reachable through type object which itself is
// in the pool.
continue;
case Code::kPcRelativeCall:
case Code::kPcRelativeTailCall:
auto destination = entry.Get<Code::kSCallTableCodeOrTypeTarget>();
ASSERT(destination->IsHeapObject() && destination->IsCode());
s->Push(destination);
}
}
}
#else
UNREACHABLE();
#endif
}
if (Code::IsDiscarded(code)) {
ASSERT(s->kind() == Snapshot::kFullAOT && FLAG_dwarf_stack_traces_mode &&
!FLAG_retain_code_objects);
// Only the object pool, the static call table entries and the
// compressed stack maps should be pushed.
return;
}
s->Push(code->untag()->owner_);
s->Push(code->untag()->exception_handlers_);
s->Push(code->untag()->pc_descriptors_);
s->Push(code->untag()->catch_entry_);
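// With dwarf_stack_traces enabled in precompiled mode, inlining
// metadata is expected to be emitted as DWARF instead, so these
// in-heap mapping objects can be skipped.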
if (!FLAG_precompiled_mode || !FLAG_dwarf_stack_traces_mode) {
s->Push(code->untag()->inlined_id_to_function_);
if (s->InCurrentLoadingUnitOrRoot(code->untag()->code_source_map_)) {
s->Push(code->untag()->code_source_map_);
}
}
#if !defined(PRODUCT)
s->Push(code->untag()->return_address_metadata_);
if (FLAG_code_comments) {
s->Push(code->untag()->comments_);
}
#endif
}
void TracePool(Serializer* s, ObjectPoolPtr pool, bool only_call_targets) {
if (pool == ObjectPool::null()) {
return;
}
const intptr_t length = pool->untag()->length_;
uint8_t* entry_bits = pool->untag()->entry_bits();
for (intptr_t i = 0; i < length; i++) {
auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
if (entry_type == ObjectPool::EntryType::kTaggedObject) {
const ObjectPtr target = pool->untag()->data()[i].raw_obj_;
// A field is a call target because its initializer may be called
// indirectly by passing the field to the runtime. A const closure
// is a call target because its function may be called indirectly
// via a closure call.
intptr_t cid = target->GetClassId();
if (!only_call_targets || (cid == kCodeCid) || (cid == kFunctionCid) ||
(cid == kFieldCid) || (cid == kClosureCid)) {
s->Push(target);
} else if (cid >= kNumPredefinedCids) {
s->Push(s->isolate_group()->class_table()->At(cid));
}
}
}
}
struct CodeOrderInfo {
CodePtr code;
intptr_t not_discarded; // 1 if this code was not discarded and