// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_OBJECT_H_
#define RUNTIME_VM_OBJECT_H_
#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
#error "Should not include runtime"
#endif
#include <limits>
#include <tuple>
#include "include/dart_api.h"
#include "platform/assert.h"
#include "platform/atomic.h"
#include "platform/thread_sanitizer.h"
#include "platform/utils.h"
#include "vm/bitmap.h"
#include "vm/code_comments.h"
#include "vm/code_entry_kind.h"
#include "vm/compiler/assembler/object_pool_builder.h"
#include "vm/compiler/method_recognizer.h"
#include "vm/compiler/runtime_api.h"
#include "vm/dart.h"
#include "vm/flags.h"
#include "vm/globals.h"
#include "vm/growable_array.h"
#include "vm/handles.h"
#include "vm/heap/heap.h"
#include "vm/isolate.h"
#include "vm/json_stream.h"
#include "vm/os.h"
#include "vm/raw_object.h"
#include "vm/report.h"
#include "vm/static_type_exactness_state.h"
#include "vm/thread.h"
#include "vm/token_position.h"
namespace dart {
// Forward declarations.
namespace compiler {
class Assembler;
}
namespace kernel {
class Program;
class TreeNode;
} // namespace kernel
#define DEFINE_FORWARD_DECLARATION(clazz) class clazz;
CLASS_LIST(DEFINE_FORWARD_DECLARATION)
#undef DEFINE_FORWARD_DECLARATION
class Api;
class ArgumentsDescriptor;
class Closure;
class Code;
class DeoptInstr;
class DisassemblyFormatter;
class FinalizablePersistentHandle;
class FlowGraphCompiler;
class HierarchyInfo;
class LocalScope;
class CallSiteResetter;
class CodeStatistics;
class IsolateGroupReloadContext;
class ObjectGraphCopier;
class NativeArguments;
#define REUSABLE_FORWARD_DECLARATION(name) class Reusable##name##HandleScope;
REUSABLE_HANDLE_LIST(REUSABLE_FORWARD_DECLARATION)
#undef REUSABLE_FORWARD_DECLARATION
class Symbols;
class BaseTextBuffer;
#if defined(DEBUG)
#define CHECK_HANDLE() CheckHandle();
#else
#define CHECK_HANDLE()
#endif
// For AllStatic classes like OneByteString. Checks that
// ContainsCompressedPointers() returns the same value for the AllStatic class
// and the class used for handles.
#define ALLSTATIC_CONTAINS_COMPRESSED_IMPLEMENTATION(object, handle) \
static_assert(std::is_base_of<dart::handle##Ptr, dart::object##Ptr>::value, \
#object "Ptr must be a subtype of " #handle "Ptr"); \
static_assert(dart::handle::ContainsCompressedPointers() == \
dart::Untagged##object::kContainsCompressedPointers, \
"Pointer compression in Untagged" #object \
" must match pointer compression in Untagged" #handle); \
static constexpr bool ContainsCompressedPointers() { \
return dart::Untagged##object::kContainsCompressedPointers; \
}
#define BASE_OBJECT_IMPLEMENTATION(object, super) \
public: /* NOLINT */ \
using UntaggedObjectType = dart::Untagged##object; \
using ObjectPtrType = dart::object##Ptr; \
static_assert(!dart::super::ContainsCompressedPointers() || \
UntaggedObjectType::kContainsCompressedPointers, \
"Untagged" #object \
" must have compressed pointers, as supertype Untagged" #super \
" has compressed pointers"); \
static constexpr bool ContainsCompressedPointers() { \
return UntaggedObjectType::kContainsCompressedPointers; \
} \
object##Ptr ptr() const { return static_cast<object##Ptr>(ptr_); } \
bool Is##object() const { return true; } \
DART_NOINLINE static object& Handle() { \
return static_cast<object&>( \
HandleImpl(Thread::Current()->zone(), object::null(), kClassId)); \
} \
DART_NOINLINE static object& Handle(Zone* zone) { \
return static_cast<object&>(HandleImpl(zone, object::null(), kClassId)); \
} \
DART_NOINLINE static object& Handle(object##Ptr ptr) { \
return static_cast<object&>( \
HandleImpl(Thread::Current()->zone(), ptr, kClassId)); \
} \
DART_NOINLINE static object& Handle(Zone* zone, object##Ptr ptr) { \
return static_cast<object&>(HandleImpl(zone, ptr, kClassId)); \
} \
DART_NOINLINE static object& ZoneHandle() { \
return static_cast<object&>( \
ZoneHandleImpl(Thread::Current()->zone(), object::null(), kClassId)); \
} \
DART_NOINLINE static object& ZoneHandle(Zone* zone) { \
return static_cast<object&>( \
ZoneHandleImpl(zone, object::null(), kClassId)); \
} \
DART_NOINLINE static object& ZoneHandle(object##Ptr ptr) { \
return static_cast<object&>( \
ZoneHandleImpl(Thread::Current()->zone(), ptr, kClassId)); \
} \
DART_NOINLINE static object& ZoneHandle(Zone* zone, object##Ptr ptr) { \
return static_cast<object&>(ZoneHandleImpl(zone, ptr, kClassId)); \
} \
static object* ReadOnlyHandle() { \
return static_cast<object*>(ReadOnlyHandleImpl(kClassId)); \
} \
DART_NOINLINE static object& CheckedHandle(Zone* zone, ObjectPtr ptr) { \
object* obj = reinterpret_cast<object*>(VMHandles::AllocateHandle(zone)); \
initializeHandle(obj, ptr); \
if (!obj->Is##object()) { \
FATAL2("Handle check failed: saw %s expected %s", obj->ToCString(), \
#object); \
} \
return *obj; \
} \
DART_NOINLINE static object& CheckedZoneHandle(Zone* zone, ObjectPtr ptr) { \
object* obj = \
reinterpret_cast<object*>(VMHandles::AllocateZoneHandle(zone)); \
initializeHandle(obj, ptr); \
if (!obj->Is##object()) { \
FATAL2("Handle check failed: saw %s expected %s", obj->ToCString(), \
#object); \
} \
return *obj; \
} \
DART_NOINLINE static object& CheckedZoneHandle(ObjectPtr ptr) { \
return CheckedZoneHandle(Thread::Current()->zone(), ptr); \
} \
  /* T::Cast cannot be applied to a null Object, because the object vtable */ \
  /* is not set up for type T, although some methods are supposed to work */ \
  /* with null, for example Instance::Equals(). */ \
static const object& Cast(const Object& obj) { \
ASSERT(obj.Is##object()); \
return reinterpret_cast<const object&>(obj); \
} \
static object##Ptr RawCast(ObjectPtr raw) { \
ASSERT(Object::Handle(raw).IsNull() || Object::Handle(raw).Is##object()); \
return static_cast<object##Ptr>(raw); \
} \
static object##Ptr null() { \
return static_cast<object##Ptr>(Object::null()); \
} \
virtual const char* ToCString() const; \
static const ClassId kClassId = k##object##Cid; \
\
private: /* NOLINT */ \
/* Initialize the handle based on the ptr in the presence of null. */ \
static void initializeHandle(object* obj, ObjectPtr ptr) { \
obj->SetPtr(ptr, kClassId); \
} \
/* Disallow allocation, copy constructors and override super assignment. */ \
public: /* NOLINT */ \
void operator delete(void* pointer) { UNREACHABLE(); } \
\
private: /* NOLINT */ \
void* operator new(size_t size); \
object(const object& value) = delete; \
void operator=(super##Ptr value) = delete; \
void operator=(const object& value) = delete; \
void operator=(const super& value) = delete;
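// Illustrative use of the handle factories generated by the macro above (a
// sketch, not code from this file; `fld` is a hypothetical Field handle):
//
//   Zone* zone = Thread::Current()->zone();
//   const String& name = String::Handle(zone, fld.name());  // scope-local
//   String& kept = String::ZoneHandle(zone);                // outlives scope
//   kept = name.ptr();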
// Conditionally include object_service.cc functionality in the vtable to avoid
// link errors like the following:
//
// object.o:(.rodata._ZTVN4....E[_ZTVN4...E]+0x278):
// undefined reference to
// `dart::Instance::PrintSharedInstanceJSON(dart::JSONObject*, bool) const'.
//
#ifndef PRODUCT
#define OBJECT_SERVICE_SUPPORT(object) \
protected: /* NOLINT */ \
  /* Object is printed as JSON into the stream. If ref is true, only a */ \
  /* header with an object id is printed. If ref is false, the object is */ \
  /* fully printed. */ \
virtual void PrintJSONImpl(JSONStream* stream, bool ref) const; \
virtual const char* JSONType() const { return "" #object; }
#else
#define OBJECT_SERVICE_SUPPORT(object) protected: /* NOLINT */
#endif // !PRODUCT
#define SNAPSHOT_SUPPORT(object) \
friend class object##MessageSerializationCluster; \
friend class object##MessageDeserializationCluster;
#define OBJECT_IMPLEMENTATION(object, super) \
public: /* NOLINT */ \
DART_NOINLINE void operator=(object##Ptr value) { \
initializeHandle(this, value); \
} \
DART_NOINLINE void operator^=(ObjectPtr value) { \
initializeHandle(this, value); \
ASSERT(IsNull() || Is##object()); \
} \
\
protected: /* NOLINT */ \
object() : super() {} \
BASE_OBJECT_IMPLEMENTATION(object, super) \
OBJECT_SERVICE_SUPPORT(object) \
friend class Object;
extern "C" void DFLRT_ExitSafepoint(NativeArguments __unusable_);
#define HEAP_OBJECT_IMPLEMENTATION(object, super) \
OBJECT_IMPLEMENTATION(object, super); \
Untagged##object* untag() const { \
ASSERT(ptr() != null()); \
return const_cast<Untagged##object*>(ptr()->untag()); \
} \
SNAPSHOT_SUPPORT(object) \
friend class StackFrame; \
friend class Thread; \
friend void DFLRT_ExitSafepoint(NativeArguments __unusable_);
// This macro is used to denote types that do not have a sub-type.
#define FINAL_HEAP_OBJECT_IMPLEMENTATION_HELPER(object, rettype, super) \
public: /* NOLINT */ \
void operator=(object##Ptr value) { \
ptr_ = value; \
CHECK_HANDLE(); \
} \
void operator^=(ObjectPtr value) { \
ptr_ = value; \
CHECK_HANDLE(); \
} \
\
private: /* NOLINT */ \
object() : super() {} \
BASE_OBJECT_IMPLEMENTATION(object, super) \
OBJECT_SERVICE_SUPPORT(object) \
Untagged##object* untag() const { \
ASSERT(ptr() != null()); \
return const_cast<Untagged##object*>(ptr()->untag()); \
} \
static intptr_t NextFieldOffset() { return -kWordSize; } \
SNAPSHOT_SUPPORT(rettype) \
friend class Object; \
friend class StackFrame; \
friend class Thread; \
friend void DFLRT_ExitSafepoint(NativeArguments __unusable_);
#define FINAL_HEAP_OBJECT_IMPLEMENTATION(object, super) \
FINAL_HEAP_OBJECT_IMPLEMENTATION_HELPER(object, object, super)
#define MINT_OBJECT_IMPLEMENTATION(object, rettype, super) \
FINAL_HEAP_OBJECT_IMPLEMENTATION_HELPER(object, rettype, super)
// In the precompiled runtime, there is no access to runtime_api.cc since host
// and target are the same. In that case, the namespace dart is used to refer
// to the target namespace.
#if defined(DART_PRECOMPILED_RUNTIME)
namespace RTN = dart;
#else
namespace RTN = dart::compiler::target;
#endif // defined(DART_PRECOMPILED_RUNTIME)
class Object {
public:
using UntaggedObjectType = UntaggedObject;
using ObjectPtrType = ObjectPtr;
// We use 30 bits for the hash code so hashes in a snapshot taken on a
// 64-bit architecture stay in Smi range when loaded on a 32-bit
// architecture.
static const intptr_t kHashBits = 30;
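// (Worked check of the claim above: a Smi on a 32-bit architecture carries 31
// signed bits after the tag bit, so any unsigned hash below 2^30 remains a
// non-negative Smi when loaded.)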
static ObjectPtr RawCast(ObjectPtr obj) { return obj; }
virtual ~Object() {}
static constexpr bool ContainsCompressedPointers() {
return UntaggedObject::kContainsCompressedPointers;
}
ObjectPtr ptr() const { return ptr_; }
void operator=(ObjectPtr value) { initializeHandle(this, value); }
bool IsCanonical() const { return ptr()->untag()->IsCanonical(); }
void SetCanonical() const { ptr()->untag()->SetCanonical(); }
void ClearCanonical() const { ptr()->untag()->ClearCanonical(); }
intptr_t GetClassId() const {
return !ptr()->IsHeapObject() ? static_cast<intptr_t>(kSmiCid)
: ptr()->untag()->GetClassId();
}
inline ClassPtr clazz() const;
static intptr_t tags_offset() { return OFFSET_OF(UntaggedObject, tags_); }
// Class testers.
#define DEFINE_CLASS_TESTER(clazz) \
virtual bool Is##clazz() const { return false; }
CLASS_LIST_FOR_HANDLES(DEFINE_CLASS_TESTER);
#undef DEFINE_CLASS_TESTER
bool IsNull() const { return ptr_ == null_; }
// Matches Object.toString on instances (except String::ToCString, bug 20583).
virtual const char* ToCString() const {
if (IsNull()) {
return "null";
} else {
return "Object";
}
}
#ifndef PRODUCT
void PrintJSON(JSONStream* stream, bool ref = true) const;
virtual void PrintJSONImpl(JSONStream* stream, bool ref) const;
virtual const char* JSONType() const { return IsNull() ? "null" : "Object"; }
#endif
// Returns the name that is used to identify an object in the
// namespace dictionary.
// Object::DictionaryName() returns String::null(). Only subclasses
// of Object that need to be entered in the library and library prefix
// namespaces need to provide an implementation.
virtual StringPtr DictionaryName() const;
bool IsNew() const { return ptr()->IsNewObject(); }
bool IsOld() const { return ptr()->IsOldObject(); }
#if defined(DEBUG)
bool InVMIsolateHeap() const;
#else
bool InVMIsolateHeap() const { return ptr()->untag()->InVMIsolateHeap(); }
#endif // DEBUG
// Print the object on stdout for debugging.
void Print() const;
bool IsZoneHandle() const {
return VMHandles::IsZoneHandle(reinterpret_cast<uword>(this));
}
bool IsReadOnlyHandle() const;
bool IsNotTemporaryScopedHandle() const;
static Object& Handle(Zone* zone, ObjectPtr ptr) {
Object* obj = reinterpret_cast<Object*>(VMHandles::AllocateHandle(zone));
initializeHandle(obj, ptr);
return *obj;
}
static Object* ReadOnlyHandle() {
Object* obj = reinterpret_cast<Object*>(Dart::AllocateReadOnlyHandle());
initializeHandle(obj, Object::null());
return obj;
}
static Object& Handle() { return Handle(Thread::Current()->zone(), null_); }
static Object& Handle(Zone* zone) { return Handle(zone, null_); }
static Object& Handle(ObjectPtr ptr) {
return Handle(Thread::Current()->zone(), ptr);
}
static Object& ZoneHandle(Zone* zone, ObjectPtr ptr) {
Object* obj =
reinterpret_cast<Object*>(VMHandles::AllocateZoneHandle(zone));
initializeHandle(obj, ptr);
return *obj;
}
static Object& ZoneHandle(Zone* zone) { return ZoneHandle(zone, null_); }
static Object& ZoneHandle() {
return ZoneHandle(Thread::Current()->zone(), null_);
}
static Object& ZoneHandle(ObjectPtr ptr) {
return ZoneHandle(Thread::Current()->zone(), ptr);
}
static ObjectPtr null() { return null_; }
#if defined(HASH_IN_OBJECT_HEADER)
static uint32_t GetCachedHash(const ObjectPtr obj) {
return obj->untag()->GetHeaderHash();
}
static uint32_t SetCachedHashIfNotSet(ObjectPtr obj, uint32_t hash) {
return obj->untag()->SetHeaderHashIfNotSet(hash);
}
#endif
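// Illustrative usage of the cached header hash (a sketch; ComputeHash is
// hypothetical):
//
//   uint32_t hash = Object::GetCachedHash(obj);
//   if (hash == 0) {
//     // If another thread installed a hash first, its value is returned.
//     hash = Object::SetCachedHashIfNotSet(obj, ComputeHash(obj));
//   }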
// The list below enumerates read-only handles for singleton
// objects that are shared between the different isolates.
//
// - sentinel is a value that cannot be produced by Dart code. It can be used
// to mark special values, for example to distinguish "uninitialized" fields.
// - transition_sentinel is a value marking that we are transitioning from
// sentinel, e.g., computing a field value. Used to detect circular
// initialization.
// - unknown_constant and non_constant are the optimizing compiler's constant
// propagation constants.
#define SHARED_READONLY_HANDLES_LIST(V) \
V(Object, null_object) \
V(Class, null_class) \
V(Array, null_array) \
V(String, null_string) \
V(Instance, null_instance) \
V(Function, null_function) \
V(FunctionType, null_function_type) \
V(TypeArguments, null_type_arguments) \
V(CompressedStackMaps, null_compressed_stackmaps) \
V(TypeArguments, empty_type_arguments) \
V(Array, empty_array) \
V(Array, zero_array) \
V(ContextScope, empty_context_scope) \
V(ObjectPool, empty_object_pool) \
V(CompressedStackMaps, empty_compressed_stackmaps) \
V(PcDescriptors, empty_descriptors) \
V(LocalVarDescriptors, empty_var_descriptors) \
V(ExceptionHandlers, empty_exception_handlers) \
V(ExceptionHandlers, empty_async_exception_handlers) \
V(Array, extractor_parameter_types) \
V(Array, extractor_parameter_names) \
V(Sentinel, sentinel) \
V(Sentinel, transition_sentinel) \
V(Sentinel, unknown_constant) \
V(Sentinel, non_constant) \
V(Bool, bool_true) \
V(Bool, bool_false) \
V(Smi, smi_illegal_cid) \
V(Smi, smi_zero) \
V(ApiError, no_callbacks_error) \
V(UnwindError, unwind_in_progress_error) \
V(LanguageError, snapshot_writer_error) \
V(LanguageError, branch_offset_error) \
V(LanguageError, speculative_inlining_error) \
V(LanguageError, background_compilation_error) \
V(LanguageError, out_of_memory_error) \
V(Array, vm_isolate_snapshot_object_table) \
V(Type, dynamic_type) \
V(Type, void_type) \
V(AbstractType, null_abstract_type)
#define DEFINE_SHARED_READONLY_HANDLE_GETTER(Type, name) \
static const Type& name() { \
ASSERT(name##_ != nullptr); \
return *name##_; \
}
SHARED_READONLY_HANDLES_LIST(DEFINE_SHARED_READONLY_HANDLE_GETTER)
#undef DEFINE_SHARED_READONLY_HANDLE_GETTER
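// Illustrative use of the getters defined above (a sketch):
//
//   const Array& no_args = Object::empty_array();
//   const String& empty_name = Object::null_string();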
static void set_vm_isolate_snapshot_object_table(const Array& table);
static ClassPtr class_class() { return class_class_; }
static ClassPtr dynamic_class() { return dynamic_class_; }
static ClassPtr void_class() { return void_class_; }
static ClassPtr type_parameters_class() { return type_parameters_class_; }
static ClassPtr type_arguments_class() { return type_arguments_class_; }
static ClassPtr patch_class_class() { return patch_class_class_; }
static ClassPtr function_class() { return function_class_; }
static ClassPtr closure_data_class() { return closure_data_class_; }
static ClassPtr ffi_trampoline_data_class() {
return ffi_trampoline_data_class_;
}
static ClassPtr field_class() { return field_class_; }
static ClassPtr script_class() { return script_class_; }
static ClassPtr library_class() { return library_class_; }
static ClassPtr namespace_class() { return namespace_class_; }
static ClassPtr kernel_program_info_class() {
return kernel_program_info_class_;
}
static ClassPtr code_class() { return code_class_; }
static ClassPtr instructions_class() { return instructions_class_; }
static ClassPtr instructions_section_class() {
return instructions_section_class_;
}
static ClassPtr instructions_table_class() {
return instructions_table_class_;
}
static ClassPtr object_pool_class() { return object_pool_class_; }
static ClassPtr pc_descriptors_class() { return pc_descriptors_class_; }
static ClassPtr code_source_map_class() { return code_source_map_class_; }
static ClassPtr compressed_stackmaps_class() {
return compressed_stackmaps_class_;
}
static ClassPtr var_descriptors_class() { return var_descriptors_class_; }
static ClassPtr exception_handlers_class() {
return exception_handlers_class_;
}
static ClassPtr deopt_info_class() { return deopt_info_class_; }
static ClassPtr context_class() { return context_class_; }
static ClassPtr context_scope_class() { return context_scope_class_; }
static ClassPtr sentinel_class() { return sentinel_class_; }
static ClassPtr api_error_class() { return api_error_class_; }
static ClassPtr language_error_class() { return language_error_class_; }
static ClassPtr unhandled_exception_class() {
return unhandled_exception_class_;
}
static ClassPtr unwind_error_class() { return unwind_error_class_; }
static ClassPtr singletargetcache_class() { return singletargetcache_class_; }
static ClassPtr unlinkedcall_class() { return unlinkedcall_class_; }
static ClassPtr monomorphicsmiablecall_class() {
return monomorphicsmiablecall_class_;
}
static ClassPtr icdata_class() { return icdata_class_; }
static ClassPtr megamorphic_cache_class() { return megamorphic_cache_class_; }
static ClassPtr subtypetestcache_class() { return subtypetestcache_class_; }
static ClassPtr loadingunit_class() { return loadingunit_class_; }
static ClassPtr weak_serialization_reference_class() {
return weak_serialization_reference_class_;
}
// Initialize the VM isolate.
static void InitNullAndBool(IsolateGroup* isolate_group);
static void Init(IsolateGroup* isolate_group);
static void InitVtables();
static void FinishInit(IsolateGroup* isolate_group);
static void FinalizeVMIsolate(IsolateGroup* isolate_group);
static void FinalizeReadOnlyObject(ObjectPtr object);
static void Cleanup();
// Initialize a new isolate either from a Kernel IR, from source, or from a
// snapshot.
static ErrorPtr Init(IsolateGroup* isolate_group,
const uint8_t* kernel_buffer,
intptr_t kernel_buffer_size);
static void MakeUnusedSpaceTraversable(const Object& obj,
intptr_t original_size,
intptr_t used_size);
static intptr_t InstanceSize() {
return RoundedAllocationSize(sizeof(UntaggedObject));
}
template <class FakeObject>
static void VerifyBuiltinVtable(intptr_t cid) {
FakeObject fake;
if (cid >= kNumPredefinedCids) {
cid = kInstanceCid;
}
ASSERT(builtin_vtables_[cid] == fake.vtable());
}
static void VerifyBuiltinVtables();
static const ClassId kClassId = kObjectCid;
// Different kinds of name visibility.
enum NameVisibility {
// Internal names are the true names of classes, fields,
// etc. inside the vm. These names include privacy suffixes,
// getter prefixes, and trailing dots on unnamed constructors.
//
// The names of core implementation classes (like _OneByteString)
// are preserved as well.
//
// e.g.
// private getter -> get:foo@6be832b
// private constructor -> _MyClass@6b3832b.
// private named constructor -> _MyClass@6b3832b.named
// core impl class name shown -> _OneByteString
kInternalName = 0,
// Scrubbed names drop privacy suffixes, getter prefixes, and
// trailing dots on unnamed constructors. These names are used in
// the vm service.
//
// e.g.
// get:foo@6be832b -> foo
// _MyClass@6b3832b. -> _MyClass
// _MyClass@6b3832b.named -> _MyClass.named
// _OneByteString -> _OneByteString (not remapped)
kScrubbedName,
// User visible names are appropriate for reporting type errors
// directly to programmers. The names have been scrubbed and
// the names of core implementation classes are remapped to their
// public interface names.
//
// e.g.
// get:foo@6be832b -> foo
// _MyClass@6b3832b. -> _MyClass
// _MyClass@6b3832b.named -> _MyClass.named
// _OneByteString -> String (remapped)
kUserVisibleName
};
// Sometimes simple formatting might produce the same name for two different
// entities, for example we might inject a synthetic forwarder into the
// class which has the same name as an already existing function, or
// two different types can be formatted as X<T> because T has different
// meaning (refers to a different type parameter) in these two types.
// Such ambiguity might be acceptable in some contexts but not in others, so
// some formatting methods have two modes - one which tries to be more
// user friendly, and another one which tries to avoid name conflicts by
// emitting longer and less user friendly names.
enum class NameDisambiguation {
kYes,
kNo,
};
protected:
friend ObjectPtr AllocateObject(intptr_t, intptr_t);
// Used for extracting the C++ vtable during bringup.
Object() : ptr_(null_) {}
uword raw_value() const { return static_cast<uword>(ptr()); }
inline void SetPtr(ObjectPtr value, intptr_t default_cid);
void CheckHandle() const;
DART_NOINLINE static Object& HandleImpl(Zone* zone,
ObjectPtr ptr,
intptr_t default_cid) {
Object* obj = reinterpret_cast<Object*>(VMHandles::AllocateHandle(zone));
obj->SetPtr(ptr, default_cid);
return *obj;
}
DART_NOINLINE static Object& ZoneHandleImpl(Zone* zone,
ObjectPtr ptr,
intptr_t default_cid) {
Object* obj =
reinterpret_cast<Object*>(VMHandles::AllocateZoneHandle(zone));
obj->SetPtr(ptr, default_cid);
return *obj;
}
DART_NOINLINE static Object* ReadOnlyHandleImpl(intptr_t cid) {
Object* obj = reinterpret_cast<Object*>(Dart::AllocateReadOnlyHandle());
obj->SetPtr(Object::null(), cid);
return obj;
}
// Memcpy to account for the strict aliasing rule.
// Explicit cast to silence -Wdynamic-class-memaccess.
// This is still undefined behavior because we're messing with the internal
// representation of C++ objects, but works okay in practice with
// -fno-strict-vtable-pointers.
cpp_vtable vtable() const {
cpp_vtable result;
memcpy(&result, reinterpret_cast<const void*>(this), // NOLINT
sizeof(result));
return result;
}
void set_vtable(cpp_vtable value) {
memcpy(reinterpret_cast<void*>(this), &value, // NOLINT
sizeof(cpp_vtable));
}
static ObjectPtr Allocate(intptr_t cls_id,
intptr_t size,
Heap::Space space,
bool compressed);
static constexpr intptr_t RoundedAllocationSize(intptr_t size) {
return Utils::RoundUp(size, kObjectAlignment);
}
bool Contains(uword addr) const { return ptr()->untag()->Contains(addr); }
// Start of field mutator guards.
//
// All writes to heap objects should ultimately pass through one of the
// methods below or their counterparts in RawObject, to ensure that the
// write barrier is correctly applied.
template <typename type, std::memory_order order = std::memory_order_relaxed>
type LoadPointer(type const* addr) const {
return ptr()->untag()->LoadPointer<type, order>(addr);
}
template <typename type, std::memory_order order = std::memory_order_relaxed>
void StorePointer(type const* addr, type value) const {
ptr()->untag()->StorePointer<type, order>(addr, value);
}
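// Illustrative mutator funneling through the guard above (a sketch; `owner_`
// is a hypothetical field of the untagged layout):
//
//   void set_owner(const Object& value) const {
//     StorePointer(&untag()->owner_, value.ptr());
//   }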
template <typename type,
typename compressed_type,
std::memory_order order = std::memory_order_relaxed>
void StoreCompressedPointer(compressed_type const* addr, type value) const {
ptr()->untag()->StoreCompressedPointer<type, compressed_type, order>(addr,
value);
}
// Use for storing into an explicitly Smi-typed field of an object
// (i.e., both the previous and new value are Smis).
void StoreSmi(SmiPtr const* addr, SmiPtr value) const {
ptr()->untag()->StoreSmi(addr, value);
}
template <typename FieldType>
void StoreSimd128(const FieldType* addr, simd128_value_t value) const {
ASSERT(Contains(reinterpret_cast<uword>(addr)));
value.writeTo(const_cast<FieldType*>(addr));
}
template <typename FieldType>
FieldType LoadNonPointer(const FieldType* addr) const {
return *const_cast<FieldType*>(addr);
}
template <typename FieldType, std::memory_order order>
FieldType LoadNonPointer(const FieldType* addr) const {
return reinterpret_cast<std::atomic<FieldType>*>(
const_cast<FieldType*>(addr))
->load(order);
}
// Needs two template arguments to allow assigning enums to fixed-size ints.
template <typename FieldType, typename ValueType>
void StoreNonPointer(const FieldType* addr, ValueType value) const {
// Can't use Contains, as it uses tags_, which is set through this method.
ASSERT(reinterpret_cast<uword>(addr) >= UntaggedObject::ToAddr(ptr()));
*const_cast<FieldType*>(addr) = value;
}
template <typename FieldType, typename ValueType, std::memory_order order>
void StoreNonPointer(const FieldType* addr, ValueType value) const {
// Can't use Contains, as it uses tags_, which is set through this method.
ASSERT(reinterpret_cast<uword>(addr) >= UntaggedObject::ToAddr(ptr()));
reinterpret_cast<std::atomic<FieldType>*>(const_cast<FieldType*>(addr))
->store(value, order);
}
// Provides non-const access to non-pointer fields within the object. Such
// access does not need a write barrier, but it is *not* GC-safe, since the
// object might move; hence it must be fully contained within a
// NoSafepointScope.
template <typename FieldType>
FieldType* UnsafeMutableNonPointer(const FieldType* addr) const {
// Allow pointers at the end of variable-length data, and disallow pointers
// within the header word.
ASSERT(Contains(reinterpret_cast<uword>(addr) - 1) &&
Contains(reinterpret_cast<uword>(addr) - kWordSize));
// At least check that there is a NoSafepointScope and hope it's big enough.
ASSERT(Thread::Current()->no_safepoint_scope_depth() > 0);
return const_cast<FieldType*>(addr);
}
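// Illustrative caller (a sketch; names are hypothetical): the raw pointer is
// only valid while no safepoint can occur.
//
//   {
//     NoSafepointScope no_safepoint;
//     uint8_t* dst = str.UnsafeMutableNonPointer(str.untag()->data());
//     memmove(dst, src, length);
//   }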
// Fail at link time if StoreNonPointer or UnsafeMutableNonPointer is
// instantiated with an object pointer type.
#define STORE_NON_POINTER_ILLEGAL_TYPE(type) \
template <typename ValueType> \
void StoreNonPointer(type##Ptr const* addr, ValueType value) const { \
UnimplementedMethod(); \
} \
type##Ptr* UnsafeMutableNonPointer(type##Ptr const* addr) const { \
UnimplementedMethod(); \
return NULL; \
}
CLASS_LIST(STORE_NON_POINTER_ILLEGAL_TYPE);
void UnimplementedMethod() const;
#undef STORE_NON_POINTER_ILLEGAL_TYPE
// Allocate an object and copy the body of 'orig'.
static ObjectPtr Clone(const Object& orig,
Heap::Space space,
bool load_with_relaxed_atomics = false);
// End of field mutator guards.
ObjectPtr ptr_; // The raw object reference.
protected:
void AddCommonObjectProperties(JSONObject* jsobj,
const char* protocol_type,
bool ref) const;
private:
static intptr_t NextFieldOffset() {
// Indicates this class cannot be extended by dart code.
return -kWordSize;
}
static void InitializeObject(uword address,
intptr_t id,
intptr_t size,
bool compressed);
static void RegisterClass(const Class& cls,
const String& name,
const Library& lib);
static void RegisterPrivateClass(const Class& cls,
const String& name,
const Library& lib);
/* Initialize the handle based on the ptr in the presence of null. */
static void initializeHandle(Object* obj, ObjectPtr ptr) {
obj->SetPtr(ptr, kObjectCid);
}
static cpp_vtable builtin_vtables_[kNumPredefinedCids];
// The static values below are singletons shared between the different
// isolates. They are all allocated in the non-GC'd Dart::vm_isolate_.
static ObjectPtr null_;
static BoolPtr true_;
static BoolPtr false_;
static ClassPtr class_class_; // Class of the Class vm object.
static ClassPtr dynamic_class_; // Class of the 'dynamic' type.
static ClassPtr void_class_; // Class of the 'void' type.
static ClassPtr type_parameters_class_; // Class of TypeParameters vm object.
static ClassPtr type_arguments_class_; // Class of TypeArguments vm object.
static ClassPtr patch_class_class_; // Class of the PatchClass vm object.
static ClassPtr function_class_; // Class of the Function vm object.
static ClassPtr closure_data_class_; // Class of ClosureData vm obj.
static ClassPtr ffi_trampoline_data_class_; // Class of FfiTrampolineData
// vm obj.
static ClassPtr field_class_; // Class of the Field vm object.
static ClassPtr script_class_; // Class of the Script vm object.
static ClassPtr library_class_; // Class of the Library vm object.
static ClassPtr namespace_class_; // Class of Namespace vm object.
static ClassPtr kernel_program_info_class_; // Class of KernelProgramInfo vm
// object.
static ClassPtr code_class_; // Class of the Code vm object.
static ClassPtr instructions_class_; // Class of the Instructions vm object.
static ClassPtr instructions_section_class_; // Class of InstructionsSection.
static ClassPtr instructions_table_class_; // Class of InstructionsTable.
static ClassPtr object_pool_class_; // Class of the ObjectPool vm object.
static ClassPtr pc_descriptors_class_; // Class of PcDescriptors vm object.
static ClassPtr code_source_map_class_; // Class of CodeSourceMap vm object.
static ClassPtr compressed_stackmaps_class_; // Class of CompressedStackMaps.
static ClassPtr var_descriptors_class_; // Class of LocalVarDescriptors.
static ClassPtr exception_handlers_class_; // Class of ExceptionHandlers.
static ClassPtr deopt_info_class_; // Class of DeoptInfo.
static ClassPtr context_class_; // Class of the Context vm object.
static ClassPtr context_scope_class_; // Class of ContextScope vm object.
static ClassPtr sentinel_class_; // Class of Sentinel vm object.
static ClassPtr singletargetcache_class_; // Class of SingleTargetCache.
static ClassPtr unlinkedcall_class_; // Class of UnlinkedCall.
static ClassPtr
monomorphicsmiablecall_class_; // Class of MonomorphicSmiableCall.
static ClassPtr icdata_class_; // Class of ICData.
static ClassPtr megamorphic_cache_class_; // Class of MegamorphicCache.
static ClassPtr subtypetestcache_class_; // Class of SubtypeTestCache.
static ClassPtr loadingunit_class_; // Class of LoadingUnit.
static ClassPtr api_error_class_; // Class of ApiError.
static ClassPtr language_error_class_; // Class of LanguageError.
static ClassPtr unhandled_exception_class_; // Class of UnhandledException.
static ClassPtr unwind_error_class_; // Class of UnwindError.
// Class of WeakSerializationReference.
static ClassPtr weak_serialization_reference_class_;
#define DECLARE_SHARED_READONLY_HANDLE(Type, name) static Type* name##_;
SHARED_READONLY_HANDLES_LIST(DECLARE_SHARED_READONLY_HANDLE)
#undef DECLARE_SHARED_READONLY_HANDLE
friend void ClassTable::Register(const Class& cls);
friend void UntaggedObject::Validate(IsolateGroup* isolate_group) const;
friend class Closure;
friend class InstanceDeserializationCluster;
friend class ObjectGraphCopier; // For Object::InitializeObject
friend class Simd128MessageDeserializationCluster;
friend class OneByteString;
friend class TwoByteString;
friend class ExternalOneByteString;
friend class ExternalTwoByteString;
friend class Thread;
#define REUSABLE_FRIEND_DECLARATION(name) \
friend class Reusable##name##HandleScope;
REUSABLE_HANDLE_LIST(REUSABLE_FRIEND_DECLARATION)
#undef REUSABLE_FRIEND_DECLARATION
DISALLOW_ALLOCATION();
DISALLOW_COPY_AND_ASSIGN(Object);
};
// Used to declare setters and getters for untagged object fields that are
// defined with the WSR_COMPRESSED_POINTER_FIELD macro.
//
// In the precompiler, the getter transparently unwraps the
// WeakSerializationReference, if present, to get the wrapped value of the
// appropriate type, since a WeakSerializationReference object should be
// transparent to the parts of the precompiler that are not the serializer.
// Meanwhile, the setter takes an Object to allow the precompiler to set the
// field to a WeakSerializationReference.
//
// Since WeakSerializationReferences are only used during precompilation,
// outside the precompiler this macro creates the normally expected getter
// and setter.
#if defined(DART_PRECOMPILER)
#define PRECOMPILER_WSR_FIELD_DECLARATION(Type, Name) \
Type##Ptr Name() const; \
void set_##Name(const Object& value) const { \
untag()->set_##Name(value.ptr()); \
}
#else
#define PRECOMPILER_WSR_FIELD_DECLARATION(Type, Name) \
Type##Ptr Name() const { return untag()->Name(); } \
void set_##Name(const Type& value) const;
#endif
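// Illustrative expansion site (a sketch): a class with a WSR-compressed field
// would declare, e.g.,
//
//   PRECOMPILER_WSR_FIELD_DECLARATION(Function, target)
//
// yielding `FunctionPtr target() const` plus the matching setter described
// above.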
class PassiveObject : public Object {
public:
void operator=(ObjectPtr value) { ptr_ = value; }
void operator^=(ObjectPtr value) { ptr_ = value; }
static PassiveObject& Handle(Zone* zone, ObjectPtr ptr) {
PassiveObject* obj =
reinterpret_cast<PassiveObject*>(VMHandles::AllocateHandle(zone));
obj->ptr_ = ptr;
obj->set_vtable(0);
return *obj;
}
static PassiveObject& Handle(ObjectPtr ptr) {
return Handle(Thread::Current()->zone(), ptr);
}
static PassiveObject& Handle() {
return Handle(Thread::Current()->zone(), Object::null());
}
static PassiveObject& Handle(Zone* zone) {
return Handle(zone, Object::null());
}
static PassiveObject& ZoneHandle(Zone* zone, ObjectPtr ptr) {
PassiveObject* obj =
reinterpret_cast<PassiveObject*>(VMHandles::AllocateZoneHandle(zone));
obj->ptr_ = ptr;
obj->set_vtable(0);
return *obj;
}
static PassiveObject& ZoneHandle(ObjectPtr ptr) {
return ZoneHandle(Thread::Current()->zone(), ptr);
}
static PassiveObject& ZoneHandle() {
return ZoneHandle(Thread::Current()->zone(), Object::null());
}
static PassiveObject& ZoneHandle(Zone* zone) {
return ZoneHandle(zone, Object::null());
}
private:
PassiveObject() : Object() {}
DISALLOW_ALLOCATION();
DISALLOW_COPY_AND_ASSIGN(PassiveObject);
};
typedef ZoneGrowableHandlePtrArray<const AbstractType> Trail;
typedef ZoneGrowableHandlePtrArray<const AbstractType>* TrailPtr;
// A URIs array contains triplets of strings.
// The first string in the triplet is a type name (usually a class).
// The second string in the triplet is the URI of the type.
// The third string in the triplet is "print" if the triplet should be printed.
typedef ZoneGrowableHandlePtrArray<const String> URIs;
enum class Nullability : uint8_t {
kNullable = 0,
kNonNullable = 1,
kLegacy = 2,
// Adjust kNullabilityBitSize in app_snapshot.cc if adding new values.
};
// Equality kind between types.
enum class TypeEquality {
kCanonical = 0,
kSyntactical = 1,
kInSubtypeTest = 2,
};
// The NNBDMode reflects the opted-in status of libraries.
// Note that the weak or strong checking mode is not reflected in NNBDMode.
enum class NNBDMode {
// Status of the library:
kLegacyLib = 0, // Library is legacy.
kOptedInLib = 1, // Library is opted-in.
};
// The NNBDCompiledMode reflects the mode in which constants of the library were
// compiled by CFE.
enum class NNBDCompiledMode {
kWeak = 0,
kStrong = 1,
kAgnostic = 2,
kInvalid = 3,
};
class Class : public Object {
public:
enum InvocationDispatcherEntry {
kInvocationDispatcherName,
kInvocationDispatcherArgsDesc,
kInvocationDispatcherFunction,
kInvocationDispatcherEntrySize,
};
bool HasCompressedPointers() const;
intptr_t host_instance_size() const {
ASSERT(is_finalized() || is_prefinalized());
return (untag()->host_instance_size_in_words_ * kCompressedWordSize);
}
intptr_t target_instance_size() const {
ASSERT(is_finalized() || is_prefinalized());
#if defined(DART_PRECOMPILER)
return (untag()->target_instance_size_in_words_ *
compiler::target::kCompressedWordSize);
#else
return host_instance_size();
#endif // defined(DART_PRECOMPILER)
}
static intptr_t host_instance_size(ClassPtr clazz) {
return (clazz->untag()->host_instance_size_in_words_ * kCompressedWordSize);
}
static intptr_t target_instance_size(ClassPtr clazz) {
#if defined(DART_PRECOMPILER)
return (clazz->untag()->target_instance_size_in_words_ *
compiler::target::kCompressedWordSize);
#else
return host_instance_size(clazz);
#endif // defined(DART_PRECOMPILER)
}
void set_instance_size(intptr_t host_value_in_bytes,
intptr_t target_value_in_bytes) const {
ASSERT(kCompressedWordSize != 0);
set_instance_size_in_words(
host_value_in_bytes / kCompressedWordSize,
target_value_in_bytes / compiler::target::kCompressedWordSize);
}
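// Worked example for the conversion above (illustrative): with
// kCompressedWordSize == 4, set_instance_size(16, 16) stores 4 in both the
// host and target size-in-words fields.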
void set_instance_size_in_words(intptr_t host_value,
intptr_t target_value) const {
ASSERT(
Utils::IsAligned((host_value * kCompressedWordSize), kObjectAlignment));
StoreNonPointer(&untag()->host_instance_size_in_words_, host_value);
#if defined(DART_PRECOMPILER)
ASSERT(
Utils::IsAligned((target_value * compiler::target::kCompressedWordSize),
compiler::target::kObjectAlignment));
StoreNonPointer(&untag()->target_instance_size_in_words_, target_value);
#else
// Could be different only during cross-compilation.
ASSERT_EQUAL(host_value, target_value);
#endif // defined(DART_PRECOMPILER)
}
intptr_t host_next_field_offset() const {
return untag()->host_next_field_offset_in_words_ * kCompressedWordSize;
}
intptr_t target_next_field_offset() const {
#if defined(DART_PRECOMPILER)
return untag()->target_next_field_offset_in_words_ *
compiler::target::kCompressedWordSize;
#else
return host_next_field_offset();
#endif // defined(DART_PRECOMPILER)
}
void set_next_field_offset(intptr_t host_value_in_bytes,
intptr_t target_value_in_bytes) const {
set_next_field_offset_in_words(
host_value_in_bytes / kCompressedWordSize,
target_value_in_bytes / compiler::target::kCompressedWordSize);
}
void set_next_field_offset_in_words(intptr_t host_value,
intptr_t target_value) const {
// Assert that the next field offset is either negative (i.e., this object
// can't be extended by dart code), or rounds up to the kObjectAlignment-
// aligned instance size.
ASSERT((host_value < 0) ||
((host_value <= untag()->host_instance_size_in_words_) &&
(host_value + (kObjectAlignment / kCompressedWordSize) >
untag()->host_instance_size_in_words_)));
StoreNonPointer(&untag()->host_next_field_offset_in_words_, host_value);
#if defined(DART_PRECOMPILER)
ASSERT((target_value < 0) ||
((target_value <= untag()->target_instance_size_in_words_) &&
(target_value + (compiler::target::kObjectAlignment /
compiler::target::kCompressedWordSize) >
untag()->target_instance_size_in_words_)));
StoreNonPointer(&untag()->target_next_field_offset_in_words_, target_value);
#else
// Could be different only during cross-compilation.
ASSERT_EQUAL(host_value, target_value);
#endif // defined(DART_PRECOMPILER)
}
static bool is_valid_id(intptr_t value) {
return UntaggedObject::ClassIdTag::is_valid(value);
}
intptr_t id() const { return untag()->id_; }
void set_id(intptr_t value) const {
ASSERT(value >= 0 && value < std::numeric_limits<classid_t>::max());
StoreNonPointer(&untag()->id_, value);
}
static intptr_t id_offset() { return OFFSET_OF(UntaggedClass, id_); }
#if !defined(DART_PRECOMPILED_RUNTIME)
// If the interface of this class has a single concrete implementation, either
// via `extends` or by `implements`, returns its CID.
// If it has no implementation, returns kIllegalCid.
// If it has more than one implementation, returns kDynamicCid.
intptr_t implementor_cid() const { return untag()->implementor_cid_; }
// Returns true if the implementor tracking state changes and so must be
// propagated to this class's superclass and interfaces.
bool NoteImplementor(const Class& implementor) const;
#endif
static intptr_t num_type_arguments_offset() {
return OFFSET_OF(UntaggedClass, num_type_arguments_);
}
StringPtr Name() const;
StringPtr ScrubbedName() const;
const char* ScrubbedNameCString() const;
StringPtr UserVisibleName() const;
const char* UserVisibleNameCString() const;
const char* NameCString(NameVisibility name_visibility) const;
// The mixin for this class if one exists. Otherwise, returns a raw pointer
// to this class.
ClassPtr Mixin() const;
// The NNBD mode of the library declaring this class.
NNBDMode nnbd_mode() const;
bool IsInFullSnapshot() const;
virtual StringPtr DictionaryName() const { return Name(); }
ScriptPtr script() const { return untag()->script(); }
void set_script(const Script& value) const;
TokenPosition token_pos() const {
#if defined(DART_PRECOMPILED_RUNTIME)
return TokenPosition::kNoSource;
#else
return untag()->token_pos_;
#endif // defined(DART_PRECOMPILED_RUNTIME)
}
#if !defined(DART_PRECOMPILED_RUNTIME)
void set_token_pos(TokenPosition value) const;
#endif // !defined(DART_PRECOMPILED_RUNTIME)
TokenPosition end_token_pos() const {
#if defined(DART_PRECOMPILED_RUNTIME)
return TokenPosition::kNoSource;
#else
return untag()->end_token_pos_;
#endif // defined(DART_PRECOMPILED_RUNTIME)
}
#if !defined(DART_PRECOMPILED_RUNTIME)
void set_end_token_pos(TokenPosition value) const;
#endif // !defined(DART_PRECOMPILED_RUNTIME)
int32_t SourceFingerprint() const;
// Return the Type with type parameters declared by this class filled in with
// dynamic and type parameters declared in superclasses filled in as declared
// in superclass clauses.
AbstractTypePtr RareType() const;
// Return the Type whose arguments are the type parameters declared by this
// class preceded by the type arguments declared for superclasses, etc.
// e.g. given
// class B<T, S>
// class C<R> extends B<R, int>
// C.DeclarationType() --> C [R, int, R]
// The declaration type's nullability is either legacy or non-nullable when
// the non-nullable experiment is enabled.
TypePtr DeclarationType() const;
static intptr_t declaration_type_offset() {
return OFFSET_OF(UntaggedClass, declaration_type_);
}
LibraryPtr library() const { return untag()->library(); }
void set_library(const Library& value) const;
// The formal type parameters and their bounds (no defaults) are specified as
// an object of type TypeParameters.
TypeParametersPtr type_parameters() const {
ASSERT(is_declaration_loaded());
return untag()->type_parameters();
}
void set_type_parameters(const TypeParameters& value) const;
intptr_t NumTypeParameters(Thread* thread) const;
intptr_t NumTypeParameters() const {
return NumTypeParameters(Thread::Current());
}
// Return the type parameter declared at index.
TypeParameterPtr TypeParameterAt(
intptr_t index,
Nullability nullability = Nullability::kNonNullable) const;
// The type argument vector is flattened and includes the type arguments of
// the super class.
intptr_t NumTypeArguments() const;
// Return true if this class declares type parameters.
bool IsGeneric() const {
// If the declaration is not loaded, fall back onto NumTypeParameters.
if (!is_declaration_loaded()) {
return NumTypeParameters(Thread::Current()) > 0;
}
return type_parameters() != Object::null();
}
// Returns a canonicalized vector of the type parameters instantiated
// to bounds. If non-generic, the empty type arguments vector is returned.
TypeArgumentsPtr InstantiateToBounds(Thread* thread) const;
// If this class is parameterized, each instance has a type_arguments field.
static const intptr_t kNoTypeArguments = -1;
intptr_t host_type_arguments_field_offset() const {
ASSERT(is_type_finalized() || is_prefinalized());
if (untag()->host_type_arguments_field_offset_in_words_ ==
kNoTypeArguments) {
return kNoTypeArguments;
}
return untag()->host_type_arguments_field_offset_in_words_ *
kCompressedWordSize;
}
intptr_t target_type_arguments_field_offset() const {
#if defined(DART_PRECOMPILER)
ASSERT(is_type_finalized() || is_prefinalized());
if (untag()->target_type_arguments_field_offset_in_words_ ==
compiler::target::Class::kNoTypeArguments) {
return compiler::target::Class::kNoTypeArguments;
}
return untag()->target_type_arguments_field_offset_in_words_ *
compiler::target::kCompressedWordSize;
#else
return host_type_arguments_field_offset();
#endif // defined(DART_PRECOMPILER)
}
void set_type_arguments_field_offset(intptr_t host_value_in_bytes,
intptr_t target_value_in_bytes) const {
intptr_t host_value, target_value;
if (host_value_in_bytes == kNoTypeArguments ||
target_value_in_bytes == RTN::Class::kNoTypeArguments) {
ASSERT(host_value_in_bytes == kNoTypeArguments &&
target_value_in_bytes == RTN::Class::kNoTypeArguments);
host_value = kNoTypeArguments;
target_value = RTN::Class::kNoTypeArguments;
} else {
ASSERT(kCompressedWordSize != 0 && compiler::target::kCompressedWordSize != 0);
host_value = host_value_in_bytes / kCompressedWordSize;
target_value =
target_value_in_bytes / compiler::target::kCompressedWordSize;
}
set_type_arguments_field_offset_in_words(host_value, target_value);
}
void set_type_arguments_field_offset_in_words(intptr_t host_value,
intptr_t target_value) const {
StoreNonPointer(&untag()->host_type_arguments_field_offset_in_words_,
host_value);
#if defined(DART_PRECOMPILER)
StoreNonPointer(&untag()->target_type_arguments_field_offset_in_words_,
target_value);
#else
// Could be different only during cross-compilation.
ASSERT_EQUAL(host_value, target_value);
#endif // defined(DART_PRECOMPILER)
}
static intptr_t host_type_arguments_field_offset_in_words_offset() {
return OFFSET_OF(UntaggedClass, host_type_arguments_field_offset_in_words_);
}
// The super type of this class, Object type if not explicitly specified.
AbstractTypePtr super_type() const {
ASSERT(is_declaration_loaded());
return untag()->super_type();
}
void set_super_type(const AbstractType& value) const;
static intptr_t super_type_offset() {
return OFFSET_OF(UntaggedClass, super_type_);
}
// Asserts that the class of the super type has been resolved.
// |original_classes| only has an effect when reloading. If true and we
// are reloading, it will prefer the original classes to the replacement
// classes.
ClassPtr SuperClass(bool original_classes = false) const;
// Interfaces is an array of Types.
ArrayPtr interfaces() const {
ASSERT(is_declaration_loaded());
return untag()->interfaces();
}
void set_interfaces(const Array& value) const;
// Returns whether a path from [this] to [cls] can be found, where the first
// element is a direct supertype of [this], each following element is a direct
// supertype of the previous element and the final element has [cls] as its
// type class. If [this] and [cls] are the same class, then the path is empty.
//
// If [path] is not nullptr, then the elements of the path are added to it.
// This path can then be used to compute type arguments of [cls] given type
// arguments for an instance of [this].
//
// Note: There may be multiple paths to [cls], but the result of applying each
// path must be equal to the other results.
bool FindInstantiationOf(Zone* zone,
const Class& cls,
GrowableArray<const AbstractType*>* path,
bool consider_only_super_classes = false) const;
bool FindInstantiationOf(Zone* zone,
const Class& cls,
bool consider_only_super_classes = false) const {
return FindInstantiationOf(zone, cls, /*path=*/nullptr,
consider_only_super_classes);
}
// Returns whether a path from [this] to [type] can be found, where the first
// element is a direct supertype of [this], each following element is a direct
// supertype of the previous element and the final element has the same type
// class as [type]. If [this] is the type class of [type], then the path is
// empty.
//
// If [path] is not nullptr, then the elements of the path are added to it.
// This path can then be used to compute type arguments of [type]'s type
// class given type arguments for an instance of [this].
//
// Note: There may be multiple paths to [type]'s type class, but the result of
// applying each path must be equal to the other results.
bool FindInstantiationOf(Zone* zone,
const Type& type,
GrowableArray<const AbstractType*>* path,
bool consider_only_super_classes = false) const;
bool FindInstantiationOf(Zone* zone,
const Type& type,
bool consider_only_super_classes = false) const {
return FindInstantiationOf(zone, type, /*path=*/nullptr,
consider_only_super_classes);
}
// If [this] is a subtype of a type with type class [cls], then this
// returns [cls]<X_0, ..., X_n>, where n is the number of type arguments for
// [cls] and where each type argument X_k is either instantiated or has free
// class type parameters corresponding to the type parameters of [this].
// Thus, given an instance of [this], the result can be instantiated
// with the instance type arguments to get the type of the instance.
//
// If [this] is not a subtype of a type with type class [cls], returns null.
TypePtr GetInstantiationOf(Zone* zone, const Class& cls) const;
// If [this] is a subtype of [type], then this returns [cls]<X_0, ..., X_n>,
// where [cls] is the type class of [type], n is the number of type arguments
// for [cls], and where each type argument X_k is either instantiated or has
// free class type parameters corresponding to the type parameters of [this].
// Thus, given an instance of [this], the result can be instantiated with the
// instance type arguments to get the type of the instance.
//
// If [this] is not a subtype of a type with type class [cls], returns null.
TypePtr GetInstantiationOf(Zone* zone, const Type& type) const;
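// Illustrative example (reusing the classes from the DeclarationType
// comment): given
//   class B<T, S> { }
//   class C<R> extends B<R, int> { }
// GetInstantiationOf for C with class B yields B<R, int>, where R refers to
// C's type parameter; instantiating that result with the type arguments of a
// C<String> instance gives B<String, int>.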
#if !defined(PRODUCT) || !defined(DART_PRECOMPILED_RUNTIME)
// Returns the list of classes directly implementing this class.
GrowableObjectArrayPtr direct_implementors() const {
DEBUG_ASSERT(
IsolateGroup::Current()->program_lock()->IsCurrentThreadReader());
return untag()->direct_implementors();
}
GrowableObjectArrayPtr direct_implementors_unsafe() const {
return untag()->direct_implementors();
}
#endif // !defined(PRODUCT) || !defined(DART_PRECOMPILED_RUNTIME)
#if !defined(DART_PRECOMPILED_RUNTIME)
void set_direct_implementors(const GrowableObjectArray& implementors) const;
void AddDirectImplementor(const Class& subclass, bool is_mixin) const;
#endif // !defined(DART_PRECOMPILED_RUNTIME)
#if !defined(PRODUCT) || !defined(DART_PRECOMPILED_RUNTIME)
// Returns the list of classes having this class as direct superclass.
GrowableObjectArrayPtr direct_subclasses() const {
DEBUG_ASSERT(
IsolateGroup::Current()->program_lock()->IsCurrentThreadReader());
return direct_subclasses_unsafe();
}
GrowableObjectArrayPtr direct_subclasses_unsafe() const {
return untag()->direct_subclasses();
}
#endif // !defined(PRODUCT) || !defined(DART_PRECOMPILED_RUNTIME)
#if !defined(DART_PRECOMPILED_RUNTIME)
void set_direct_subclasses(const GrowableObjectArray& subclasses) const;
void AddDirectSubclass(const Class& subclass) const;
#endif // !defined(DART_PRECOMPILED_RUNTIME)
// Check if this class represents the class of null.
bool IsNullClass() const { return id() == kNullCid; }
// Check if this class represents the 'dynamic' class.
bool IsDynamicClass() const { return id() == kDynamicCid; }
// Check if this class represents the 'void' class.
bool IsVoidClass() const { return id() == kVoidCid; }
// Check if this class represents the 'Never' class.
bool IsNeverClass() const { return id() == kNeverCid; }
// Check if this class represents the 'Object' class.
bool IsObjectClass() const { return id() == kInstanceCid; }
// Check if this class represents the 'Function' class.
bool IsDartFunctionClass() const;
// Check if this class represents the 'Future' class.
bool IsFutureClass() const;
// Check if this class represents the 'FutureOr' class.
bool IsFutureOrClass() const { return id() == kFutureOrCid; }
// Check if this class represents the 'Closure' class.
bool IsClosureClass() const { return id() == kClosureCid; }
static bool IsClosureClass(ClassPtr cls) {
return GetClassId(cls) == kClosureCid;
}
static bool IsInFullSnapshot(ClassPtr cls) {
NoSafepointScope no_safepoint;
return UntaggedLibrary::InFullSnapshotBit::decode(
cls->untag()->library()->untag()->flags_);
}
static intptr_t GetClassId(ClassPtr cls) {
NoSafepointScope no_safepoint;
return cls->untag()->id_;
}
// Returns true if the type specified by cls, type_arguments, and nullability
// is a subtype of the other type.
static bool IsSubtypeOf(const Class& cls,
const TypeArguments& type_arguments,
Nullability nullability,
const AbstractType& other,
Heap::Space space,
TrailPtr trail = nullptr);
// Check if this is the top-level class.
bool IsTopLevel() const;
bool IsPrivate() const;
DART_WARN_UNUSED_RESULT
ErrorPtr VerifyEntryPoint() const;
// Returns an array of instance and static fields defined by this class.
ArrayPtr fields() const {
// We rely on the fact that any loads from the array are dependent loads
// and avoid the load-acquire barrier here.
return untag()->fields();
}
void SetFields(const Array& value) const;
void AddField(const Field& field) const;
void AddFields(const GrowableArray<const Field*>& fields) const;
// If this is a dart:internal.ClassID class, then inject our own const
// fields. Returns true if synthetic fields are injected and regular
// field declarations should be ignored.
bool InjectCIDFields() const;
// Returns an array of all instance fields of this class and its superclasses
// indexed by offset in words.
// |original_classes| only has an effect when reloading. If true and we
// are reloading, it will prefer the original classes to the replacement
// classes.
ArrayPtr OffsetToFieldMap(bool original_classes = false) const;
// Returns true if non-static fields are defined.
bool HasInstanceFields() const;
ArrayPtr current_functions() const {
// We rely on the fact that any loads from the array are dependent loads
// and avoid the load-acquire barrier here.
return untag()->functions();
}
ArrayPtr functions() const {
DEBUG_ASSERT(
IsolateGroup::Current()->program_lock()->IsCurrentThreadReader());
return current_functions();
}
void SetFunctions(const Array& value) const;
void AddFunction(const Function& function) const;
FunctionPtr FunctionFromIndex(intptr_t idx) const;
intptr_t FindImplicitClosureFunctionIndex(const Function& needle) const;
FunctionPtr ImplicitClosureFunctionFromIndex(intptr_t idx) const;
FunctionPtr LookupFunctionReadLocked(const String& name) const;
FunctionPtr LookupDynamicFunctionUnsafe(const String& name) const;
FunctionPtr LookupDynamicFunctionAllowPrivate(const String& name) const;
FunctionPtr LookupStaticFunction(const String& name) const;
FunctionPtr LookupStaticFunctionAllowPrivate(const String& name) const;
FunctionPtr LookupConstructor(const String& name) const;
FunctionPtr LookupConstructorAllowPrivate(const String& name) const;
FunctionPtr LookupFactory(const String& name) const;
FunctionPtr LookupFactoryAllowPrivate(const String& name) const;
FunctionPtr LookupFunctionAllowPrivate(const String& name) const;
FunctionPtr LookupGetterFunction(const String& name) const;
FunctionPtr LookupSetterFunction(const String& name) const;
FieldPtr LookupInstanceField(const String& name) const;
FieldPtr LookupStaticField(const String& name) const;
FieldPtr LookupField(const String& name) const;
FieldPtr LookupFieldAllowPrivate(const String& name,
bool instance_only = false) const;
FieldPtr LookupInstanceFieldAllowPrivate(const String& name) const;
FieldPtr LookupStaticFieldAllowPrivate(const String& name) const;
// The methods above are more efficient than this generic one.
InstancePtr LookupCanonicalInstance(Zone* zone, const Instance& value) const;
InstancePtr InsertCanonicalConstant(Zone* zone,
const Instance& constant) const;
void RehashConstants(Zone* zone) const;
bool RequireCanonicalTypeErasureOfConstants(Zone* zone) const;
static intptr_t InstanceSize() {
return RoundedAllocationSize(sizeof(UntaggedClass));
}
// Returns true if any class implements this interface via `implements`.
// Returns false if all possible implementations of this interface must be
// instances of this class or its subclasses.
bool is_implemented() const { return ImplementedBit::decode(state_bits()); }
void set_is_implemented() const;
void set_is_implemented_unsafe() const;
bool is_abstract() const { return AbstractBit::decode(state_bits()); }
void set_is_abstract() const;
UntaggedClass::ClassLoadingState class_loading_state() const {
return ClassLoadingBits::decode(state_bits());
}
bool is_declaration_loaded() const {
return class_loading_state() >= UntaggedClass::kDeclarationLoaded;
}
void set_is_declaration_loaded() const;
void set_is_declaration_loaded_unsafe() const;
bool is_type_finalized() const {
return class_loading_state() >= UntaggedClass::kTypeFinalized;
}
void set_is_type_finalized() const;
bool is_synthesized_class() const {
return SynthesizedClassBit::decode(state_bits());
}
void set_is_synthesized_class() const;
void set_is_synthesized_class_unsafe() const;
bool is_enum_class() const { return EnumBit::decode(state_bits()); }
void set_is_enum_class() const;
bool is_finalized() const {
return ClassFinalizedBits::decode(state_bits()) ==
UntaggedClass::kFinalized ||
ClassFinalizedBits::decode(state_bits()) ==
UntaggedClass::kAllocateFinalized;
}
void set_is_finalized() const;
void set_is_finalized_unsafe() const;
bool is_allocate_finalized() const {
return ClassFinalizedBits::decode(state_bits()) ==
UntaggedClass::kAllocateFinalized;
}
void set_is_allocate_finalized() const;
bool is_prefinalized() const {
return ClassFinalizedBits::decode(state_bits()) ==
UntaggedClass::kPreFinalized;
}
void set_is_prefinalized() const;
bool is_const() const { return ConstBit::decode(state_bits()); }
void set_is_const() const;
// Tests if this is a mixin application class which was desugared
// to a normal class by the kernel mixin transformation
// (pkg/kernel/lib/transformations/mixin_full_resolution.dart).
//
// In that case, its mixed-in type was pulled into the end of the
// interfaces list.
bool is_transformed_mixin_application() const {
return TransformedMixinApplicationBit::decode(state_bits());
}
void set_is_transformed_mixin_application() const;
bool is_fields_marked_nullable() const {
return FieldsMarkedNullableBit::decode(state_bits());
}
void set_is_fields_marked_nullable() const;
bool is_allocated() const { return IsAllocatedBit::decode(state_bits()); }
void set_is_allocated(bool value) const;
void set_is_allocated_unsafe(bool value) const;
bool is_loaded() const { return IsLoadedBit::decode(state_bits()); }
void set_is_loaded(bool value) const;
uint16_t num_native_fields() const { return untag()->num_native_fields_; }
void set_num_native_fields(uint16_t value) const {
StoreNonPointer(&untag()->num_native_fields_, value);
}
static uint16_t NumNativeFieldsOf(ClassPtr clazz) {
return clazz->untag()->num_native_fields_;
}
static bool ImplementsFinalizable(ClassPtr clazz) {
ASSERT(Class::Handle(clazz).is_type_finalized());
return ImplementsFinalizableBit::decode(clazz->untag()->state_bits_);
}
#if !defined(DART_PRECOMPILED_RUNTIME)
CodePtr allocation_stub() const { return untag()->allocation_stub(); }
void set_allocation_stub(const Code& value) const;
#endif // !defined(DART_PRECOMPILED_RUNTIME)
intptr_t kernel_offset() const {
#if defined(DART_PRECOMPILED_RUNTIME)
return 0;
#else
return untag()->kernel_offset_;
#endif
}
void set_kernel_offset(intptr_t value) const {
#if defined(DART_PRECOMPILED_RUNTIME)
UNREACHABLE();
#else
ASSERT(value >= 0);
StoreNonPointer(&untag()->kernel_offset_, value);
#endif
}
void DisableAllocationStub() const;
ArrayPtr constants() const;
void set_constants(const Array& value) const;
intptr_t FindInvocationDispatcherFunctionIndex(const Function& needle) const;
FunctionPtr InvocationDispatcherFunctionFromIndex(intptr_t idx) const;
FunctionPtr GetInvocationDispatcher(const String& target_name,
const Array& args_desc,
UntaggedFunction::Kind kind,
bool create_if_absent) const;
void Finalize() const;
ObjectPtr Invoke(const String& selector,
const Array& arguments,
const Array& argument_names,
bool respect_reflectable = true,
bool check_is_entrypoint = false) const;
ObjectPtr InvokeGetter(const String& selector,
bool throw_nsm_if_absent,
bool respect_reflectable = true,
bool check_is_entrypoint = false) const;
ObjectPtr InvokeSetter(const String& selector,
const Instance& argument,
bool respect_reflectable = true,
bool check_is_entrypoint = false) const;
// Evaluates the given expression as if it appeared in a static method of
// this class and returns the resulting value, or an error object if
// evaluating the expression fails. The method has the formal type parameters
// given in type_definitions, and is invoked with the argument values given in
// param_values and the type argument values given in type_param_values.
ObjectPtr EvaluateCompiledExpression(
const ExternalTypedData& kernel_buffer,
const Array& type_definitions,
const Array& param_values,
const TypeArguments& type_param_values) const;
// Load class declaration (super type, interfaces, type parameters and
// number of type arguments) if it is not loaded yet.
void EnsureDeclarationLoaded() const;
ErrorPtr EnsureIsFinalized(Thread* thread) const;
ErrorPtr EnsureIsAllocateFinalized(Thread* thread) const;
// Allocate a class used for VM internal objects.
template <class FakeObject, class TargetFakeObject>
static ClassPtr New(IsolateGroup* isolate_group, bool register_class = true);
// Allocate instance classes.
static ClassPtr New(const Library& lib,
const String& name,
const Script& script,
TokenPosition token_pos,
bool register_class = true);
static ClassPtr NewNativeWrapper(const Library& library,
const String& name,
int num_fields);
// Allocate the raw string classes.
static ClassPtr NewStringClass(intptr_t class_id,
IsolateGroup* isolate_group);
// Allocate the raw TypedData classes.
static ClassPtr NewTypedDataClass(intptr_t class_id,
IsolateGroup* isolate_group);
// Allocate the raw TypedDataView/ByteDataView classes.
static ClassPtr NewTypedDataViewClass(intptr_t class_id,
IsolateGroup* isolate_group);
static ClassPtr NewUnmodifiableTypedDataViewClass(
intptr_t class_id,
IsolateGroup* isolate_group);
// Allocate the raw ExternalTypedData classes.
static ClassPtr NewExternalTypedDataClass(intptr_t class_id,
IsolateGroup* isolate_group);
// Allocate the raw Pointer classes.
static ClassPtr NewPointerClass(intptr_t class_id,
IsolateGroup* isolate_group);
#if !defined(DART_PRECOMPILED_RUNTIME)
// Register code that has used CHA for optimization.
// TODO(srdjan): Also register kind of CHA optimization (e.g.: leaf class,
// leaf method, ...).
void RegisterCHACode(const Code& code);
void DisableCHAOptimizedCode(const Class& subclass);
void DisableAllCHAOptimizedCode();
void DisableCHAImplementorUsers() { DisableAllCHAOptimizedCode(); }
// Return the list of code objects that were compiled using CHA of this
// class. These code objects will be invalidated if new subclasses of this
// class are finalized.
ArrayPtr dependent_code() const;
void set_dependent_code(const Array& array) const;
#endif // !defined(DART_PRECOMPILED_RUNTIME)
bool TraceAllocation(IsolateGroup* isolate_group) const;
void SetTraceAllocation(bool trace_allocation) const;
void ReplaceEnum(ProgramReloadContext* reload_context,
const Class& old_enum) const;
void CopyStaticFieldValues(ProgramReloadContext* reload_context,
const Class& old_cls) const;
void PatchFieldsAndFunctions() const;
void MigrateImplicitStaticClosures(ProgramReloadContext* context,
const Class& new_cls) const;
void CopyCanonicalConstants(const Class& old_cls) const;
void CopyDeclarationType(const Class& old_cls) const;
void CheckReload(const Class& replacement,
ProgramReloadContext* context) const;
void AddInvocationDispatcher(const String& target_name,
const Array& args_desc,
const Function& dispatcher) const;
static int32_t host_instance_size_in_words(const ClassPtr cls) {
return cls->untag()->host_instance_size_in_words_;
}
static int32_t target_instance_size_in_words(const ClassPtr cls) {
#if defined(DART_PRECOMPILER)
return cls->untag()->target_instance_size_in_words_;
#else
return host_instance_size_in_words(cls);
#endif // defined(DART_PRECOMPILER)
}
static int32_t host_next_field_offset_in_words(const ClassPtr cls) {
return cls->untag()->host_next_field_offset_in_words_;
}
static int32_t target_next_field_offset_in_words(const ClassPtr cls) {
#if defined(DART_PRECOMPILER)
return cls->untag()->target_next_field_offset_in_words_;
#else
return host_next_field_offset_in_words(cls);
#endif // defined(DART_PRECOMPILER)
}
static int32_t host_type_arguments_field_offset_in_words(const ClassPtr cls) {
return cls->untag()->host_type_arguments_field_offset_in_words_;
}
static int32_t target_type_arguments_field_offset_in_words(
const ClassPtr cls) {
#if defined(DART_PRECOMPILER)
return cls->untag()->target_type_arguments_field_offset_in_words_;
#else
return host_type_arguments_field_offset_in_words(cls);
#endif // defined(DART_PRECOMPILER)
}
private:
TypePtr declaration_type() const {
return untag()->declaration_type<std::memory_order_acquire>();
}
// Caches the declaration type of this class.
void set_declaration_type(const Type& type) const;
bool CanReloadFinalized(const Class& replacement,
ProgramReloadContext* context) const;
bool CanReloadPreFinalized(const Class& replacement,
ProgramReloadContext* context) const;
// Tells whether instances need morphing for reload.
bool RequiresInstanceMorphing(const Class& replacement) const;
template <class FakeInstance, class TargetFakeInstance>
static ClassPtr NewCommon(intptr_t index);
enum MemberKind {
kAny = 0,
kStatic,
kInstance,
kInstanceAllowAbstract,
kConstructor,
kFactory,
};
enum StateBits {
kConstBit = 0,
kImplementedBit = 1,
kClassFinalizedPos = 2,
kClassFinalizedSize = 2,
kClassLoadingPos = kClassFinalizedPos + kClassFinalizedSize, // = 4
kClassLoadingSize = 2,
kAbstractBit = kClassLoadingPos + kClassLoadingSize, // = 6
kSynthesizedClassBit,
kMixinAppAliasBit,
kMixinTypeAppliedBit,
kFieldsMarkedNullableBit,
kEnumBit,
kTransformedMixinApplicationBit,
kIsAllocatedBit,
kIsLoadedBit,
kHasPragmaBit,
kImplementsFinalizableBit,
};
class ConstBit : public BitField<uint32_t, bool, kConstBit, 1> {};
class ImplementedBit : public BitField<uint32_t, bool, kImplementedBit, 1> {};
class ClassFinalizedBits : public BitField<uint32_t,
UntaggedClass::ClassFinalizedState,
kClassFinalizedPos,
kClassFinalizedSize> {};
class ClassLoadingBits : public BitField<uint32_t,
UntaggedClass::ClassLoadingState,
kClassLoadingPos,
kClassLoadingSize> {};
class AbstractBit : public BitField<uint32_t, bool, kAbstractBit, 1> {};
class SynthesizedClassBit
: public BitField<uint32_t, bool, kSynthesizedClassBit, 1> {};
class FieldsMarkedNullableBit
: public BitField<uint32_t, bool, kFieldsMarkedNullableBit, 1> {};
class EnumBit : public BitField<uint32_t, bool, kEnumBit, 1> {};
class TransformedMixinApplicationBit
: public BitField<uint32_t, bool, kTransformedMixinApplicationBit, 1> {};
class IsAllocatedBit : public BitField<uint32_t, bool, kIsAllocatedBit, 1> {};
class IsLoadedBit : public BitField<uint32_t, bool, kIsLoadedBit, 1> {};
class HasPragmaBit : public BitField<uint32_t, bool, kHasPragmaBit, 1> {};
class ImplementsFinalizableBit
: public BitField<uint32_t, bool, kImplementsFinalizableBit, 1> {};
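// For illustration only: each BitField above reads or writes its slice of
// state_bits(). A sketch, assuming the standard BitField decode/update
// helpers:
//
//   uint32_t bits = AbstractBit::update(true, 0u);  // sets bit 6
//   bool is_abstract = AbstractBit::decode(bits);   // true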
void set_name(const String& value) const;
void set_user_name(const String& value) const;
const char* GenerateUserVisibleName() const;
void set_state_bits(intptr_t bits) const;
void set_implementor_cid(intptr_t value) const;
FunctionPtr CreateInvocationDispatcher(const String& target_name,
const Array& args_desc,
UntaggedFunction::Kind kind) const;
// Returns the bitmap of unboxed fields.
UnboxedFieldBitmap CalculateFieldOffsets() const;
// functions_hash_table is in use iff there are at least this many functions.
static const intptr_t kFunctionLookupHashTreshold = 16;
// Initial value for the cached number of type arguments.
static const intptr_t kUnknownNumTypeArguments = -1;
int16_t num_type_arguments() const {
return LoadNonPointer<int16_t, std::memory_order_relaxed>(
&untag()->num_type_arguments_);
}
uint32_t state_bits() const {
// Ensure any following load instructions do not get performed before this
// one.
return LoadNonPointer<uint32_t, std::memory_order_acquire>(
&untag()->state_bits_);
}
public:
void set_num_type_arguments(intptr_t value) const;
void set_num_type_arguments_unsafe(intptr_t value) const;
bool has_pragma() const { return HasPragmaBit::decode(state_bits()); }
void set_has_pragma(bool has_pragma) const;
bool implements_finalizable() const {
ASSERT(is_type_finalized());
return ImplementsFinalizable(ptr());
}
void set_implements_finalizable(bool value) const;
private:
void set_functions(const Array& value) const;
void set_fields(const Array& value) const;
void set_invocation_dispatcher_cache(const Array& cache) const;
ArrayPtr invocation_dispatcher_cache() const;
// Calculates the number of type arguments of this class.
// This includes the type arguments of the superclass and takes overlapping
// type arguments into account.
intptr_t ComputeNumTypeArguments() const;
// Assigns an empty array to all raw class array fields.
void InitEmptyFields();
static FunctionPtr CheckFunctionType(const Function& func, MemberKind kind);
FunctionPtr LookupFunctionReadLocked(const String& name,
MemberKind kind) const;
FunctionPtr LookupFunctionAllowPrivate(const String& name,
MemberKind kind) const;
FieldPtr LookupField(const String& name, MemberKind kind) const;
FunctionPtr LookupAccessorFunction(const char* prefix,
intptr_t prefix_length,
const String& name) const;
// Allocate an instance class which has a VM implementation.
template <class FakeInstance, class TargetFakeInstance>
static ClassPtr New(intptr_t id,
IsolateGroup* isolate_group,
bool register_class = true,
bool is_abstract = false);
// Helper that calls 'Class::New<Instance>(kIllegalCid)'.
static ClassPtr NewInstanceClass();
FINAL_HEAP_OBJECT_IMPLEMENTATION(Class, Object);
friend class AbstractType;
friend class Instance;
friend class Object;
friend class Type;
friend class Intrinsifier;
friend class ProgramWalker;
friend class Precompiler;
friend class ClassFinalizer;
};
// Classification of type genericity according to type parameter owners.
enum Genericity {
kAny, // Consider type params of current class and functions.
kCurrentClass, // Consider type params of current class only.
kFunctions, // Consider type params of current and parent functions.
};
class PatchClass : public Object {
public:
ClassPtr patched_class() const { return untag()->patched_class(); }
ClassPtr origin_class() const { return untag()->origin_class(); }
ScriptPtr script() const { return untag()->script(); }
ExternalTypedDataPtr library_kernel_data() const {
return untag()->library_kernel_data();
}
void set_library_kernel_data(const ExternalTypedData& data) const;
intptr_t library_kernel_offset() const {
#if !defined(DART_PRECOMPILED_RUNTIME)
return untag()->library_kernel_offset_;
#else
return -1;
#endif
}
void set_library_kernel_offset(intptr_t offset) const {
NOT_IN_PRECOMPILED(
StoreNonPointer(&untag()->library_kernel_offset_, offset));
}
static intptr_t InstanceSize() {
return RoundedAllocationSize(sizeof(UntaggedPatchClass));
}
static bool IsInFullSnapshot(PatchClassPtr cls) {
NoSafepointScope no_safepoint;
return Class::IsInFullSnapshot(cls->untag()->patched_class());
}
static PatchClassPtr New(const Class& patched_class,
const Class& origin_class);
static PatchClassPtr New(const Class& patched_class, const Script& source);
private:
void set_patched_class(const Class& value) const;
void set_origin_class(const Class& value) const;
void set_script(const Script& value) const;
static PatchClassPtr New();
FINAL_HEAP_OBJECT_IMPLEMENTATION(PatchClass, Object);
friend class Class;
};
class SingleTargetCache : public Object {
public:
CodePtr target() const { return untag()->target(); }
void set_target(const Code& target) const;
static intptr_t target_offset() {
return OFFSET_OF(UntaggedSingleTargetCache, target_);
}
#define DEFINE_NON_POINTER_FIELD_ACCESSORS(type, name) \
type name() const { return untag()->name##_; } \
void set_##name(type value) const { \
StoreNonPointer(&untag()->name##_, value); \
} \
static intptr_t name##_offset() { \
return OFFSET_OF(UntaggedSingleTargetCache, name##_); \
}
DEFINE_NON_POINTER_FIELD_ACCESSORS(uword, entry_point);
DEFINE_NON_POINTER_FIELD_ACCESSORS(intptr_t, lower_limit);
DEFINE_NON_POINTER_FIELD_ACCESSORS(intptr_t, upper_limit);
#undef DEFINE_NON_POINTER_FIELD_ACCESSORS
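// For example, DEFINE_NON_POINTER_FIELD_ACCESSORS(uword, entry_point) above
// expands (roughly) to:
//
//   uword entry_point() const { return untag()->entry_point_; }
//   void set_entry_point(uword value) const {
//     StoreNonPointer(&untag()->entry_point_, value);
//   }
//   static intptr_t entry_point_offset() {
//     return OFFSET_OF(UntaggedSingleTargetCache, entry_point_);
//   }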
static intptr_t InstanceSize() {
return RoundedAllocationSize(sizeof(UntaggedSingleTargetCache));
}
static SingleTargetCachePtr New();
private:
FINAL_HEAP_OBJECT_IMPLEMENTATION(SingleTargetCache, Object);
friend class Class;
};
class MonomorphicSmiableCall : public Object {
public:
classid_t expected_cid() const { return untag()->expected_cid_; }
static intptr_t InstanceSize() {
return RoundedAllocationSize(sizeof(UntaggedMonomorphicSmiableCall));
}
static MonomorphicSmiableCallPtr New(classid_t expected_cid,
const Code& target);
static intptr_t expected_cid_offset() {
return OFFSET_OF(UntaggedMonomorphicSmiableCall, expected_cid_);
}
static intptr_t entrypoint_offset() {
return OFFSET_OF(UntaggedMonomorphicSmiableCall, entrypoint_);
}
private:
FINAL_HEAP_OBJECT_IMPLEMENTATION(MonomorphicSmiableCall, Object);
friend class Class;
};
class CallSiteData : public Object {
public:
StringPtr target_name() const { return untag()->target_name(); }
ArrayPtr arguments_descriptor() const { return untag()->args_descriptor(); }
intptr_t TypeArgsLen() const;
intptr_t CountWithTypeArgs() const;
intptr_t CountWithoutTypeArgs() const;
intptr_t SizeWithoutTypeArgs() const;
intptr_t SizeWithTypeArgs() const;
static intptr_t target_name_offset() {
return OFFSET_OF(UntaggedCallSiteData, target_name_);
}
static intptr_t arguments_descriptor_offset() {
return OFFSET_OF(UntaggedCallSiteData, args_descriptor_);
}
private:
void set_target_name(const String& value) const;
void set_arguments_descriptor(const Array& value) const;
HEAP_OBJECT_IMPLEMENTATION(CallSiteData, Object)
friend class ICData;
friend class MegamorphicCache;
};
class UnlinkedCall : public CallSiteData {
public:
bool can_patch_to_monomorphic() const {
return untag()->can_patch_to_monomorphic_;
}
static intptr_t InstanceSize() {
return RoundedAllocationSize(sizeof(UntaggedUnlinkedCall));
}
uword Hash() const;
bool Equals(const UnlinkedCall& other) const;
static UnlinkedCallPtr New();
private:
friend class ICData; // For set_*() methods.
void set_can_patch_to_monomorphic(bool value) const;
FINAL_HEAP_OBJECT_IMPLEMENTATION(UnlinkedCall, CallSiteData);
friend class Class;
};
// Object holding information about an IC: test classes and their
// corresponding targets. The owner of the ICData can be either the function
// or the original ICData object. In the case of background compilation we
// copy the ICData into a child object, thus freezing it during background
// compilation. Code may contain only original ICData objects.
//
// ICData's backing store is an array that logically contains several valid
// entries followed by a sentinel entry.
//
// [<entry-0>, <...>, <entry-N>, <sentinel>]
//
// Each entry has the following form:
//
// [arg0?, arg1?, argN?, count, target-function/code, exactness?]
//
// Each <entry-X> must contain valid type feedback.
// The <sentinel> entry must have the kIllegalCid value for all
// members of the entry except for the last one (`exactness` if
// present, otherwise `target-function/code`), which we use as a backref:
//
// * For empty ICData we use a cached/shared backing store. So there is no
// unique backref, we use kIllegalCid instead.
// * For non-empty ICData the backref in the backing store array will point to
// the ICData object.
//
// Updating the ICData happens under a lock to avoid phantom reads. The backing
// store is treated as an immutable copy-on-write data structure: adding to the
// ICData makes a copy with length+1 entries, which is store-release'd so any
// reader can see it (and doesn't need to hold a lock).
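//
// For illustration only (a sketch, not an API guarantee): with
// num_args_tested == 1 and no exactness tracking, each entry is
// [cid, count, target], so a reader that has acquire-loaded the entries array
// could index it as:
//
//   const intptr_t len =
//       ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false);  // == 3
//   // entry i's receiver cid:  entries[i * len + 0]
//   // entry i's usage count:   entries[i * len + ICData::CountIndexFor(1)]
//   // entry i's target:        entries[i * len + ICData::TargetIndexFor(1)]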
class ICData : public CallSiteData {
public:
FunctionPtr Owner() const;
ICDataPtr Original() const;
void SetOriginal(const ICData& value) const;
bool IsOriginal() const { return Original() == this->ptr(); }
intptr_t NumArgsTested() const;
intptr_t deopt_id() const {
#if defined(DART_PRECOMPILED_RUNTIME)
UNREACHABLE();
return -1;
#else
return untag()->deopt_id_;
#endif
}
bool IsImmutable() const;
#if !defined(DART_PRECOMPILED_RUNTIME)
AbstractTypePtr receivers_static_type() const {
return untag()->receivers_static_type();
}
bool is_tracking_exactness() const {
return untag()->state_bits_.Read<TrackingExactnessBit>();
}
#else
bool is_tracking_exactness() const { return false; }
#endif
// Note: only deopts with reasons before Unknown in this list are recorded in
// the ICData. All other reasons are used purely for informational messages
// printed during deoptimization itself.
#define DEOPT_REASONS(V) \
V(BinarySmiOp) \
V(BinaryInt64Op) \
V(DoubleToSmi) \
V(CheckSmi) \
V(CheckClass) \
V(Unknown) \
V(PolymorphicInstanceCallTestFail) \
V(UnaryInt64Op) \
V(BinaryDoubleOp) \
V(UnaryOp) \
V(UnboxInteger) \
V(Unbox) \
V(CheckArrayBound) \
V(AtCall) \
V(GuardField) \
V(TestCids) \
V(NumReasons)
enum DeoptReasonId {
#define DEFINE_ENUM_LIST(name) kDeopt##name,
DEOPT_REASONS(DEFINE_ENUM_LIST)
#undef DEFINE_ENUM_LIST
};
static const intptr_t kLastRecordedDeoptReason = kDeoptUnknown - 1;
enum DeoptFlags {
// Deoptimization is caused by an optimistically hoisted instruction.
kHoisted = 1 << 0,
// Deoptimization is caused by an optimistically generalized bounds check.
kGeneralized = 1 << 1
};
bool HasDeoptReasons() const { return DeoptReasons() != 0; }
uint32_t DeoptReasons() const;
void SetDeoptReasons(uint32_t reasons) const;
bool HasDeoptReason(ICData::DeoptReasonId reason) const;
void AddDeoptReason(ICData::DeoptReasonId reason) const;
// Call site classification that is helpful for hot-reload. Call sites with
// different `RebindRule` have to be rebound differently.
#define FOR_EACH_REBIND_RULE(V) \
V(Instance) \
V(NoRebind) \
V(NSMDispatch) \
V(Optimized) \
V(Static) \
V(Super)
enum RebindRule {
#define REBIND_ENUM_DEF(name) k##name,
FOR_EACH_REBIND_RULE(REBIND_ENUM_DEF)
#undef REBIND_ENUM_DEF
kNumRebindRules,
};
static const char* RebindRuleToCString(RebindRule r);
static bool ParseRebindRule(const char* str, RebindRule* out);
RebindRule rebind_rule() const;
void set_is_megamorphic(bool value) const {
untag()->state_bits_.UpdateBool<MegamorphicBit, std::memory_order_release>(
value);
}
// The length of the array. This includes all sentinel entries, including
// the final one.
intptr_t Length() const;
intptr_t NumberOfChecks() const;
// Discounts any checks with usage of zero.
// Takes O(result) time!
intptr_t NumberOfUsedChecks() const;
bool NumberOfChecksIs(intptr_t n) const;
bool IsValidEntryIndex(intptr_t index) const {
return 0 <= index && index < NumberOfChecks();
}
static intptr_t InstanceSize() {
return RoundedAllocationSize(sizeof(UntaggedICData));
}
static intptr_t state_bits_offset() {
return OFFSET_OF(UntaggedICData, state_bits_);
}
static intptr_t NumArgsTestedShift() { return kNumArgsTestedPos; }
static intptr_t NumArgsTestedMask() {
return ((1 << kNumArgsTestedSize) - 1) << kNumArgsTestedPos;
}
static intptr_t entries_offset() {
return OFFSET_OF(UntaggedICData, entries_);
}
static intptr_t owner_offset() { return OFFSET_OF(UntaggedICData, owner_); }
#if !defined(DART_PRECOMPILED_RUNTIME)
static intptr_t receivers_static_type_offset() {
return OFFSET_OF(UntaggedICData, receivers_static_type_);
}
#endif
// NOTE: Can only be called during reload.
void Clear(const CallSiteResetter& proof_of_reload) const {
TruncateTo(0, proof_of_reload);
}
// NOTE: Can only be called during reload.
void TruncateTo(intptr_t num_checks,
const CallSiteResetter& proof_of_reload) const;
// Clears the count for entry |index|.
// NOTE: Can only be called during reload.
void ClearCountAt(intptr_t index,
const CallSiteResetter& proof_of_reload) const;
// Clear all entries with the sentinel value and reset the first entry
// with the dummy target entry.
// NOTE: Can only be called during reload.
void ClearAndSetStaticTarget(const Function& func,
const CallSiteResetter& proof_of_reload) const;
void DebugDump() const;
// Adding checks.
// Ensures there is a check for [class_ids].
//
// Calls [AddCheck] iff there is no existing check. Ensures the test (and
// potential update) is performed under an exclusive lock to guard against
// multiple threads trying to add the same check.
void EnsureHasCheck(const GrowableArray<intptr_t>& class_ids,
const Function& target,
intptr_t count = 1) const;
// Adds one more class test to the ICData. The length of 'class_ids' must be
// equal to the number of arguments tested. Use only for num_args_tested > 1.
void AddCheck(const GrowableArray<intptr_t>& class_ids,
const Function& target,
intptr_t count = 1) const;
StaticTypeExactnessState GetExactnessAt(intptr_t count) const;
// Ensures there is a receiver check for [receiver_class_id].
//
// Calls [AddReceiverCheck] iff there is no existing check. Ensures the
// test (and potential update) is performed under an exclusive lock to
// guard against multiple threads trying to add the same check.
void EnsureHasReceiverCheck(
intptr_t receiver_class_id,
const Function& target,
intptr_t count = 1,
StaticTypeExactnessState exactness =
StaticTypeExactnessState::NotTracking()) const;
// Adds a receiver check, keeping entries sorted so that Smi is the first
// class-id. Use only for num_args_tested == 1.
void AddReceiverCheck(intptr_t receiver_class_id,
const Function& target,
intptr_t count = 1,
StaticTypeExactnessState exactness =
StaticTypeExactnessState::NotTracking()) const;
// Retrieving checks.
void GetCheckAt(intptr_t index,
GrowableArray<intptr_t>* class_ids,
Function* target) const;
void GetClassIdsAt(intptr_t index, GrowableArray<intptr_t>* class_ids) const;
// Only for 'num_args_tested == 1'.
void GetOneClassCheckAt(intptr_t index,
intptr_t* class_id,
Function* target) const;
// Only for 'num_args_tested == 1'.
intptr_t GetCidAt(intptr_t index) const;
intptr_t GetReceiverClassIdAt(intptr_t index) const;
intptr_t GetClassIdAt(intptr_t index, intptr_t arg_nr) const;
FunctionPtr GetTargetAt(intptr_t index) const;
void IncrementCountAt(intptr_t index, intptr_t value) const;
void SetCountAt(intptr_t index, intptr_t value) const;
intptr_t GetCountAt(intptr_t index) const;
intptr_t AggregateCount() const;
// Returns this->ptr() if num_args_tested == 1 and arg_nr == 1, otherwise
// returns a new ICData object containing only unique arg_nr checks.
// Returns only used entries.
ICDataPtr AsUnaryClassChecksForArgNr(intptr_t arg_nr) const;
ICDataPtr AsUnaryClassChecks() const { return AsUnaryClassChecksForArgNr(0); }
// Returns ICData with aggregated receiver count, sorted by highest count.
// Note: Smi is not necessarily first here, even though the convention for
// ICData used in code generation is that the Smi check comes first.
// Used for printing and optimizations.
ICDataPtr AsUnaryClassChecksSortedByCount() const;
UnlinkedCallPtr AsUnlinkedCall() const;
bool HasReceiverClassId(intptr_t class_id) const;
// Note: passing non-null receiver_type enables exactness tracking for
// the receiver type. Receiver type is expected to be a fully
// instantiated generic (but not a FutureOr).
// See StaticTypeExactnessState for more information.
static ICDataPtr New(
const Function& owner,
const String& target_name,
const Array& arguments_descriptor,
intptr_t deopt_id,
intptr_t num_args_tested,
RebindRule rebind_rule,
const AbstractType& receiver_type = Object::null_abstract_type());
// Similar to [New], but makes the ICData have an initial (cids, target)
// entry.
static ICDataPtr NewWithCheck(
const Function& owner,
const String& target_name,
const Array& arguments_descriptor,
intptr_t deopt_id,
intptr_t num_args_tested,
RebindRule rebind_rule,
GrowableArray<intptr_t>* cids,
const Function& target,
const AbstractType& receiver_type = Object::null_abstract_type());
static ICDataPtr NewForStaticCall(const Function& owner,
const Function& target,
const Array& arguments_descriptor,
intptr_t deopt_id,
intptr_t num_args_tested,
RebindRule rebind_rule);
static ICDataPtr NewFrom(const ICData& from, intptr_t num_args_tested);
// Generates a new ICData with descriptor and data array copied (deep clone).
static ICDataPtr Clone(const ICData& from);
// Gets the [ICData] from the [ICData::entries_] array (which stores a back
// ref).
//
// May return `null` if the [ICData] is empty.
static ICDataPtr ICDataOfEntriesArray(const Array& array);
static intptr_t TestEntryLengthFor(intptr_t num_args,
bool tracking_exactness);
static intptr_t CountIndexFor(intptr_t num_args) { return num_args; }
static intptr_t EntryPointIndexFor(intptr_t num_args) { return num_args; }
static intptr_t TargetIndexFor(intptr_t num_args) { return num_args + 1; }
static intptr_t CodeIndexFor(intptr_t num_args) { return num_args + 1; }
static intptr_t ExactnessIndexFor(intptr_t num_args) { return num_args + 2; }
bool IsUsedAt(intptr_t i) const;
void PrintToJSONArray(const JSONArray& jsarray,
TokenPosition token_pos) const;
// Initialize the preallocated empty ICData entry arrays.
static void Init();
// Clear the preallocated empty ICData entry arrays.
static void Cleanup();
// We cache ICData with 0, 1, 2 arguments tested without exactness
// tracking and with 1 argument tested with exactness tracking.
enum {
kCachedICDataZeroArgTestedWithoutExactnessTrackingIdx = 0,
kCachedICDataMaxArgsTestedWithoutExactnessTracking = 2,
kCachedICDataOneArgWithExactnessTrackingIdx =
kCachedICDataZeroArgTestedWithoutExactnessTrackingIdx +
kCachedICDataMaxArgsTestedWithoutExactnessTracking + 1,
kCachedICDataArrayCount = kCachedICDataOneArgWithExactnessTrackingIdx + 1,
};
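// With the constants above: indices 0, 1 and 2 hold the cached arrays for 0,
// 1 and 2 args tested without exactness tracking, index 3 holds the one for
// 1 arg tested with exactness tracking, and kCachedICDataArrayCount == 4.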
bool is_static_call() const;
intptr_t FindCheck(const GrowableArray<intptr_t>& cids) const;
ArrayPtr entries() const {
return untag()->entries<std::memory_order_acquire>();
}
bool receiver_cannot_be_smi() const {
return untag()->state_bits_.Read<ReceiverCannotBeSmiBit>();
}
void set_receiver_cannot_be_smi(bool value) const {
untag()->state_bits_.UpdateBool<ReceiverCannotBeSmiBit>(value);
}
uword Hash() const;
private:
static ICDataPtr New();
// Grows the array and also sets the argument to the index that should be used
// for the new entry.
ArrayPtr Grow(intptr_t* index) const;
void set_deopt_id(intptr_t value) const;
void set_entries(const Array& value) const;
void set_owner(const Function& value) const;
void set_rebind_rule(uint32_t rebind_rule) const;
void clear_state_bits() const;
void set_tracking_exactness(bool value) const {
untag()->state_bits_.UpdateBool<TrackingExactnessBit>(value);
}
void SetNumArgsTested(intptr_t value) const;
void SetReceiversStaticType(const AbstractType& type) const;
DEBUG_ONLY(void AssertInvariantsAreSatisfied() const;)
static void SetTargetAtPos(const Array& data,
intptr_t data_pos,
intptr_t num_args_tested,
const Function& target);
void AddCheckInternal(const GrowableArray<intptr_t>& class_ids,
const Function& target,
intptr_t count) const;
void AddReceiverCheckInternal(intptr_t receiver_class_id,
const Function& target,
intptr_t count,
StaticTypeExactnessState exactness) const;
// This bit is set when a call site becomes megamorphic and starts using a
// MegamorphicCache instead of ICData. It means that the entries in the
// ICData are incomplete and the MegamorphicCache needs to also be consulted
// to list the call site's observed receiver classes and targets.
// In the compiler, this should only be read once by CallTargets to avoid the
// compiler seeing an unstable set of feedback.
bool is_megamorphic() const {
// Ensure any following load instructions do not get performed before this
// one.
return untag()
->state_bits_.Read<MegamorphicBit, std::memory_order_acquire>();
}
bool ValidateInterceptor(const Function& target) const;
enum {
kNumArgsTestedPos = 0,
kNumArgsTestedSize = 2,
kTrackingExactnessPos = kNumArgsTestedPos + kNumArgsTestedSize,
kTrackingExactnessSize = 1,
kDeoptReasonPos = kTrackingExactnessPos + kTrackingExactnessSize,
kDeoptReasonSize = kLastRecordedDeoptReason + 1,
kRebindRulePos = kDeoptReasonPos + kDeoptReasonSize,
kRebindRuleSize = 3,
kMegamorphicPos = kRebindRulePos + kRebindRuleSize,
kMegamorphicSize = 1,
kReceiverCannotBeSmiPos = kMegamorphicPos + kMegamorphicSize,
kReceiverCannotBeSmiSize = 1,
};
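// With the enumerators above (kDeoptUnknown == 5, so kDeoptReasonSize == 5):
// kDeoptReasonPos == 3, kRebindRulePos == 8, kMegamorphicPos == 11 and
// kReceiverCannotBeSmiPos == 12, all within the 32-bit state_bits_.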
COMPILE_ASSERT(kReceiverCannotBeSmiPos + kReceiverCannotBeSmiSize <=
sizeof(UntaggedICData::state_bits_) * kBitsPerByte);
COMPILE_ASSERT(kNumRebindRules <= (1 << kRebindRuleSize));
class NumArgsTestedBits : public BitField<uint32_t,
uint32_t,
kNumArgsTestedPos,
kNumArgsTestedSize> {};
class TrackingExactnessBit : public BitField<uint32_t,
bool,
kTrackingExactnessPos,
kTrackingExactnessSize> {};
class DeoptReasonBits : public BitField<uint32_t,
uint32_t,
ICData::kDeoptReasonPos,
ICData::kDeoptReasonSize> {};
class RebindRuleBits : public BitField<uint32_t,
uint32_t,
ICData::kRebindRulePos,
ICData::kRebindRuleSize> {};
class MegamorphicBit
: public BitField<uint32_t, bool, kMegamorphicPos, kMegamorphicSize> {};
class ReceiverCannotBeSmiBit : public BitField<uint32_t,
bool,
kReceiverCannotBeSmiPos,
kReceiverCannotBeSmiSize> {};
#if defined(DEBUG)
// Used in asserts to verify that a check is not added twice.
bool HasCheck(const GrowableArray<intptr_t>& cids) const;
#endif // DEBUG
intptr_t TestEntryLength() const;
static ArrayPtr NewNonCachedEmptyICDataArray(intptr_t num_args_tested,
bool tracking_exactness);
static ArrayPtr CachedEmptyICDataArray(intptr_t num_args_tested,
bool tracking_exactness);
static bool IsCachedEmptyEntry(const Array& array);
static ICDataPtr NewDescriptor(Zone* zone,
const Function& owner,
const String& target_name,
const Array& arguments_descriptor,
intptr_t deopt_id,
intptr_t num_args_tested,
RebindRule rebind_rule,
const AbstractType& receiver_type);
static void WriteSentinel(const Array& data,
intptr_t test_entry_length,
const Object& back_ref);
// A cache of VM-heap-allocated, preinitialized, empty ICData entry arrays.
static ArrayPtr cached_icdata_arrays_[kCachedICDataArrayCount];
FINAL_HEAP_OBJECT_IMPLEMENTATION(ICData, CallSiteData);
friend class CallSiteResetter;
friend class CallTargets;
friend class Class;
friend class VMDeserializationRoots;
friend class ICDataTestTask;
friend class VMSerializationRoots;
};
// Often used constants for number of free function type parameters.
enum {
kNoneFree = 0,
// 'kCurrentAndEnclosingFree' is used when partially applying a signature
// function to a set of type arguments. It indicates that the set of type
// parameters declared by the current function and enclosing functions should
// be considered free, and the current function type parameters should be
// substituted as well.
//
// For instance, if the signature "<T>(T, R) => T" is instantiated with
// function type arguments [int, String] and kCurrentAndEnclosingFree is
// supplied, the result of the instantiation will be "(String, int) => int".
kCurrentAndEnclosingFree = kMaxInt32 - 1,
// Only parameters declared by enclosing functions are free.
kAllFree = kMaxInt32,
};
// Formatting configuration for Function::PrintName.
struct NameFormattingParams {
Object::NameVisibility name_visibility;
bool disambiguate_names;
// By default the function name includes the name of the enclosing class, if
// any. However, in some contexts this information is redundant and the class
// name is already known. In this case setting |include_class_name| to false
// allows you to exclude this information from the formatted name.
bool include_class_name = true;
// By default the function name includes the name of the enclosing function,
// if any. However, in some contexts this information is redundant and
// the name of the enclosing function is already known. In this case
// setting |include_parent_name| to false allows you to exclude this
// information from the formatted name.
bool include_parent_name = true;
NameFormattingParams(Object::NameVisibility visibility,
Object::NameDisambiguation name_disambiguation =
Object::NameDisambiguation::kNo)
: name_visibility(visibility),
disambiguate_names(name_disambiguation ==
Object::NameDisambiguation::kYes) {}
static NameFormattingParams DisambiguatedWithoutClassName(
Object::NameVisibility visibility) {
NameFormattingParams params(visibility, Object::NameDisambiguation::kYes);
params.include_class_name = false;
return params;
}
static NameFormattingParams DisambiguatedUnqualified(
Object::NameVisibility visibility) {
NameFormattingParams params(visibility, Object::NameDisambiguation::kYes);
params.include_class_name = false;
params.include_parent_name = false;
return params;
}
};
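// A usage sketch (illustrative only; ZoneTextBuffer, zone and function are
// assumed from the surrounding VM code, not defined in this excerpt):
//
//   ZoneTextBuffer buffer(zone);
//   function.PrintName(
//       NameFormattingParams::DisambiguatedUnqualified(
//           Object::kUserVisibleName),
//       &buffer);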
class Function : public Object {
public:
StringPtr name() const { return untag()->name(); }
StringPtr UserVisibleName() const; // Same as scrubbed name.
const char* UserVisibleNameCString() const;
const char* NameCString(NameVisibility name_visibility) const;
void PrintName(const NameFormattingParams& params,
BaseTextBuffer* printer) const;
StringPtr QualifiedScrubbedName() const;
const char* QualifiedScrubbedNameCString() const;
StringPtr QualifiedUserVisibleName() const;
const char* QualifiedUserVisibleNameCString() const;
virtual StringPtr DictionaryName() const { return name(); }
StringPtr GetSource() const;
// Set the "C signature" for an FFI trampoline.
// Can only be used on FFI trampolines.
void SetFfiCSignature(const FunctionType& sig) const;
// Retrieves the "C signature" for an FFI trampoline.
// Can only be used on FFI trampolines.
FunctionTypePtr FfiCSignature() const;
bool FfiCSignatureContainsHandles() const;
bool FfiCSignatureReturnsStruct() const;
// Can only be called on FFI trampolines.
// -1 for Dart -> native calls.
int32_t FfiCallbackId() const;
// Can only be called on FFI trampolines.
void SetFfiCallbackId(int32_t value) const;
// Can only be called on FFI trampolines.
bool FfiIsLeaf() const;
// Can only be called on FFI trampolines.
void SetFfiIsLeaf(bool is_leaf) const;
// Can only be called on FFI trampolines.
// Null for Dart -> native calls.
FunctionPtr FfiCallbackTarget() const;
// Can only be called on FFI trampolines.
void SetFfiCallbackTarget(const Function& target) const;
// Can only be called on FFI trampolines.
// Null for Dart -> native calls.
InstancePtr FfiCallbackExceptionalReturn() const;
// Can only be called on FFI trampolines.
void SetFfiCallbackExceptionalReturn(const Instance& value) const;
// Return the signature of this function.
PRECOMPILER_WSR_FIELD_DECLARATION(FunctionType, signature);
void SetSignature(const FunctionType& value) const;
static intptr_t signature_offset() {
return OFFSET_OF(UntaggedFunction, signature_);
}
// Build a string of the form '<T>(T, {B b, C c}) => R' representing the
// internal signature of the given function. In this example, T is a type
// parameter of this function and R is a type parameter of class C, the owner
// of the function. B and C are not type parameters.
StringPtr InternalSignature() const;
// Build a string of the form '<T>(T, {B b, C c}) => R' representing the
// user visible signature of the given function. In this example, T is a type
// parameter of this function and R is a type parameter of class C, the owner
// of the function. B and C are not type parameters.
// Implicit parameters are hidden.
StringPtr UserVisibleSignature() const;
// Returns true if the signature of this function is instantiated, i.e. if it
// does not involve generic parameter types or a generic result type.
// Note that function type parameters declared by this function do not make
// its signature uninstantiated, only type parameters declared by parent
// generic functions or class type parameters.
bool HasInstantiatedSignature(Genericity genericity = kAny,
intptr_t num_free_fun_type_params = kAllFree,
TrailPtr trail = nullptr) const;
bool IsPrivate() const;
ClassPtr Owner() const;
void set_owner(const Object& value) const;
ClassPtr origin() const;
ScriptPtr script() const;
ObjectPtr RawOwner() const { return untag()->owner(); }
// The NNBD mode of the library declaring this function.
// TODO(alexmarkov): nnbd_mode() doesn't work for mixins.
// It should be either removed or fixed.
NNBDMode nnbd_mode() const { return Class::Handle(origin()).nnbd_mode(); }
RegExpPtr regexp() const;
intptr_t string_specialization_cid() const;
bool is_sticky_specialization() const;
void SetRegExpData(const RegExp& regexp,
intptr_t string_specialization_cid,
bool sticky) const;
StringPtr native_name() const;
void set_native_name(const String& name) const;
AbstractTypePtr result_type() const {
return signature()->untag()->result_type();
}
// The parameters, starting with NumImplicitParameters() parameters which are
// only visible to the VM, but not to Dart users.
// Note that type checks exclude implicit parameters.
AbstractTypePtr ParameterTypeAt(intptr_t index) const;
ArrayPtr parameter_types() const {
return signature()->untag()->parameter_types();
}
// Outside of the AOT runtime, functions store the names for their positional
// parameters, and delegate storage of the names for named parameters to
// their signature. These methods handle fetching the name from and
// setting the name to the correct location.
StringPtr ParameterNameAt(intptr_t index) const;
// Only valid for positional parameter indexes, as this should be called