Version 2.14.0-266.0.dev
Merge commit '5bd31fbd56ef46f931b9ee4d80d8d17c2d6dc78c' into 'dev'
diff --git a/runtime/vm/clustered_snapshot.cc b/runtime/vm/clustered_snapshot.cc
index 42df295..2cb3b2d 100644
--- a/runtime/vm/clustered_snapshot.cc
+++ b/runtime/vm/clustered_snapshot.cc
@@ -3907,7 +3907,7 @@
for (intptr_t id = start_index_; id < stop_index_; id++) {
type ^= refs.At(id);
stub = TypeTestingStubGenerator::DefaultCodeForType(type);
- type.SetTypeTestingStub(stub);
+ type.InitializeTypeTestingStubNonAtomic(stub);
}
}
}
@@ -4032,7 +4032,7 @@
for (intptr_t id = start_index_; id < stop_index_; id++) {
type ^= refs.At(id);
stub = TypeTestingStubGenerator::DefaultCodeForType(type);
- type.SetTypeTestingStub(stub);
+ type.InitializeTypeTestingStubNonAtomic(stub);
}
}
}
@@ -4116,7 +4116,7 @@
for (intptr_t id = start_index_; id < stop_index_; id++) {
type_ref ^= refs.At(id);
stub = TypeTestingStubGenerator::DefaultCodeForType(type_ref);
- type_ref.SetTypeTestingStub(stub);
+ type_ref.InitializeTypeTestingStubNonAtomic(stub);
}
}
}
@@ -4242,7 +4242,7 @@
for (intptr_t id = start_index_; id < stop_index_; id++) {
type_param ^= refs.At(id);
stub = TypeTestingStubGenerator::DefaultCodeForType(type_param);
- type_param.SetTypeTestingStub(stub);
+ type_param.InitializeTypeTestingStubNonAtomic(stub);
}
}
}
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 119e5a2..f64d5df 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -1249,10 +1249,10 @@
Code& code = Code::Handle();
code = TypeTestingStubGenerator::DefaultCodeForType(*dynamic_type_);
- dynamic_type_->SetTypeTestingStub(code);
+ dynamic_type_->InitializeTypeTestingStubNonAtomic(code);
code = TypeTestingStubGenerator::DefaultCodeForType(*void_type_);
- void_type_->SetTypeTestingStub(code);
+ void_type_->InitializeTypeTestingStubNonAtomic(code);
}
void Object::Cleanup() {
@@ -10256,7 +10256,7 @@
result.SetHash(0);
result.StoreNonPointer(&result.untag()->type_state_,
UntaggedType::kAllocated);
- result.SetTypeTestingStub(
+ result.InitializeTypeTestingStubNonAtomic(
Code::Handle(Z, TypeTestingStubGenerator::DefaultCodeForType(result)));
return result.ptr();
}
@@ -20279,6 +20279,32 @@
void AbstractType::SetTypeTestingStub(const Code& stub) const {
if (stub.IsNull()) {
+ InitializeTypeTestingStubNonAtomic(stub);
+ return;
+ }
+
+ auto& old = Code::Handle(Thread::Current()->zone());
+ while (true) {
+ // We load the old TTS and its entrypoint.
+ old = untag()->type_test_stub<std::memory_order_acquire>();
+ uword old_entry_point = old.IsNull() ? 0 : old.EntryPoint();
+
+ // If we can successfully update the entrypoint of the TTS, we will
+ // unconditionally also set the [Code] of the TTS.
+ //
+ // Any competing writer would do the same, lose the compare-exchange, loop
+ // around and continue loading the old [Code] TTS and continue to lose the
+ // race until we have finally also updated the [Code] TTS.
+ if (untag()->type_test_stub_entry_point_.compare_exchange_strong(
+ old_entry_point, stub.EntryPoint())) {
+ untag()->set_type_test_stub<std::memory_order_release>(stub.ptr());
+ return;
+ }
+ }
+}
+
+void AbstractType::InitializeTypeTestingStubNonAtomic(const Code& stub) const {
+ if (stub.IsNull()) {
// This only happens during bootstrapping when creating Type objects before
// we have the instructions.
ASSERT(type_class_id() == kDynamicCid || type_class_id() == kVoidCid);
@@ -20286,9 +20312,6 @@
untag()->set_type_test_stub(stub.ptr());
return;
}
-
- Thread* thread = Thread::Current();
- SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
StoreNonPointer(&untag()->type_test_stub_entry_point_, stub.EntryPoint());
untag()->set_type_test_stub(stub.ptr());
}
@@ -20448,7 +20471,7 @@
type ^= Object::Clone(*this, space);
type.set_nullability(value);
type.SetHash(0);
- type.SetTypeTestingStub(
+ type.InitializeTypeTestingStubNonAtomic(
Code::Handle(TypeTestingStubGenerator::DefaultCodeForType(type)));
if (IsCanonical()) {
// Object::Clone does not clone canonical bit.
@@ -20470,7 +20493,7 @@
type ^= Object::Clone(*this, space);
type.set_nullability(value);
type.SetHash(0);
- type.SetTypeTestingStub(
+ type.InitializeTypeTestingStubNonAtomic(
Code::Handle(TypeTestingStubGenerator::DefaultCodeForType(type)));
if (IsCanonical()) {
// Object::Clone does not clone canonical bit.
@@ -21076,7 +21099,7 @@
UntaggedType::kAllocated);
result.set_nullability(nullability);
- result.SetTypeTestingStub(
+ result.InitializeTypeTestingStubNonAtomic(
Code::Handle(Z, TypeTestingStubGenerator::DefaultCodeForType(result)));
return result.ptr();
}
@@ -21390,7 +21413,7 @@
ASSERT(!instantiated_ref_type.IsTypeRef());
instantiated_type_ref.set_type(instantiated_ref_type);
- instantiated_type_ref.SetTypeTestingStub(Code::Handle(
+ instantiated_type_ref.InitializeTypeTestingStubNonAtomic(Code::Handle(
TypeTestingStubGenerator::DefaultCodeForType(instantiated_type_ref)));
return instantiated_type_ref.ptr();
}
@@ -21477,7 +21500,7 @@
const TypeRef& result = TypeRef::Handle(Z, TypeRef::New());
result.set_type(type);
- result.SetTypeTestingStub(
+ result.InitializeTypeTestingStubNonAtomic(
Code::Handle(Z, TypeTestingStubGenerator::DefaultCodeForType(result)));
return result.ptr();
}
@@ -21525,7 +21548,7 @@
type_parameter ^= Object::Clone(*this, space);
type_parameter.set_nullability(value);
type_parameter.SetHash(0);
- type_parameter.SetTypeTestingStub(Code::Handle(
+ type_parameter.InitializeTypeTestingStubNonAtomic(Code::Handle(
TypeTestingStubGenerator::DefaultCodeForType(type_parameter)));
if (IsCanonical()) {
// Object::Clone does not clone canonical bit.
@@ -21903,7 +21926,7 @@
result.set_nullability(nullability);
result.SetHash(0);
- result.SetTypeTestingStub(
+ result.InitializeTypeTestingStubNonAtomic(
Code::Handle(Z, TypeTestingStubGenerator::DefaultCodeForType(result)));
return result.ptr();
}
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index 10d2874..18cb982 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -8156,7 +8156,22 @@
}
CodePtr type_test_stub() const { return untag()->type_test_stub(); }
+ // Sets the TTS to [stub].
+ //
+ // The update will ensure both fields (code as well as the cached entrypoint)
+ // are updated together.
+ //
+ // Can be used concurrently by multiple threads - the updates will be applied
+ // in undetermined order - but always consistently.
void SetTypeTestingStub(const Code& stub) const;
+
+ // Sets the TTS to the [stub].
+ //
+ // The caller has to ensure no other thread can concurrently try to update the
+ // TTS. This should mainly be used when initializing newly allocated Type
+ // objects.
+ void InitializeTypeTestingStubNonAtomic(const Code& stub) const;
+
void UpdateTypeTestingStubEntryPoint() const {
StoreNonPointer(&untag()->type_test_stub_entry_point_,
Code::EntryPointOf(untag()->type_test_stub()));
diff --git a/runtime/vm/raw_object.h b/runtime/vm/raw_object.h
index 6e0e5eb..044f9c7 100644
--- a/runtime/vm/raw_object.h
+++ b/runtime/vm/raw_object.h
@@ -2644,9 +2644,10 @@
protected:
static constexpr intptr_t kTypeStateBitSize = 2;
+ COMPILE_ASSERT(sizeof(std::atomic<word>) == sizeof(word));
// Accessed from generated code.
- uword type_test_stub_entry_point_;
+ std::atomic<uword> type_test_stub_entry_point_;
COMPRESSED_POINTER_FIELD(CodePtr, type_test_stub)
VISIT_FROM(type_test_stub)
diff --git a/runtime/vm/raw_object_snapshot.cc b/runtime/vm/raw_object_snapshot.cc
index bbabf60..7e6df08 100644
--- a/runtime/vm/raw_object_snapshot.cc
+++ b/runtime/vm/raw_object_snapshot.cc
@@ -113,7 +113,7 @@
// Fill in the type testing stub.
Code& code = *reader->CodeHandle();
code = TypeTestingStubGenerator::DefaultCodeForType(type);
- type.SetTypeTestingStub(code);
+ type.InitializeTypeTestingStubNonAtomic(code);
if (is_canonical) {
type ^= type.Canonicalize(Thread::Current(), nullptr);
@@ -194,7 +194,7 @@
// Fill in the type testing stub.
Code& code = *reader->CodeHandle();
code = TypeTestingStubGenerator::DefaultCodeForType(type_ref);
- type_ref.SetTypeTestingStub(code);
+ type_ref.InitializeTypeTestingStubNonAtomic(code);
return type_ref.ptr();
}
@@ -256,7 +256,7 @@
// Fill in the type testing stub.
Code& code = *reader->CodeHandle();
code = TypeTestingStubGenerator::DefaultCodeForType(type_parameter);
- type_parameter.SetTypeTestingStub(code);
+ type_parameter.InitializeTypeTestingStubNonAtomic(code);
if (is_canonical) {
type_parameter ^= type_parameter.Canonicalize(Thread::Current(), nullptr);
diff --git a/runtime/vm/snapshot.cc b/runtime/vm/snapshot.cc
index 0b47906..f849156 100644
--- a/runtime/vm/snapshot.cc
+++ b/runtime/vm/snapshot.cc
@@ -362,7 +362,7 @@
for (intptr_t i = 0; i < types_to_postprocess_.Length(); ++i) {
type ^= types_to_postprocess_.At(i);
code = TypeTestingStubGenerator::DefaultCodeForType(type);
- type.SetTypeTestingStub(code);
+ type.InitializeTypeTestingStubNonAtomic(code);
}
}
diff --git a/runtime/vm/symbols.cc b/runtime/vm/symbols.cc
index fdddf37..c6819b0 100644
--- a/runtime/vm/symbols.cc
+++ b/runtime/vm/symbols.cc
@@ -353,60 +353,23 @@
if (symbol.IsNull()) {
IsolateGroup* group = thread->isolate_group();
ObjectStore* object_store = group->object_store();
- if (thread->IsAtSafepoint()) {
- // There are two cases where we can cause symbol allocation while holding
- // a safepoint:
- // - FLAG_enable_isolate_groups in AOT due to the usage of
- // `RunWithStoppedMutators` in SwitchableCall runtime entry.
- // - non-PRODUCT mode where the vm-service uses a HeapIterationScope
- // while building instances
- // Ideally we should get rid of both cases to avoid this unsafe usage of
- // the symbol table (we are assuming here that no other thread holds the
- // symbols_lock).
- // TODO(https://dartbug.com/41943): Get rid of the symbol table accesses
- // within safepoint operation scope.
- RELEASE_ASSERT(group->safepoint_handler()->IsOwnedByTheThread(thread));
- RELEASE_ASSERT(IsolateGroup::AreIsolateGroupsEnabled() || !USING_PRODUCT);
+ RELEASE_ASSERT(!thread->IsAtSafepoint());
- // Uncommon case: We are at a safepoint, all mutators are stopped and we
- // have therefore exclusive access to the symbol table.
+ // Most common case: The symbol is already in the table.
+ {
+ SafepointReadRwLocker sl(thread, group->symbols_lock());
+ data = object_store->symbol_table();
+ CanonicalStringSet table(&key, &value, &data);
+ symbol ^= table.GetOrNull(str);
+ table.Release();
+ }
+ // Otherwise we'll have to get exclusive access and get-or-insert it.
+ if (symbol.IsNull()) {
+ SafepointWriteRwLocker sl(thread, group->symbols_lock());
data = object_store->symbol_table();
CanonicalStringSet table(&key, &value, &data);
symbol ^= table.InsertNewOrGet(str);
object_store->set_symbol_table(table.Release());
- } else {
- // Most common case: We are not at a safepoint and the symbol is available
- // in the symbol table: We require only read access.
- {
- SafepointReadRwLocker sl(thread, group->symbols_lock());
- data = object_store->symbol_table();
- CanonicalStringSet table(&key, &value, &data);
- symbol ^= table.GetOrNull(str);
- table.Release();
- }
- // Second common case: We are not at a safepoint and the symbol is not
- // available in the symbol table: We require only exclusive access.
- if (symbol.IsNull()) {
- auto insert_or_get = [&]() {
- data = object_store->symbol_table();
- CanonicalStringSet table(&key, &value, &data);
- symbol ^= table.InsertNewOrGet(str);
- object_store->set_symbol_table(table.Release());
- };
-
- SafepointWriteRwLocker sl(thread, group->symbols_lock());
- if (IsolateGroup::AreIsolateGroupsEnabled() || !USING_PRODUCT) {
- // NOTE: Strictly speaking we should use a safepoint operation scope
- // here to ensure the lock-free usage inside safepoint operations (see
- // above) is safe. Though this would really kill the performance.
- // TODO(https://dartbug.com/41943): Get rid of the symbol table
- // accesses within safepoint operation scope.
- group->RunWithStoppedMutators(insert_or_get,
- /*force_heap_growth=*/true);
- } else {
- insert_or_get();
- }
- }
}
}
ASSERT(symbol.IsSymbol());
diff --git a/tools/VERSION b/tools/VERSION
index 4d5e8b0..1390739 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
MAJOR 2
MINOR 14
PATCH 0
-PRERELEASE 265
+PRERELEASE 266
PRERELEASE_PATCH 0
\ No newline at end of file