[vm] `Finalizer` / `NativeFinalizer` fixes

1. Run native finalizers before sending the isolate exit message. This
   way users can rely on the isolate exit message to mean both (1) no
   Dart code executes on that isolate anymore _and_ (2) all native
   finalizers have run (see the shutdown-ordering sketch after this
   list). Alternatively, we could opt not to provide this second
   guarantee and document that.

2. Fix a UBSAN error by making SetArgumentTo42's argument a `void*`
   (see the UBSAN sketch after this list).

3. Fix a bug in debug mode: stop reading tags from forwarding addresses.

4. Small optimization: Don't promote 0 bytes from new to old space.

5. Skip finalizer_isolate_groups_run_gc_test in optcounter mode.
   In hot reload mode, a GC is guaranteed to run. On all other bots no
   GC runs (nothing happens in the other isolate group), but in
   optcounter mode enough happens in the other isolate group to trigger
   GCs. Alternatively, we could loosen the test to allow a GC either to
   happen or not.
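
For item 1, a self-contained toy model of the shutdown-ordering
guarantee (all names in it are hypothetical; the real change is in
Isolate::Shutdown and RunAndCleanupFinalizersOnShutdown in the
isolate.cc diff below):

    #include <cstdio>
    #include <functional>
    #include <vector>

    // Toy isolate: Shutdown runs every native finalizer before delivering
    // the exit message, so observing the exit message implies that all
    // native finalizers have already run.
    struct ToyIsolate {
      std::vector<std::function<void()>> native_finalizers;
      std::function<void()> on_exit;

      void Shutdown() {
        for (auto& finalizer : native_finalizers) finalizer();
        if (on_exit) on_exit();
      }
    };

    int main() {
      ToyIsolate isolate;
      isolate.native_finalizers.push_back([] { std::puts("finalizer ran"); });
      isolate.on_exit = [] { std::puts("exit message"); };
      isolate.Shutdown();  // Prints "finalizer ran", then "exit message".
      return 0;
    }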
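
For item 2, the UBSAN report is presumably caused by calling the
finalizer callback through a function pointer taking void* while the
callee was declared with intptr_t*. A minimal, self-contained
illustration of the fixed shape (the FinalizerCallback typedef and
main() below are hypothetical, for illustration only):

    #include <stdint.h>

    // Hypothetical callback type mirroring a void (*)(void*) finalizer shape.
    typedef void (*FinalizerCallback)(void* token);

    // Before the fix the callee was declared as
    //   void SetArgumentTo42(intptr_t* token) { *token = 42; }
    // and had to be cast to FinalizerCallback before being called; calling
    // through that mismatched function pointer type is what UBSAN flags.
    // After the fix the parameter is void*, so the pointer types line up and
    // the cast happens inside the function body instead.
    void SetArgumentTo42(void* token) {
      *reinterpret_cast<intptr_t*>(token) = 42;
    }

    int main() {
      intptr_t value = 0;
      FinalizerCallback callback = &SetArgumentTo42;  // Matching types.
      callback(&value);
      return value == 42 ? 0 : 1;
    }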

TEST=runtime/tests/vm/dart(_2)/finalizer/*
TEST=runtime/tests/vm/dart(_2)/isolates/fast_object_copy_test.dart
TEST=runtime/vm/object_test.cc
TEST=tests/ffi(_2)/vmspecific_native_finalizer_*

Closes: https://github.com/dart-lang/sdk/issues/48740
Closes: https://github.com/dart-lang/sdk/issues/48715
Closes: https://github.com/dart-lang/sdk/issues/48674

Change-Id: I5e260e087aef48524f2214f5b332caeda18f2e37
Cq-Include-Trybots: luci.dart.try:vm-kernel-ubsan-linux-release-x64-try,vm-kernel-precomp-ubsan-linux-release-x64-try,vm-canary-linux-debug-try,vm-kernel-optcounter-threshold-linux-release-x64-try,vm-kernel-optcounter-threshold-linux-release-ia32-try,app-kernel-linux-debug-x64-try,vm-kernel-precomp-linux-debug-x64-try
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/240043
Reviewed-by: Martin Kustermann <kustermann@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Daco Harkes <dacoharkes@google.com>
diff --git a/runtime/bin/ffi_test/ffi_test_functions_vmspecific.cc b/runtime/bin/ffi_test/ffi_test_functions_vmspecific.cc
index 36516fc..a323582 100644
--- a/runtime/bin/ffi_test/ffi_test_functions_vmspecific.cc
+++ b/runtime/bin/ffi_test/ffi_test_functions_vmspecific.cc
@@ -1111,8 +1111,8 @@
 ////////////////////////////////////////////////////////////////////////////////
 // NativeFinalizer tests
 
-DART_EXPORT void SetArgumentTo42(intptr_t* token) {
-  *token = 42;
+DART_EXPORT void SetArgumentTo42(void* token) {
+  *reinterpret_cast<intptr_t*>(token) = 42;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/runtime/lib/ffi.cc b/runtime/lib/ffi.cc
index 20f6756..a27fcf6 100644
--- a/runtime/lib/ffi.cc
+++ b/runtime/lib/ffi.cc
@@ -273,10 +273,10 @@
   }
   // The next call cannot be in safepoint.
   if (external_size_diff > 0) {
-    IsolateGroup::Current()->heap()->AllocatedExternal(external_size_diff,
+    thread->isolate_group()->heap()->AllocatedExternal(external_size_diff,
                                                        space);
   } else {
-    IsolateGroup::Current()->heap()->FreedExternal(-external_size_diff, space);
+    thread->isolate_group()->heap()->FreedExternal(-external_size_diff, space);
   }
 };
 
diff --git a/runtime/tests/vm/vm.status b/runtime/tests/vm/vm.status
index 8feee00..bba74ca 100644
--- a/runtime/tests/vm/vm.status
+++ b/runtime/tests/vm/vm.status
@@ -58,10 +58,12 @@
 [ $builder_tag == optimization_counter_threshold ]
 cc/*: Skip # Many tests want to see unoptimized code running
 dart/appjit*: SkipByDesign # Test needs a particular opt-counter value
+dart/finalizer/finalizer_isolate_groups_run_gc_test: SkipByDesign # GC happens at unpredictable times.
 dart/kernel_determinism_test: SkipSlow
 dart/minimal_kernel_test: SkipSlow # gen_kernel is too slow with optimization_counter_threshold
 dart/null_safety_autodetection_in_kernel_compiler_test: SkipSlow # gen_kernel is too slow with optimization_counter_threshold
 dart_2/appjit*: SkipByDesign # Test needs a particular opt-counter value
+dart_2/finalizer/finalizer_isolate_groups_run_gc_test: SkipByDesign # GC happens at unpredictable times.
 dart_2/kernel_determinism_test: SkipSlow
 dart_2/minimal_kernel_test: SkipSlow # gen_kernel is too slow with optimization_counter_threshold
 dart_2/null_safety_autodetection_in_kernel_compiler_test: SkipSlow # gen_kernel is too slow with optimization_counter_threshold
diff --git a/runtime/vm/heap/gc_shared.cc b/runtime/vm/heap/gc_shared.cc
index a9e7275..2f204d9 100644
--- a/runtime/vm/heap/gc_shared.cc
+++ b/runtime/vm/heap/gc_shared.cc
@@ -37,7 +37,9 @@
 }
 
 Heap::Space SpaceForExternal(FinalizerEntryPtr raw_entry) {
-  ASSERT(!raw_entry->untag()->value().IsSmi());
+  // Never a Smi: the value cannot be an integer. It must either be a valid
+  // Expando key (Finalizer) or a Finalizable (NativeFinalizer).
+  ASSERT(raw_entry->untag()->value().IsHeapObject());
   return raw_entry->untag()->value()->IsOldObject() ? Heap::kOld : Heap::kNew;
 }
 
diff --git a/runtime/vm/heap/scavenger.cc b/runtime/vm/heap/scavenger.cc
index e11759a..d2f7de4 100644
--- a/runtime/vm/heap/scavenger.cc
+++ b/runtime/vm/heap/scavenger.cc
@@ -1212,9 +1212,13 @@
       if (UNLIKELY(class_id == kFinalizerEntryCid)) {
         FinalizerEntryPtr raw_entry =
             static_cast<FinalizerEntryPtr>(raw_object);
+        if (FLAG_trace_finalizers) {
+          THR_Print("Scavenger::IterateStoreBuffers Processing Entry %p\n",
+                    raw_entry->untag());
+        }
         // Detect `FinalizerEntry::value` promotion to update external space.
         //
-        // This treats old-space FinalizerEntry fields as strong. Values, deatch
+        // This treats old-space FinalizerEntry fields as strong. Values, detach
         // keys, and finalizers in new space won't be reclaimed until after they
         // are promoted.
         // This will only visit the strong references and enqueue the entry.
@@ -1225,13 +1229,15 @@
           const Heap::Space after_gc_space = SpaceForExternal(raw_entry);
           if (after_gc_space == Heap::kOld) {
             const intptr_t external_size = raw_entry->untag()->external_size_;
-            if (FLAG_trace_finalizers) {
-              THR_Print(
-                  "Scavenger %p Store buffer, promoting external size %" Pd
-                  " bytes from new to old space\n",
-                  visitor, external_size);
+            if (external_size > 0) {
+              if (FLAG_trace_finalizers) {
+                THR_Print(
+                    "Scavenger %p Store buffer, promoting external size %" Pd
+                    " bytes from new to old space\n",
+                    visitor, external_size);
+              }
+              visitor->isolate_group()->heap()->PromotedExternal(external_size);
             }
-            visitor->isolate_group()->heap()->PromotedExternal(external_size);
           }
         }
       } else {
diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc
index e797f5c..6541884 100644
--- a/runtime/vm/isolate.cc
+++ b/runtime/vm/isolate.cc
@@ -2462,7 +2462,9 @@
 }
 #endif  // !defined(PRODUCT)
 
-void Isolate::LowLevelShutdown() {
+void Isolate::RunAndCleanupFinalizersOnShutdown() {
+  if (finalizers_ == GrowableObjectArray::null()) return;
+
   // Ensure we have a zone and handle scope so that we can call VM functions,
   // but we no longer allocate new heap objects.
   Thread* thread = Thread::Current();
@@ -2470,15 +2472,6 @@
   HandleScope handle_scope(thread);
   NoSafepointScope no_safepoint_scope;
 
-  // Notify exit listeners that this isolate is shutting down.
-  if (group()->object_store() != nullptr) {
-    const Error& error = Error::Handle(thread->sticky_error());
-    if (error.IsNull() || !error.IsUnwindError() ||
-        UnwindError::Cast(error).is_user_initiated()) {
-      NotifyExitListeners();
-    }
-  }
-
   // Set live finalizers isolate to null, before deleting the message handler.
   const auto& finalizers =
       GrowableObjectArray::Handle(stack_zone.GetZone(), finalizers_);
@@ -2517,6 +2510,24 @@
       }
     }
   }
+}
+
+void Isolate::LowLevelShutdown() {
+  // Ensure we have a zone and handle scope so that we can call VM functions,
+  // but we no longer allocate new heap objects.
+  Thread* thread = Thread::Current();
+  StackZone stack_zone(thread);
+  HandleScope handle_scope(thread);
+  NoSafepointScope no_safepoint_scope;
+
+  // Notify exit listeners that this isolate is shutting down.
+  if (group()->object_store() != nullptr) {
+    const Error& error = Error::Handle(thread->sticky_error());
+    if (error.IsNull() || !error.IsUnwindError() ||
+        UnwindError::Cast(error).is_user_initiated()) {
+      NotifyExitListeners();
+    }
+  }
 
   // Close all the ports owned by this isolate.
   PortMap::ClosePorts(message_handler());
@@ -2629,6 +2640,11 @@
   // Then, proceed with low-level teardown.
   Isolate::UnMarkIsolateReady(this);
 
+  // Ensure native finalizers run before the isolate shutdown message is sent.
+  // This way users can rely on the exit message meaning that the isolate will
+  // not run any Dart code anymore _and_ will not run any native finalizers.
+  RunAndCleanupFinalizersOnShutdown();
+
   // Post message before LowLevelShutdown that sends onExit message.
   // This ensures that exit message comes last.
   if (bequest_.get() != nullptr) {
diff --git a/runtime/vm/isolate.h b/runtime/vm/isolate.h
index 76bbd89..5b7eaad 100644
--- a/runtime/vm/isolate.h
+++ b/runtime/vm/isolate.h
@@ -1482,6 +1482,7 @@
   void KillLocked(LibMsgId msg_id);
 
   void Shutdown();
+  void RunAndCleanupFinalizersOnShutdown();
   void LowLevelShutdown();
 
   // Unregister the [isolate] from the thread, remove it from the isolate group,