[vm] Verify transitions in generated code

The transitions in C++ have asserts which run in debug mode to validate
that the transitions are happening in the right order.

The transitions in the generated code had no equivalent. This CL
introduces the equivalent for the generated code.

The transitions to generated code are currently misnamed: they can come
from either the VM or native code, not only from native code. For
example, the FFI callbacks and the call-return sequence can already have
transitioned from native into the VM when entering the safepoint.

These checks don't catch the invalid FFI callbacks.
Bug: https://github.com/dart-lang/sdk/issues/60021
Such invalid callbacks are instead caught when looking up the FFI metadata:
https://dart-review.googlesource.com/c/sdk/+/407023

This CL does not implement the check on ia32, since ia32 is scheduled
for removal.

The check is only enabled in debug mode, matching the C++ asserts, which
also run only in debug mode. Enabling this check in release mode for AOT
regresses the parent CL's benchmark by ~30% on Mac Arm64.

TEST=test/ffi

Change-Id: Ieaf1e43533baae294af37b2e171b68bd314fdbe0
Cq-Include-Trybots: dart/try:vm-aot-android-release-arm64c-try,vm-aot-android-release-arm_x64-try,vm-aot-asan-linux-release-x64-try,vm-aot-linux-debug-x64-try,vm-aot-linux-debug-x64c-try,vm-aot-mac-release-arm64-try,vm-aot-mac-release-x64-try,vm-aot-msan-linux-release-x64-try,vm-aot-obfuscate-linux-release-x64-try,vm-aot-optimization-level-linux-release-x64-try,vm-aot-tsan-linux-release-x64-try,vm-aot-ubsan-linux-release-x64-try,vm-aot-win-debug-arm64-try,vm-aot-win-debug-x64-try,vm-aot-win-debug-x64c-try,vm-appjit-linux-debug-x64-try,vm-asan-linux-release-arm64-try,vm-asan-linux-release-x64-try,vm-checked-mac-release-arm64-try,vm-ffi-android-debug-arm-try,vm-ffi-android-debug-arm64c-try,vm-ffi-qemu-linux-release-arm-try,vm-ffi-qemu-linux-release-riscv64-try,vm-fuchsia-release-arm64-try,vm-fuchsia-release-x64-try,vm-linux-debug-ia32-try,vm-linux-debug-x64-try,vm-linux-debug-x64c-try,vm-mac-debug-arm64-try,vm-mac-debug-x64-try,vm-msan-linux-release-arm64-try,vm-msan-linux-release-x64-try,vm-reload-linux-debug-x64-try,vm-reload-rollback-linux-debug-x64-try,vm-tsan-linux-release-arm64-try,vm-tsan-linux-release-x64-try,vm-ubsan-linux-release-arm64-try,vm-ubsan-linux-release-x64-try,vm-win-debug-arm64-try,vm-win-debug-x64-try,vm-win-debug-x64c-try,vm-win-release-ia32-try
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/407021
Reviewed-by: Liam Appelbe <liama@google.com>
Reviewed-by: Slava Egorov <vegorov@google.com>
diff --git a/runtime/vm/compiler/assembler/assembler_arm.cc b/runtime/vm/compiler/assembler/assembler_arm.cc
index a2653d1..ffae85e 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm.cc
@@ -623,6 +623,7 @@
                 target::Thread::exit_through_ffi_offset());
   Register tmp2 = exit_through_ffi;
 
+  VerifyInGenerated(tmp1);
   // Mark that the thread is executing native code.
   StoreToOffset(destination_address, THR, target::Thread::vm_tag_offset());
   LoadImmediate(tmp1, target::Thread::native_execution_state());
@@ -700,6 +701,7 @@
 #endif
   }
 
+  VerifyNotInGenerated(TMP);
   // Mark that the thread is executing Dart code.
   if (set_tag) {
     LoadImmediate(state, target::Thread::vm_tag_dart_id());
@@ -714,6 +716,32 @@
   StoreToOffset(state, THR, target::Thread::exit_through_ffi_offset());
 }
 
+void Assembler::VerifyInGenerated(Register scratch) {
+#if defined(DEBUG)
+  // Verify the thread is in generated.
+  Comment("VerifyInGenerated");
+  ldr(scratch, Address(THR, target::Thread::execution_state_offset()));
+  Label ok;
+  CompareImmediate(scratch, target::Thread::generated_execution_state());
+  BranchIf(EQUAL, &ok, Assembler::kNearJump);
+  Breakpoint();
+  Bind(&ok);
+#endif
+}
+
+void Assembler::VerifyNotInGenerated(Register scratch) {
+#if defined(DEBUG)
+  // Verify the thread is in native or VM.
+  Comment("VerifyNotInGenerated");
+  ldr(scratch, Address(THR, target::Thread::execution_state_offset()));
+  CompareImmediate(scratch, target::Thread::generated_execution_state());
+  Label ok;
+  BranchIf(NOT_EQUAL, &ok, Assembler::kNearJump);
+  Breakpoint();
+  Bind(&ok);
+#endif
+}
+
 void Assembler::clrex() {
   int32_t encoding = (kSpecialCondition << kConditionShift) | B26 | B24 | B22 |
                      B21 | B20 | (0xff << 12) | B4 | 0xf;
diff --git a/runtime/vm/compiler/assembler/assembler_arm.h b/runtime/vm/compiler/assembler/assembler_arm.h
index 5c194b6..55b7a4a 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.h
+++ b/runtime/vm/compiler/assembler/assembler_arm.h
@@ -623,6 +623,8 @@
                                    bool exit_safepoint,
                                    bool ignore_unwind_in_progress = false,
                                    bool set_tag = true);
+  void VerifyInGenerated(Register scratch);
+  void VerifyNotInGenerated(Register scratch);
   void EnterFullSafepoint(Register scratch0, Register scratch1);
   void ExitFullSafepoint(Register scratch0,
                          Register scratch1,
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc
index 12060e5..2d09b80 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -1591,6 +1591,7 @@
                 target::Thread::exit_through_ffi_offset());
   Register tmp = new_exit_through_ffi;
 
+  VerifyInGenerated(tmp);
   // Mark that the thread is executing native code.
   StoreToOffset(destination, THR, target::Thread::vm_tag_offset());
   LoadImmediate(tmp, target::Thread::native_execution_state());
@@ -1680,6 +1681,7 @@
 #endif
   }
 
+  VerifyNotInGenerated(TMP);
   // Mark that the thread is executing Dart code.
   if (set_tag) {
     LoadImmediate(state, target::Thread::vm_tag_dart_id());
@@ -1694,6 +1696,32 @@
   StoreToOffset(state, THR, target::Thread::exit_through_ffi_offset());
 }
 
+void Assembler::VerifyInGenerated(Register scratch) {
+#if defined(DEBUG)
+  // Verify the thread is in generated.
+  Comment("VerifyInGenerated");
+  ldr(scratch, Address(THR, target::Thread::execution_state_offset()));
+  Label ok;
+  CompareImmediate(scratch, target::Thread::generated_execution_state());
+  BranchIf(EQUAL, &ok, Assembler::kNearJump);
+  Breakpoint();
+  Bind(&ok);
+#endif
+}
+
+void Assembler::VerifyNotInGenerated(Register scratch) {
+#if defined(DEBUG)
+  // Verify the thread is in native or VM.
+  Comment("VerifyNotInGenerated");
+  ldr(scratch, Address(THR, target::Thread::execution_state_offset()));
+  CompareImmediate(scratch, target::Thread::generated_execution_state());
+  Label ok;
+  BranchIf(NOT_EQUAL, &ok, Assembler::kNearJump);
+  Breakpoint();
+  Bind(&ok);
+#endif
+}
+
 void Assembler::CallRuntime(const RuntimeEntry& entry,
                             intptr_t argument_count) {
   ASSERT(!entry.is_leaf());
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index 8081386..326e364 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -2113,6 +2113,8 @@
                                    bool exit_safepoint,
                                    bool ignore_unwind_in_progress = false,
                                    bool set_tag = true);
+  void VerifyInGenerated(Register scratch);
+  void VerifyNotInGenerated(Register scratch);
   void EnterFullSafepoint(Register scratch);
   void ExitFullSafepoint(Register scratch, bool ignore_unwind_in_progress);
 
diff --git a/runtime/vm/compiler/assembler/assembler_riscv.cc b/runtime/vm/compiler/assembler/assembler_riscv.cc
index e92323e..a89553c 100644
--- a/runtime/vm/compiler/assembler/assembler_riscv.cc
+++ b/runtime/vm/compiler/assembler/assembler_riscv.cc
@@ -4381,6 +4381,12 @@
      Address(THR, target::Thread::exit_through_ffi_offset()));
   Register tmp = new_exit_through_ffi;
 
+#if defined(DEBUG)
+  ASSERT(T2 != TMP2);
+  mv(T2, TMP2);  // BranchIf in VerifyInGenerated clobbers TMP2.
+  VerifyInGenerated(tmp);
+  mv(TMP2, T2);
+#endif
   // Mark that the thread is executing native code.
   sx(destination, Address(THR, target::Thread::vm_tag_offset()));
   li(tmp, target::Thread::native_execution_state());
@@ -4413,6 +4419,7 @@
 #endif
   }
 
+  VerifyNotInGenerated(state);
   // Mark that the thread is executing Dart code.
   if (set_tag) {
     li(state, target::Thread::vm_tag_dart_id());
@@ -4426,6 +4433,32 @@
   sx(ZR, Address(THR, target::Thread::exit_through_ffi_offset()));
 }
 
+void Assembler::VerifyInGenerated(Register scratch) {
+#if defined(DEBUG)
+  // Verify the thread is in generated.
+  Comment("VerifyInGenerated");
+  lx(scratch, Address(THR, target::Thread::execution_state_offset()));
+  Label ok;
+  CompareImmediate(scratch, target::Thread::generated_execution_state());
+  BranchIf(EQUAL, &ok, Assembler::kNearJump);
+  Breakpoint();
+  Bind(&ok);
+#endif
+}
+
+void Assembler::VerifyNotInGenerated(Register scratch) {
+#if defined(DEBUG)
+  // Verify the thread is in native or VM.
+  Comment("VerifyNotInGenerated");
+  lx(scratch, Address(THR, target::Thread::execution_state_offset()));
+  CompareImmediate(scratch, target::Thread::generated_execution_state());
+  Label ok;
+  BranchIf(NOT_EQUAL, &ok, Assembler::kNearJump);
+  Breakpoint();
+  Bind(&ok);
+#endif
+}
+
 void Assembler::EnterFullSafepoint(Register state) {
   // We generate the same number of instructions whether or not the slow-path is
   // forced. This simplifies GenerateJitCallbackTrampolines.
diff --git a/runtime/vm/compiler/assembler/assembler_riscv.h b/runtime/vm/compiler/assembler/assembler_riscv.h
index d77da40..fddf884 100644
--- a/runtime/vm/compiler/assembler/assembler_riscv.h
+++ b/runtime/vm/compiler/assembler/assembler_riscv.h
@@ -1454,6 +1454,8 @@
                                    bool exit_safepoint,
                                    bool ignore_unwind_in_progress = false,
                                    bool set_tag = true);
+  void VerifyInGenerated(Register scratch);
+  void VerifyNotInGenerated(Register scratch);
   void EnterFullSafepoint(Register scratch);
   void ExitFullSafepoint(Register scratch, bool ignore_unwind_in_progress);
 
diff --git a/runtime/vm/compiler/assembler/assembler_x64.cc b/runtime/vm/compiler/assembler/assembler_x64.cc
index 2d6f977d..8009bbb 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64.cc
@@ -190,6 +190,8 @@
                          compiler::target::Thread::exit_through_ffi_offset()),
        new_exit_through_ffi);
 
+  VerifyInGenerated(TMP);
+  // Mark that the thread is executing native code.
   movq(Assembler::VMTagAddress(), destination_address);
   movq(Address(THR, target::Thread::execution_state_offset()),
        Immediate(target::Thread::native_execution_state()));
@@ -259,10 +261,10 @@
   Bind(&done);
 }
 
-void Assembler::TransitionNativeToGenerated(bool leave_safepoint,
+void Assembler::TransitionNativeToGenerated(bool exit_safepoint,
                                             bool ignore_unwind_in_progress,
                                             bool set_tag) {
-  if (leave_safepoint) {
+  if (exit_safepoint) {
     ExitFullSafepoint(ignore_unwind_in_progress);
   } else {
     // flag only makes sense if we are leaving safepoint
@@ -278,6 +280,8 @@
 #endif
   }
 
+  VerifyNotInGenerated(TMP);
+  // Mark that the thread is executing Dart code.
   if (set_tag) {
     movq(Assembler::VMTagAddress(),
          Immediate(target::Thread::vm_tag_dart_id()));
@@ -293,6 +297,32 @@
        compiler::Immediate(0));
 }
 
+void Assembler::VerifyInGenerated(Register scratch) {
+#if defined(DEBUG)
+  // Verify the thread is in generated.
+  Comment("VerifyInGenerated");
+  movq(scratch, Address(THR, target::Thread::execution_state_offset()));
+  CompareImmediate(scratch, target::Thread::generated_execution_state());
+  Label ok;
+  BranchIf(EQUAL, &ok, Assembler::kNearJump);
+  Breakpoint();
+  Bind(&ok);
+#endif
+}
+
+void Assembler::VerifyNotInGenerated(Register scratch) {
+#if defined(DEBUG)
+  // Verify the thread is in native or VM.
+  Comment("VerifyNotInGenerated");
+  movq(scratch, Address(THR, target::Thread::execution_state_offset()));
+  CompareImmediate(scratch, target::Thread::generated_execution_state());
+  Label ok;
+  BranchIf(NOT_EQUAL, &ok, Assembler::kNearJump);
+  Breakpoint();
+  Bind(&ok);
+#endif
+}
+
 void Assembler::EmitQ(int reg,
                       const Address& address,
                       int opcode,
diff --git a/runtime/vm/compiler/assembler/assembler_x64.h b/runtime/vm/compiler/assembler/assembler_x64.h
index 3ed5af8..4e74800 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.h
+++ b/runtime/vm/compiler/assembler/assembler_x64.h
@@ -324,9 +324,11 @@
                                    Register new_exit_frame,
                                    Register new_exit_through_ffi,
                                    bool enter_safepoint);
-  void TransitionNativeToGenerated(bool leave_safepoint,
+  void TransitionNativeToGenerated(bool exit_safepoint,
                                    bool ignore_unwind_in_progress = false,
                                    bool set_tag = true);
+  void VerifyInGenerated(Register scratch);
+  void VerifyNotInGenerated(Register scratch);
 
 // Register-register, register-address and address-register instructions.
 #define RR(width, name, ...)                                                   \
diff --git a/runtime/vm/compiler/backend/il_riscv.cc b/runtime/vm/compiler/backend/il_riscv.cc
index 92d42bb..3d6d309 100644
--- a/runtime/vm/compiler/backend/il_riscv.cc
+++ b/runtime/vm/compiler/backend/il_riscv.cc
@@ -1550,7 +1550,7 @@
       __ jalr(target);
 
       // Update information in the thread object and leave the safepoint.
-      __ TransitionNativeToGenerated(temp1, /*leave_safepoint=*/true);
+      __ TransitionNativeToGenerated(temp1, /*exit_safepoint=*/true);
     } else {
       // We cannot trust that this code will be executable within a safepoint.
       // Therefore we delegate the responsibility of entering/exiting the
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index 1cdf3a2..2faf61d 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -1348,7 +1348,7 @@
       __ CallCFunction(target_address, /*restore_rsp=*/true);
 
       // Update information in the thread object and leave the safepoint.
-      __ TransitionNativeToGenerated(/*leave_safepoint=*/true);
+      __ TransitionNativeToGenerated(/*exit_safepoint=*/true);
     } else {
       // We cannot trust that this code will be executable within a safepoint.
       // Therefore we delegate the responsibility of entering/exiting the
diff --git a/runtime/vm/compiler/stub_code_compiler_arm.cc b/runtime/vm/compiler/stub_code_compiler_arm.cc
index 3e57e65..5f07a49 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm.cc
@@ -231,6 +231,7 @@
   SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
   __ ReserveAlignedFrameSpace(0);
 
+  __ VerifyNotInGenerated(R0);
   // Set the execution state to VM while waiting for the safepoint to end.
   // This isn't strictly necessary but enables tests to check that we're not
   // in native code anymore. See tests/ffi/function_gc_test.dart for example.
@@ -3081,7 +3082,7 @@
   __ cmp(tmp1, Operand(tmp2));
   __ b(&exit_through_non_ffi, NE);
   __ TransitionNativeToGenerated(tmp1, tmp2,
-                                 /*leave_safepoint=*/true,
+                                 /*exit_safepoint=*/true,
                                  /*ignore_unwind_in_progress=*/true);
   __ Bind(&exit_through_non_ffi);
 
diff --git a/runtime/vm/compiler/stub_code_compiler_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
index 330a1f6..e333d6d 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -373,6 +373,7 @@
   __ ReserveAlignedFrameSpace(0);
   __ mov(CSP, SP);
 
+  __ VerifyNotInGenerated(R0);
   // Set the execution state to VM while waiting for the safepoint to end.
   // This isn't strictly necessary but enables tests to check that we're not
   // in native code anymore. See tests/ffi/function_gc_test.dart for example.
@@ -437,7 +438,7 @@
   __ mov(SP, CSP);
   __ mov(CSP, R25);
 
-  __ TransitionNativeToGenerated(R10, /*leave_safepoint=*/true);
+  __ TransitionNativeToGenerated(R10, /*exit_safepoint=*/true);
   __ ret(R19);
 }
 
@@ -3467,7 +3468,7 @@
   __ LoadImmediate(tmp2, target::Thread::exit_through_ffi());
   __ cmp(tmp1, Operand(tmp2));
   __ b(&exit_through_non_ffi, NE);
-  __ TransitionNativeToGenerated(tmp1, /*leave_safepoint=*/true,
+  __ TransitionNativeToGenerated(tmp1, /*exit_safepoint=*/true,
                                  /*ignore_unwind_in_progress=*/true);
   __ Bind(&exit_through_non_ffi);
 
diff --git a/runtime/vm/compiler/stub_code_compiler_ia32.cc b/runtime/vm/compiler/stub_code_compiler_ia32.cc
index f084c1a..405389d 100644
--- a/runtime/vm/compiler/stub_code_compiler_ia32.cc
+++ b/runtime/vm/compiler/stub_code_compiler_ia32.cc
@@ -214,7 +214,7 @@
   __ TransitionGeneratedToNative(EAX, FPREG, ECX /*volatile*/,
                                  /*enter_safepoint=*/true);
   __ call(EAX);
-  __ TransitionNativeToGenerated(ECX /*volatile*/, /*leave_safepoint=*/true);
+  __ TransitionNativeToGenerated(ECX /*volatile*/, /*exit_safepoint=*/true);
 
   __ jmp(EBX);
 }
@@ -2971,7 +2971,7 @@
               THR, compiler::target::Thread::exit_through_ffi_offset()),
           compiler::Immediate(target::Thread::exit_through_ffi()));
   __ j(NOT_EQUAL, &exit_through_non_ffi, compiler::Assembler::kNearJump);
-  __ TransitionNativeToGenerated(ECX, /*leave_safepoint=*/true,
+  __ TransitionNativeToGenerated(ECX, /*exit_safepoint=*/true,
                                  /*ignore_unwind_in_progress=*/true);
   __ Bind(&exit_through_non_ffi);
 
diff --git a/runtime/vm/compiler/stub_code_compiler_riscv.cc b/runtime/vm/compiler/stub_code_compiler_riscv.cc
index 9965d81..e540b85 100644
--- a/runtime/vm/compiler/stub_code_compiler_riscv.cc
+++ b/runtime/vm/compiler/stub_code_compiler_riscv.cc
@@ -239,6 +239,7 @@
 
   __ ReserveAlignedFrameSpace(0);
 
+  __ VerifyNotInGenerated(TMP);
   // Set the execution state to VM while waiting for the safepoint to end.
   // This isn't strictly necessary but enables tests to check that we're not
   // in native code anymore. See tests/ffi/function_gc_test.dart for example.
@@ -291,7 +292,7 @@
 
   __ jalr(T0);
 
-  __ TransitionNativeToGenerated(T1, /*leave_safepoint=*/true);
+  __ TransitionNativeToGenerated(T1, /*exit_safepoint=*/true);
   __ jr(S3);
 }
 
@@ -2940,7 +2941,7 @@
                     compiler::target::Thread::exit_through_ffi_offset());
   __ LoadImmediate(TMP2, target::Thread::exit_through_ffi());
   __ bne(TMP, TMP2, &exit_through_non_ffi);
-  __ TransitionNativeToGenerated(TMP, /*leave_safepoint=*/true,
+  __ TransitionNativeToGenerated(TMP, /*exit_safepoint=*/true,
                                  /*ignore_unwind_in_progress=*/true);
   __ Bind(&exit_through_non_ffi);
 
diff --git a/runtime/vm/compiler/stub_code_compiler_x64.cc b/runtime/vm/compiler/stub_code_compiler_x64.cc
index 2c9906a..cbcb066 100644
--- a/runtime/vm/compiler/stub_code_compiler_x64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_x64.cc
@@ -348,6 +348,7 @@
   __ EnterFrame(0);
   __ ReserveAlignedFrameSpace(0);
 
+  __ VerifyNotInGenerated(RAX);
   // Set the execution state to VM while waiting for the safepoint to end.
   // This isn't strictly necessary but enables tests to check that we're not
   // in native code anymore. See tests/ffi/function_gc_test.dart for example.
@@ -390,7 +391,7 @@
   __ popq(R12);
   __ CallCFunction(RBX, /*restore_rsp=*/true);
 
-  __ TransitionNativeToGenerated(/*leave_safepoint=*/true);
+  __ TransitionNativeToGenerated(/*exit_safepoint=*/true);
 
   // Faster than jmp because it doesn't confuse the branch predictor.
   __ pushq(R12);
@@ -3368,7 +3369,7 @@
               THR, compiler::target::Thread::exit_through_ffi_offset()),
           compiler::Immediate(target::Thread::exit_through_ffi()));
   __ j(NOT_EQUAL, &exit_through_non_ffi, compiler::Assembler::kNearJump);
-  __ TransitionNativeToGenerated(/*leave_safepoint=*/true,
+  __ TransitionNativeToGenerated(/*exit_safepoint=*/true,
                                  /*ignore_unwind_in_progress=*/true);
   __ Bind(&exit_through_non_ffi);