Version 2.14.0-99.0.dev

Merge commit '2dd7386ec5dc11a5ef5fdd2baffe5e4caf799556' into 'dev'
diff --git a/pkg/front_end/testcases/strong.status b/pkg/front_end/testcases/strong.status
index 6403bd3..f3bdfc8 100644
--- a/pkg/front_end/testcases/strong.status
+++ b/pkg/front_end/testcases/strong.status
@@ -6,6 +6,8 @@
 # Kernel ASTs directly, that is, code in pkg/fasta/lib/src/kernel/ with
 # strong-mode enabled.
 
+dart2js/late_statics: SemiFuzzFailure # dartbug.com/45854
+
 extension_types/extension_on_nullable: ExpectationFileMismatchSerialized # Expected.
 extension_types/issue45775: ExpectationFileMismatchSerialized # Expected.
 extension_types/simple: ExpectationFileMismatchSerialized # Expected.
diff --git a/runtime/observatory/tests/service/service_kernel.status b/runtime/observatory/tests/service/service_kernel.status
index 62bdfcc..8b88460 100644
--- a/runtime/observatory/tests/service/service_kernel.status
+++ b/runtime/observatory/tests/service/service_kernel.status
@@ -147,6 +147,7 @@
 next_through_simple_linear_2_test: SkipByDesign # Debugger is disabled in AOT mode.
 next_through_simple_linear_test: SkipByDesign # Debugger is disabled in AOT mode.
 notify_debugger_on_exception_test: SkipByDesign # Debugger is disabled in AOT mode.
+notify_debugger_on_exception_yielding_test: SkipByDesign # Debugger is disabled in AOT mode.
 parameters_in_scope_at_entry_test: SkipByDesign # Debugger is disabled in AOT mode.
 pause_idle_isolate_test: SkipByDesign # Debugger is disabled in AOT mode.
 pause_on_exception_from_slow_path_test: SkipByDesign # Debugger is disabled in AOT mode.
diff --git a/runtime/observatory_2/tests/service_2/service_2_kernel.status b/runtime/observatory_2/tests/service_2/service_2_kernel.status
index 62bdfcc..8b88460 100644
--- a/runtime/observatory_2/tests/service_2/service_2_kernel.status
+++ b/runtime/observatory_2/tests/service_2/service_2_kernel.status
@@ -147,6 +147,7 @@
 next_through_simple_linear_2_test: SkipByDesign # Debugger is disabled in AOT mode.
 next_through_simple_linear_test: SkipByDesign # Debugger is disabled in AOT mode.
 notify_debugger_on_exception_test: SkipByDesign # Debugger is disabled in AOT mode.
+notify_debugger_on_exception_yielding_test: SkipByDesign # Debugger is disabled in AOT mode.
 parameters_in_scope_at_entry_test: SkipByDesign # Debugger is disabled in AOT mode.
 pause_idle_isolate_test: SkipByDesign # Debugger is disabled in AOT mode.
 pause_on_exception_from_slow_path_test: SkipByDesign # Debugger is disabled in AOT mode.
diff --git a/runtime/vm/class_table.cc b/runtime/vm/class_table.cc
index dbf8f22..ccba236 100644
--- a/runtime/vm/class_table.cc
+++ b/runtime/vm/class_table.cc
@@ -411,7 +411,7 @@
 }
 
 void ClassTable::Remap(intptr_t* old_to_new_cid) {
-  ASSERT(Thread::Current()->IsAtSafepoint());
+  ASSERT(Thread::Current()->IsAtSafepoint(SafepointLevel::kGCAndDeopt));
   const intptr_t num_cids = NumCids();
   std::unique_ptr<ClassPtr[]> cls_by_old_cid(new ClassPtr[num_cids]);
   auto* table = table_.load();
@@ -422,7 +422,7 @@
 }
 
 void SharedClassTable::Remap(intptr_t* old_to_new_cid) {
-  ASSERT(Thread::Current()->IsAtSafepoint());
+  ASSERT(Thread::Current()->IsAtSafepoint(SafepointLevel::kGCAndDeopt));
   const intptr_t num_cids = NumCids();
   std::unique_ptr<intptr_t[]> size_by_old_cid(new intptr_t[num_cids]);
   auto* table = table_.load();
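
Note (annotation, not part of the diff): the asserts above now name an explicit SafepointLevel. A minimal hedged sketch of the assumed ordering, where a stop at the stronger kGCAndDeopt level also satisfies a kGC requirement; the real enum and the per-level state bits live in the VM's thread code, so this helper is illustrative only:

enum SafepointLevel {
  kGC = 0,          // mutators parked somewhere GC is safe
  kGCAndDeopt = 1,  // mutators parked somewhere GC *and* lazy deopt are safe
  kNumLevels = 2,
};

// Hypothetical helper, for illustration: a thread parked at a stronger level
// implicitly satisfies every weaker level.
inline bool SatisfiesLevel(SafepointLevel parked_at, SafepointLevel required) {
  return parked_at >= required;
}
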
diff --git a/runtime/vm/compiler/assembler/assembler_arm.cc b/runtime/vm/compiler/assembler/assembler_arm.cc
index 460ef6f..2a0d655 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm.cc
@@ -529,7 +529,7 @@
   Emit(kDataMemoryBarrier);
 }
 
-void Assembler::EnterSafepoint(Register addr, Register state) {
+void Assembler::EnterFullSafepoint(Register addr, Register state) {
   // We generate the same number of instructions whether or not the slow-path is
   // forced. This simplifies GenerateJitCallbackTrampolines.
   Label slow_path, done, retry;
@@ -541,10 +541,10 @@
   add(addr, THR, Operand(addr));
   Bind(&retry);
   ldrex(state, addr);
-  cmp(state, Operand(target::Thread::safepoint_state_unacquired()));
+  cmp(state, Operand(target::Thread::full_safepoint_state_unacquired()));
   b(&slow_path, NE);
 
-  mov(state, Operand(target::Thread::safepoint_state_acquired()));
+  mov(state, Operand(target::Thread::full_safepoint_state_acquired()));
   strex(TMP, state, addr);
   cmp(TMP, Operand(0));  // 0 means strex was successful.
   b(&done, EQ);
@@ -580,16 +580,16 @@
   StoreToOffset(tmp1, THR, target::Thread::execution_state_offset());
 
   if (enter_safepoint) {
-    EnterSafepoint(tmp1, tmp2);
+    EnterFullSafepoint(tmp1, tmp2);
   }
 }
 
-void Assembler::ExitSafepoint(Register tmp1, Register tmp2) {
+void Assembler::ExitFullSafepoint(Register tmp1, Register tmp2) {
   Register addr = tmp1;
   Register state = tmp2;
 
   // We generate the same number of instructions whether or not the slow-path is
-  // forced, for consistency with EnterSafepoint.
+  // forced, for consistency with EnterFullSafepoint.
   Label slow_path, done, retry;
   if (FLAG_use_slow_path) {
     b(&slow_path);
@@ -599,10 +599,10 @@
   add(addr, THR, Operand(addr));
   Bind(&retry);
   ldrex(state, addr);
-  cmp(state, Operand(target::Thread::safepoint_state_acquired()));
+  cmp(state, Operand(target::Thread::full_safepoint_state_acquired()));
   b(&slow_path, NE);
 
-  mov(state, Operand(target::Thread::safepoint_state_unacquired()));
+  mov(state, Operand(target::Thread::full_safepoint_state_unacquired()));
   strex(TMP, state, addr);
   cmp(TMP, Operand(0));  // 0 means strex was successful.
   b(&done, EQ);
@@ -623,13 +623,14 @@
                                             Register state,
                                             bool exit_safepoint) {
   if (exit_safepoint) {
-    ExitSafepoint(addr, state);
+    ExitFullSafepoint(addr, state);
   } else {
 #if defined(DEBUG)
     // Ensure we've already left the safepoint.
-    LoadImmediate(state, 1 << target::Thread::safepoint_state_inside_bit());
+    ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
+    LoadImmediate(state, target::Thread::full_safepoint_state_acquired());
     ldr(TMP, Address(THR, target::Thread::safepoint_state_offset()));
-    ands(TMP, TMP, Operand(state));  // Is-at-safepoint is the LSB.
+    ands(TMP, TMP, Operand(state));
     Label ok;
     b(&ok, ZERO);
     Breakpoint();
diff --git a/runtime/vm/compiler/assembler/assembler_arm.h b/runtime/vm/compiler/assembler/assembler_arm.h
index 2e995e1..a3a12aa 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.h
+++ b/runtime/vm/compiler/assembler/assembler_arm.h
@@ -584,8 +584,8 @@
   void TransitionNativeToGenerated(Register scratch0,
                                    Register scratch1,
                                    bool exit_safepoint);
-  void EnterSafepoint(Register scratch0, Register scratch1);
-  void ExitSafepoint(Register scratch0, Register scratch1);
+  void EnterFullSafepoint(Register scratch0, Register scratch1);
+  void ExitFullSafepoint(Register scratch0, Register scratch1);
 
   // Miscellaneous instructions.
   void clrex();
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc
index 1cc89a4..f02317e 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -1502,7 +1502,7 @@
   LeaveFrame();
 }
 
-void Assembler::EnterSafepoint(Register state) {
+void Assembler::EnterFullSafepoint(Register state) {
   // We generate the same number of instructions whether or not the slow-path is
   // forced. This simplifies GenerateJitCallbackTrampolines.
 
@@ -1518,10 +1518,10 @@
   add(addr, THR, Operand(addr));
   Bind(&retry);
   ldxr(state, addr);
-  cmp(state, Operand(target::Thread::safepoint_state_unacquired()));
+  cmp(state, Operand(target::Thread::full_safepoint_state_unacquired()));
   b(&slow_path, NE);
 
-  movz(state, Immediate(target::Thread::safepoint_state_acquired()), 0);
+  movz(state, Immediate(target::Thread::full_safepoint_state_acquired()), 0);
   stxr(TMP, state, addr);
   cbz(&done, TMP);  // 0 means stxr was successful.
 
@@ -1555,13 +1555,13 @@
   StoreToOffset(tmp, THR, target::Thread::execution_state_offset());
 
   if (enter_safepoint) {
-    EnterSafepoint(tmp);
+    EnterFullSafepoint(tmp);
   }
 }
 
-void Assembler::ExitSafepoint(Register state) {
+void Assembler::ExitFullSafepoint(Register state) {
   // We generate the same number of instructions whether or not the slow-path is
-  // forced, for consistency with EnterSafepoint.
+  // forced, for consistency with EnterFullSafepoint.
   Register addr = TMP2;
   ASSERT(addr != state);
 
@@ -1574,10 +1574,10 @@
   add(addr, THR, Operand(addr));
   Bind(&retry);
   ldxr(state, addr);
-  cmp(state, Operand(target::Thread::safepoint_state_acquired()));
+  cmp(state, Operand(target::Thread::full_safepoint_state_acquired()));
   b(&slow_path, NE);
 
-  movz(state, Immediate(target::Thread::safepoint_state_unacquired()), 0);
+  movz(state, Immediate(target::Thread::full_safepoint_state_unacquired()), 0);
   stxr(TMP, state, addr);
   cbz(&done, TMP);  // 0 means stxr was successful.
 
@@ -1596,13 +1596,16 @@
 void Assembler::TransitionNativeToGenerated(Register state,
                                             bool exit_safepoint) {
   if (exit_safepoint) {
-    ExitSafepoint(state);
+    ExitFullSafepoint(state);
   } else {
 #if defined(DEBUG)
     // Ensure we've already left the safepoint.
+    ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
+    LoadImmediate(state, target::Thread::full_safepoint_state_acquired());
     ldr(TMP, Address(THR, target::Thread::safepoint_state_offset()));
+    and_(TMP, TMP, Operand(state));
     Label ok;
-    tbz(&ok, TMP, target::Thread::safepoint_state_inside_bit());
+    cbz(&ok, TMP);
     Breakpoint();
     Bind(&ok);
 #endif
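
Note: the DEBUG block above previously tested the single safepoint_state_inside_bit (via tbz); with per-level state, "full safepoint acquired" is treated as a mask, so the check loads the whole state word and ANDs it with full_safepoint_state_acquired(), expecting zero. A hedged standalone rendering of that predicate:

#include <cstdint>

// The ASSERT in the diff guarantees the mask is nonzero, so a zero AND
// result really does mean "not at a full safepoint".
inline bool HasLeftFullSafepoint(uint64_t safepoint_state,
                                 uint64_t full_acquired_mask) {
  return (safepoint_state & full_acquired_mask) == 0;  // else Breakpoint()
}
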
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index 52873c6..5726ac1 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -1909,8 +1909,8 @@
                                    Register new_exit_through_ffi,
                                    bool enter_safepoint);
   void TransitionNativeToGenerated(Register scratch, bool exit_safepoint);
-  void EnterSafepoint(Register scratch);
-  void ExitSafepoint(Register scratch);
+  void EnterFullSafepoint(Register scratch);
+  void ExitFullSafepoint(Register scratch);
 
   void CheckCodePointer();
   void RestoreCodePointer();
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.cc b/runtime/vm/compiler/assembler/assembler_ia32.cc
index 427cb95..adcbe1f 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.cc
+++ b/runtime/vm/compiler/assembler/assembler_ia32.cc
@@ -2249,24 +2249,24 @@
   }
 }
 
-void Assembler::EnterSafepoint(Register scratch) {
+void Assembler::EnterFullSafepoint(Register scratch) {
   // We generate the same number of instructions whether or not the slow-path is
   // forced. This simplifies GenerateJitCallbackTrampolines.
 
-  // Compare and swap the value at Thread::safepoint_state from unacquired to
-  // acquired. On success, jump to 'success'; otherwise, fallthrough.
+  // Compare and swap the value at Thread::safepoint_state from unacquired
+  // to acquired. On success, jump to 'done'; otherwise, fall through.
   Label done, slow_path;
   if (FLAG_use_slow_path) {
     jmp(&slow_path);
   }
 
   pushl(EAX);
-  movl(EAX, Immediate(target::Thread::safepoint_state_unacquired()));
-  movl(scratch, Immediate(target::Thread::safepoint_state_acquired()));
+  movl(EAX, Immediate(target::Thread::full_safepoint_state_unacquired()));
+  movl(scratch, Immediate(target::Thread::full_safepoint_state_acquired()));
   LockCmpxchgl(Address(THR, target::Thread::safepoint_state_offset()), scratch);
   movl(scratch, EAX);
   popl(EAX);
-  cmpl(scratch, Immediate(target::Thread::safepoint_state_unacquired()));
+  cmpl(scratch, Immediate(target::Thread::full_safepoint_state_unacquired()));
 
   if (!FLAG_use_slow_path) {
     j(EQUAL, &done);
@@ -2299,29 +2299,29 @@
        Immediate(target::Thread::native_execution_state()));
 
   if (enter_safepoint) {
-    EnterSafepoint(scratch);
+    EnterFullSafepoint(scratch);
   }
 }
 
-void Assembler::ExitSafepoint(Register scratch) {
+void Assembler::ExitFullSafepoint(Register scratch) {
   ASSERT(scratch != EAX);
   // We generate the same number of instructions whether or not the slow-path is
-  // forced, for consistency with EnterSafepoint.
+  // forced, for consistency with EnterFullSafepoint.
 
-  // Compare and swap the value at Thread::safepoint_state from acquired to
-  // unacquired. On success, jump to 'success'; otherwise, fallthrough.
+  // Compare and swap the value at Thread::safepoint_state from acquired
+  // to unacquired. On success, jump to 'done'; otherwise, fall through.
   Label done, slow_path;
   if (FLAG_use_slow_path) {
     jmp(&slow_path);
   }
 
   pushl(EAX);
-  movl(EAX, Immediate(target::Thread::safepoint_state_acquired()));
-  movl(scratch, Immediate(target::Thread::safepoint_state_unacquired()));
+  movl(EAX, Immediate(target::Thread::full_safepoint_state_acquired()));
+  movl(scratch, Immediate(target::Thread::full_safepoint_state_unacquired()));
   LockCmpxchgl(Address(THR, target::Thread::safepoint_state_offset()), scratch);
   movl(scratch, EAX);
   popl(EAX);
-  cmpl(scratch, Immediate(target::Thread::safepoint_state_acquired()));
+  cmpl(scratch, Immediate(target::Thread::full_safepoint_state_acquired()));
 
   if (!FLAG_use_slow_path) {
     j(EQUAL, &done);
@@ -2338,12 +2338,12 @@
 void Assembler::TransitionNativeToGenerated(Register scratch,
                                             bool exit_safepoint) {
   if (exit_safepoint) {
-    ExitSafepoint(scratch);
+    ExitFullSafepoint(scratch);
   } else {
 #if defined(DEBUG)
     // Ensure we've already left the safepoint.
     movl(scratch, Address(THR, target::Thread::safepoint_state_offset()));
-    andl(scratch, Immediate(1 << target::Thread::safepoint_state_inside_bit()));
+    andl(scratch, Immediate(target::Thread::full_safepoint_state_acquired()));
     Label ok;
     j(ZERO, &ok);
     Breakpoint();
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.h b/runtime/vm/compiler/assembler/assembler_ia32.h
index 8b91fbd..ceda7d9 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.h
+++ b/runtime/vm/compiler/assembler/assembler_ia32.h
@@ -800,8 +800,8 @@
                                    Register new_exit_through_ffi,
                                    bool enter_safepoint);
   void TransitionNativeToGenerated(Register scratch, bool exit_safepoint);
-  void EnterSafepoint(Register scratch);
-  void ExitSafepoint(Register scratch);
+  void EnterFullSafepoint(Register scratch);
+  void ExitFullSafepoint(Register scratch);
 
   // Create a frame for calling into runtime that preserves all volatile
   // registers.  Frame's RSP is guaranteed to be correctly aligned and
diff --git a/runtime/vm/compiler/assembler/assembler_x64.cc b/runtime/vm/compiler/assembler/assembler_x64.cc
index 344097e..1c2c200 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64.cc
@@ -131,7 +131,7 @@
   EmitUint8(0xC0 + (dst & 0x07));
 }
 
-void Assembler::EnterSafepoint() {
+void Assembler::EnterFullSafepoint() {
   // We generate the same number of instructions whether or not the slow-path is
   // forced, to simplify GenerateJitCallbackTrampolines.
   Label done, slow_path;
@@ -139,15 +139,15 @@
     jmp(&slow_path);
   }
 
-  // Compare and swap the value at Thread::safepoint_state from unacquired to
-  // acquired. If the CAS fails, go to a slow-path stub.
+  // Compare and swap the value at Thread::safepoint_state from
+  // unacquired to acquired. If the CAS fails, go to a slow-path stub.
   pushq(RAX);
-  movq(RAX, Immediate(target::Thread::safepoint_state_unacquired()));
-  movq(TMP, Immediate(target::Thread::safepoint_state_acquired()));
+  movq(RAX, Immediate(target::Thread::full_safepoint_state_unacquired()));
+  movq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
   LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
   movq(TMP, RAX);
   popq(RAX);
-  cmpq(TMP, Immediate(target::Thread::safepoint_state_unacquired()));
+  cmpq(TMP, Immediate(target::Thread::full_safepoint_state_unacquired()));
 
   if (!FLAG_use_slow_path) {
     j(EQUAL, &done);
@@ -182,28 +182,29 @@
        Immediate(target::Thread::native_execution_state()));
 
   if (enter_safepoint) {
-    EnterSafepoint();
+    EnterFullSafepoint();
   }
 }
 
-void Assembler::LeaveSafepoint() {
+void Assembler::ExitFullSafepoint() {
   // We generate the same number of instructions whether or not the slow-path is
-  // forced, for consistency with EnterSafepoint.
+  // forced, for consistency with EnterFullSafepoint.
   Label done, slow_path;
   if (FLAG_use_slow_path) {
     jmp(&slow_path);
   }
 
-  // Compare and swap the value at Thread::safepoint_state from acquired to
-  // unacquired. On success, jump to 'success'; otherwise, fallthrough.
+  // Compare and swap the value at Thread::safepoint_state from
+  // acquired to unacquired. On success, jump to 'done'; otherwise,
+  // fall through.
 
   pushq(RAX);
-  movq(RAX, Immediate(target::Thread::safepoint_state_acquired()));
-  movq(TMP, Immediate(target::Thread::safepoint_state_unacquired()));
+  movq(RAX, Immediate(target::Thread::full_safepoint_state_acquired()));
+  movq(TMP, Immediate(target::Thread::full_safepoint_state_unacquired()));
   LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
   movq(TMP, RAX);
   popq(RAX);
-  cmpq(TMP, Immediate(target::Thread::safepoint_state_acquired()));
+  cmpq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
 
   if (!FLAG_use_slow_path) {
     j(EQUAL, &done);
@@ -223,12 +224,12 @@
 
 void Assembler::TransitionNativeToGenerated(bool leave_safepoint) {
   if (leave_safepoint) {
-    LeaveSafepoint();
+    ExitFullSafepoint();
   } else {
 #if defined(DEBUG)
     // Ensure we've already left the safepoint.
     movq(TMP, Address(THR, target::Thread::safepoint_state_offset()));
-    andq(TMP, Immediate((1 << target::Thread::safepoint_state_inside_bit())));
+    andq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
     Label ok;
     j(ZERO, &ok);
     Breakpoint();
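
Note: across all four architectures the renamed routines emit the same protocol; x64/ia32 use a single lock cmpxchg while ARM/ARM64 loop on ldrex/strex (ldxr/stxr). A hedged C++ model of that logic, with the runtime slow paths stubbed out as comments and the extra request/blocked bits of the real state word omitted:

#include <atomic>
#include <cstdint>

constexpr uint64_t kFullUnacquired = 0;  // assumed values mirroring the
constexpr uint64_t kFullAcquired = 1;    // unacquired/acquired constants

// Flip unacquired -> acquired; contention (a safepoint operation already
// owns this thread) is resolved in the runtime slow path.
void EnterFullSafepoint(std::atomic<uint64_t>* state) {
  uint64_t expected = kFullUnacquired;
  if (!state->compare_exchange_strong(expected, kFullAcquired)) {
    // EnterSafepointSlowPath();  // runtime call in the generated code
  }
}

// Flip acquired -> unacquired; failure means a safepoint operation is in
// progress and the thread must block until it is resumed.
void ExitFullSafepoint(std::atomic<uint64_t>* state) {
  uint64_t expected = kFullAcquired;
  if (!state->compare_exchange_strong(expected, kFullUnacquired)) {
    // ExitSafepointSlowPath();
  }
}
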
diff --git a/runtime/vm/compiler/assembler/assembler_x64.h b/runtime/vm/compiler/assembler/assembler_x64.h
index f9c4ff1..182c1ec 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.h
+++ b/runtime/vm/compiler/assembler/assembler_x64.h
@@ -318,8 +318,8 @@
 
   void setcc(Condition condition, ByteRegister dst);
 
-  void EnterSafepoint();
-  void LeaveSafepoint();
+  void EnterFullSafepoint();
+  void ExitFullSafepoint();
   void TransitionGeneratedToNative(Register destination_address,
                                    Register new_exit_frame,
                                    Register new_exit_through_ffi,
diff --git a/runtime/vm/compiler/jit/compiler.cc b/runtime/vm/compiler/jit/compiler.cc
index c9f8883..7539cf8 100644
--- a/runtime/vm/compiler/jit/compiler.cc
+++ b/runtime/vm/compiler/jit/compiler.cc
@@ -386,7 +386,7 @@
     function.SetWasCompiled(true);
   } else if (optimized()) {
     // We cannot execute generated code while installing code.
-    ASSERT(Thread::Current()->IsAtSafepoint() ||
+    ASSERT(Thread::Current()->IsAtSafepoint(SafepointLevel::kGCAndDeopt) ||
            (Thread::Current()->IsMutatorThread() &&
             IsolateGroup::Current()->ContainsOnlyOneIsolate()));
     // We are validating our CHA / field guard / ... assumptions. To prevent
@@ -1203,7 +1203,7 @@
 bool BackgroundCompiler::EnqueueCompilation(const Function& function) {
   Thread* thread = Thread::Current();
   ASSERT(thread->IsMutatorThread());
-  ASSERT(!thread->IsAtSafepoint());
+  ASSERT(!thread->IsAtSafepoint(SafepointLevel::kGCAndDeopt));
 
   SafepointMonitorLocker ml_done(&done_monitor_);
   if (disabled_depth_ > 0) return false;
@@ -1239,7 +1239,7 @@
 void BackgroundCompiler::Stop() {
   Thread* thread = Thread::Current();
   ASSERT(thread->isolate() == nullptr || thread->IsMutatorThread());
-  ASSERT(!thread->IsAtSafepoint());
+  ASSERT(!thread->IsAtSafepoint(SafepointLevel::kGCAndDeopt));
 
   SafepointMonitorLocker ml_done(&done_monitor_);
   StopLocked(thread, &ml_done);
@@ -1262,7 +1262,7 @@
 void BackgroundCompiler::Enable() {
   Thread* thread = Thread::Current();
   ASSERT(thread->IsMutatorThread());
-  ASSERT(!thread->IsAtSafepoint());
+  ASSERT(!thread->IsAtSafepoint(SafepointLevel::kGCAndDeopt));
 
   SafepointMonitorLocker ml_done(&done_monitor_);
   disabled_depth_--;
@@ -1274,7 +1274,7 @@
 void BackgroundCompiler::Disable() {
   Thread* thread = Thread::Current();
   ASSERT(thread->IsMutatorThread());
-  ASSERT(!thread->IsAtSafepoint());
+  ASSERT(!thread->IsAtSafepoint(SafepointLevel::kGCAndDeopt));
 
   SafepointMonitorLocker ml_done(&done_monitor_);
   disabled_depth_++;
diff --git a/runtime/vm/compiler/relocation_test.cc b/runtime/vm/compiler/relocation_test.cc
index fa73d3a..0596cd9 100644
--- a/runtime/vm/compiler/relocation_test.cc
+++ b/runtime/vm/compiler/relocation_test.cc
@@ -40,7 +40,7 @@
   explicit RelocatorTestHelper(Thread* thread)
       : thread(thread),
         locker(thread, thread->isolate_group()->program_lock()),
-        safepoint_and_growth_scope(thread) {
+        safepoint_and_growth_scope(thread, SafepointLevel::kGC) {
     // So the relocator uses the correct instruction size layout.
     FLAG_precompiled_mode = true;
     FLAG_use_bare_instructions = true;
diff --git a/runtime/vm/compiler/runtime_api.cc b/runtime/vm/compiler/runtime_api.cc
index b3edbc9..6df7d2e 100644
--- a/runtime/vm/compiler/runtime_api.cc
+++ b/runtime/vm/compiler/runtime_api.cc
@@ -714,17 +714,12 @@
                   : stack_overflow_shared_without_fpu_regs_entry_point_offset();
 }
 
-uword Thread::safepoint_state_unacquired() {
-  return dart::Thread::safepoint_state_unacquired();
+uword Thread::full_safepoint_state_unacquired() {
+  return dart::Thread::full_safepoint_state_unacquired();
 }
 
-uword Thread::safepoint_state_acquired() {
-  return dart::Thread::safepoint_state_acquired();
-}
-
-intptr_t Thread::safepoint_state_inside_bit() {
-  COMPILE_ASSERT(dart::Thread::AtSafepointField::bitsize() == 1);
-  return dart::Thread::AtSafepointField::shift();
+uword Thread::full_safepoint_state_acquired() {
+  return dart::Thread::full_safepoint_state_acquired();
 }
 
 uword Thread::generated_execution_state() {
diff --git a/runtime/vm/compiler/runtime_api.h b/runtime/vm/compiler/runtime_api.h
index c8cbc06..642c287 100644
--- a/runtime/vm/compiler/runtime_api.h
+++ b/runtime/vm/compiler/runtime_api.h
@@ -1062,9 +1062,8 @@
   static uword vm_tag_dart_id();
 
   static word safepoint_state_offset();
-  static uword safepoint_state_unacquired();
-  static uword safepoint_state_acquired();
-  static intptr_t safepoint_state_inside_bit();
+  static uword full_safepoint_state_unacquired();
+  static uword full_safepoint_state_acquired();
 
   static word execution_state_offset();
   static uword vm_execution_state();
diff --git a/runtime/vm/compiler/stub_code_compiler_arm.cc b/runtime/vm/compiler/stub_code_compiler_arm.cc
index 2c3be0b..0d611f4 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm.cc
@@ -466,8 +466,8 @@
   // them.
   __ blx(R5);
 
-  // EnterSafepoint clobbers R4, R5 and TMP, all saved or volatile.
-  __ EnterSafepoint(R4, R5);
+  // Clobbers R4, R5 and TMP, all saved or volatile.
+  __ EnterFullSafepoint(R4, R5);
 
   // Returns.
   __ PopList((1 << PC) | (1 << THR) | (1 << R4) | (1 << R5));
diff --git a/runtime/vm/compiler/stub_code_compiler_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
index e4b71b1..6438a2c 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -440,9 +440,8 @@
-  // Resets CSP and SP, important for EnterSafepoint below.
+  // Resets CSP and SP, important for EnterFullSafepoint below.
   __ blr(R10);
 
-  // EnterSafepoint clobbers TMP, TMP2 and R9 -- all volatile and not holding
-  // return values.
-  __ EnterSafepoint(/*scratch=*/R9);
+  // Clobbers TMP, TMP2 and R9 -- all volatile and not holding return values.
+  __ EnterFullSafepoint(/*scratch=*/R9);
 
   // Pop LR and THR from the real stack (CSP).
   RESTORES_LR_FROM_FRAME(__ ldp(
diff --git a/runtime/vm/compiler/stub_code_compiler_ia32.cc b/runtime/vm/compiler/stub_code_compiler_ia32.cc
index 939be70..0c4161f 100644
--- a/runtime/vm/compiler/stub_code_compiler_ia32.cc
+++ b/runtime/vm/compiler/stub_code_compiler_ia32.cc
@@ -315,8 +315,8 @@
   __ Bind(&check_done);
 #endif
 
-  // EnterSafepoint takes care to not clobber *any* registers (besides scratch).
-  __ EnterSafepoint(/*scratch=*/ECX);
+  // Takes care to not clobber *any* registers (besides scratch).
+  __ EnterFullSafepoint(/*scratch=*/ECX);
 
   // Restore callee-saved registers.
   __ movl(ECX, EBX);
diff --git a/runtime/vm/compiler/stub_code_compiler_x64.cc b/runtime/vm/compiler/stub_code_compiler_x64.cc
index 503f068..a25196d 100644
--- a/runtime/vm/compiler/stub_code_compiler_x64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_x64.cc
@@ -383,8 +383,8 @@
   // the saved THR and the return address. The target will know to skip them.
   __ call(TMP);
 
-  // EnterSafepoint takes care to not clobber *any* registers (besides TMP).
-  __ EnterSafepoint();
+  // Takes care to not clobber *any* registers (besides TMP).
+  __ EnterFullSafepoint();
 
   // Restore THR (callee-saved).
   __ popq(THR);
diff --git a/runtime/vm/dart_api_impl.cc b/runtime/vm/dart_api_impl.cc
index 852f3b5..842bc8f 100644
--- a/runtime/vm/dart_api_impl.cc
+++ b/runtime/vm/dart_api_impl.cc
@@ -6987,7 +6987,7 @@
   while (true) {
     bool non_main_isolates_alive = false;
     {
-      SafepointOperationScope safepoint(thread);
+      DeoptSafepointOperationScope safepoint(thread);
       group->ForEachIsolate(
           [&](Isolate* isolate) {
             if (isolate != main_isolate) {
diff --git a/runtime/vm/heap/heap.cc b/runtime/vm/heap/heap.cc
index 52d1f65..411dbce 100644
--- a/runtime/vm/heap/heap.cc
+++ b/runtime/vm/heap/heap.cc
@@ -222,7 +222,8 @@
       heap_(isolate_group()->heap()),
       old_space_(heap_->old_space()),
       writable_(writable) {
-  isolate_group()->safepoint_handler()->SafepointThreads(thread);
+  isolate_group()->safepoint_handler()->SafepointThreads(thread,
+                                                         SafepointLevel::kGC);
 
   {
     // It's not safe to iterate over old space when concurrent marking or
@@ -273,7 +274,8 @@
     ml.NotifyAll();
   }
 
-  isolate_group()->safepoint_handler()->ResumeThreads(thread());
+  isolate_group()->safepoint_handler()->ResumeThreads(thread(),
+                                                      SafepointLevel::kGC);
 }
 
 void HeapIterationScope::IterateObjects(ObjectVisitor* visitor) const {
@@ -353,7 +355,7 @@
 
 void Heap::NotifyIdle(int64_t deadline) {
   Thread* thread = Thread::Current();
-  SafepointOperationScope safepoint_operation(thread);
+  GcSafepointOperationScope safepoint_operation(thread);
 
   // Check if we want to collect new-space first, because if we want to collect
   // both new-space and old-space, the new-space collection should run first
@@ -420,7 +422,7 @@
     return;
   }
   {
-    SafepointOperationScope safepoint_operation(thread);
+    GcSafepointOperationScope safepoint_operation(thread);
     RecordBeforeGC(kScavenge, reason);
     VMTagScope tagScope(thread, reason == kIdle ? VMTag::kGCIdleTagId
                                                 : VMTag::kGCNewSpaceTagId);
@@ -444,7 +446,7 @@
     return;
   }
   {
-    SafepointOperationScope safepoint_operation(thread);
+    GcSafepointOperationScope safepoint_operation(thread);
     RecordBeforeGC(kScavenge, reason);
     {
       VMTagScope tagScope(thread, reason == kIdle ? VMTag::kGCIdleTagId
@@ -484,7 +486,7 @@
     return;
   }
   {
-    SafepointOperationScope safepoint_operation(thread);
+    GcSafepointOperationScope safepoint_operation(thread);
     thread->isolate_group()->ForEachIsolate(
         [&](Isolate* isolate) {
           // Discard regexp backtracking stacks to further reduce memory usage.
diff --git a/runtime/vm/heap/heap_sources.gni b/runtime/vm/heap/heap_sources.gni
index 00100cf..de6d25e 100644
--- a/runtime/vm/heap/heap_sources.gni
+++ b/runtime/vm/heap/heap_sources.gni
@@ -41,4 +41,5 @@
   "pages_test.cc",
+  "safepoint_test.cc",
   "scavenger_test.cc",
   "weak_table_test.cc",
 ]
diff --git a/runtime/vm/heap/pages.cc b/runtime/vm/heap/pages.cc
index 192c3df..61023ef 100644
--- a/runtime/vm/heap/pages.cc
+++ b/runtime/vm/heap/pages.cc
@@ -1085,7 +1085,7 @@
 
   Thread* thread = Thread::Current();
   const int64_t pre_safe_point = OS::GetCurrentMonotonicMicros();
-  SafepointOperationScope safepoint_scope(thread);
+  GcSafepointOperationScope safepoint_scope(thread);
 
   const int64_t pre_wait_for_sweepers = OS::GetCurrentMonotonicMicros();
   // Wait for pending tasks to complete and then account for the driver task.
diff --git a/runtime/vm/heap/safepoint.cc b/runtime/vm/heap/safepoint.cc
index 4366a6c..811cb97 100644
--- a/runtime/vm/heap/safepoint.cc
+++ b/runtime/vm/heap/safepoint.cc
@@ -12,41 +12,33 @@
 
 DEFINE_FLAG(bool, trace_safepoint, false, "Trace Safepoint logic.");
 
-SafepointOperationScope::SafepointOperationScope(Thread* T)
-    : ThreadStackResource(T) {
+SafepointOperationScope::SafepointOperationScope(Thread* T,
+                                                 SafepointLevel level)
+    : ThreadStackResource(T), level_(level) {
   ASSERT(T != nullptr && T->isolate_group() != nullptr);
 
-  SafepointHandler* handler = T->isolate_group()->safepoint_handler();
-  ASSERT(handler != NULL);
-
-  // Signal all threads to get to a safepoint and wait for them to
-  // get to a safepoint.
-  handler->SafepointThreads(T);
+  auto handler = T->isolate_group()->safepoint_handler();
+  handler->SafepointThreads(T, level_);
 }
 
 SafepointOperationScope::~SafepointOperationScope() {
   Thread* T = thread();
   ASSERT(T != nullptr && T->isolate_group() != nullptr);
 
-  // Resume all threads which are blocked for the safepoint operation.
-  SafepointHandler* handler = T->isolate_group()->safepoint_handler();
-  ASSERT(handler != NULL);
-  handler->ResumeThreads(T);
+  auto handler = T->isolate_group()->safepoint_handler();
+  handler->ResumeThreads(T, level_);
 }
 
 ForceGrowthSafepointOperationScope::ForceGrowthSafepointOperationScope(
-    Thread* T)
-    : ThreadStackResource(T) {
+    Thread* T,
+    SafepointLevel level)
+    : ThreadStackResource(T), level_(level) {
   ASSERT(T != NULL);
   IsolateGroup* IG = T->isolate_group();
   ASSERT(IG != NULL);
 
-  SafepointHandler* handler = IG->safepoint_handler();
-  ASSERT(handler != NULL);
-
-  // Signal all threads to get to a safepoint and wait for them to
-  // get to a safepoint.
-  handler->SafepointThreads(T);
+  auto handler = IG->safepoint_handler();
+  handler->SafepointThreads(T, level_);
 
   // N.B.: Change growth policy inside the safepoint to prevent racy access.
   Heap* heap = IG->heap();
@@ -64,10 +56,8 @@
   Heap* heap = IG->heap();
   heap->SetGrowthControlState(current_growth_controller_state_);
 
-  // Resume all threads which are blocked for the safepoint operation.
-  SafepointHandler* handler = IG->safepoint_handler();
-  ASSERT(handler != NULL);
-  handler->ResumeThreads(T);
+  auto handler = IG->safepoint_handler();
+  handler->ResumeThreads(T, level_);
 
   if (current_growth_controller_state_) {
     ASSERT(T->CanCollectGarbage());
@@ -82,87 +72,131 @@
 
 SafepointHandler::SafepointHandler(IsolateGroup* isolate_group)
     : isolate_group_(isolate_group),
-      safepoint_lock_(),
-      number_threads_not_at_safepoint_(0),
-      safepoint_operation_count_(0),
-      owner_(NULL) {}
+      handlers_{
+          {isolate_group, SafepointLevel::kGC},
+          {isolate_group, SafepointLevel::kGCAndDeopt},
+      } {}
 
 SafepointHandler::~SafepointHandler() {
-  ASSERT(owner_ == NULL);
-  ASSERT(safepoint_operation_count_ == 0);
-  isolate_group_ = NULL;
+  for (intptr_t level = 0; level < SafepointLevel::kNumLevels; ++level) {
+    ASSERT(handlers_[level].owner_ == nullptr);
+  }
 }
 
-void SafepointHandler::SafepointThreads(Thread* T) {
+void SafepointHandler::SafepointThreads(Thread* T, SafepointLevel level) {
   ASSERT(T->no_safepoint_scope_depth() == 0);
   ASSERT(T->execution_state() == Thread::kThreadInVM);
+  ASSERT(T->current_safepoint_level() >= level);
 
   {
-    // First grab the threads list lock for this isolate
-    // and check if a safepoint is already in progress. This
-    // ensures that two threads do not start a safepoint operation
-    // at the same time.
-    MonitorLocker sl(threads_lock());
+    MonitorLocker tl(threads_lock());
 
-    // Now check to see if a safepoint operation is already in progress
-    // for this isolate, block if an operation is in progress.
-    while (SafepointInProgress()) {
-      // If we are recursively invoking a Safepoint operation then we
-      // just increment the count and return, otherwise we wait for the
-      // safepoint operation to be done.
-      if (owner_ == T) {
-        increment_safepoint_operation_count();
-        return;
-      }
-      sl.WaitWithSafepointCheck(T);
+    // Allow recursive safepoint operations (same owner, same level).
+    if (handlers_[level].owner_ == T) {
+      handlers_[level].operation_count_++;
+      // If we own this safepoint level already we have to own the lower levels
+      // as well.
+      AssertWeOwnLowerLevelSafepoints(T, level);
+      return;
     }
 
-    // Set safepoint in progress state by this thread.
-    SetSafepointInProgress(T);
+    // This level of nesting is not allowed (this thread cannot own lower
+    // levels and then later try to acquire higher levels).
+    AssertWeDoNotOwnLowerLevelSafepoints(T, level);
 
-    // Go over the active thread list and ensure that all threads active
-    // in the isolate reach a safepoint.
-    Thread* current = isolate_group()->thread_registry()->active_list();
-    while (current != NULL) {
-      MonitorLocker tl(current->thread_lock());
-      if (!current->BypassSafepoints()) {
-        if (current == T) {
-          current->SetAtSafepoint(true);
-        } else {
-          uint32_t state = current->SetSafepointRequested(true);
-          if (!Thread::IsAtSafepoint(state)) {
-            // Thread is not already at a safepoint so try to
-            // get it to a safepoint and wait for it to check in.
-            if (current->IsMutatorThread()) {
-              current->ScheduleInterruptsLocked(Thread::kVMInterrupt);
-            }
-            MonitorLocker sl(&safepoint_lock_);
-            ++number_threads_not_at_safepoint_;
-          }
+    // Mark this thread at safepoint and possibly notify waiting threads.
+    {
+      MonitorLocker tl(T->thread_lock());
+      EnterSafepointLocked(T, &tl);
+    }
+
+    // Wait until other safepoint operations are done & mark us as owning
+    // the safepoint - so no other thread can start one.
+    while (handlers_[level].SafepointInProgress()) {
+      tl.Wait();
+    }
+    handlers_[level].SetSafepointInProgress(T);
+
+    // Ensure each thread is at a safepoint or notify it to get to one.
+    handlers_[level].NotifyThreadsToGetToSafepointLevel(T);
+  }
+
+  // Now wait for all threads that are not already at a safepoint to check in.
+  handlers_[level].WaitUntilThreadsReachedSafepointLevel();
+
+  AcquireLowerLevelSafepoints(T, level);
+}
+
+void SafepointHandler::AssertWeOwnLowerLevelSafepoints(Thread* T,
+                                                       SafepointLevel level) {
+  for (intptr_t lower_level = level - 1; lower_level >= 0; --lower_level) {
+    RELEASE_ASSERT(handlers_[lower_level].owner_ == T);
+  }
+}
+
+void SafepointHandler::AssertWeDoNotOwnLowerLevelSafepoints(
+    Thread* T,
+    SafepointLevel level) {
+  for (intptr_t lower_level = level - 1; lower_level >= 0; --lower_level) {
+    RELEASE_ASSERT(handlers_[lower_level].owner_ != T);
+  }
+}
+
+void SafepointHandler::LevelHandler::NotifyThreadsToGetToSafepointLevel(
+    Thread* T) {
+  ASSERT(num_threads_not_parked_ == 0);
+  for (auto current = isolate_group()->thread_registry()->active_list();
+       current != nullptr; current = current->next()) {
+    MonitorLocker tl(current->thread_lock());
+    if (!current->BypassSafepoints() && current != T) {
+      const uint32_t state = current->SetSafepointRequested(level_, true);
+      if (!Thread::IsAtSafepoint(level_, state)) {
+        // Send OOB message to get it to safepoint.
+        if (current->IsMutatorThread()) {
+          current->ScheduleInterruptsLocked(Thread::kVMInterrupt);
         }
+        MonitorLocker sl(&parked_lock_);
+        num_threads_not_parked_++;
       }
-      current = current->next();
     }
   }
-  // Now wait for all threads that are not already at a safepoint to check-in.
+}
+
+void SafepointHandler::ResumeThreads(Thread* T, SafepointLevel level) {
   {
-    MonitorLocker sl(&safepoint_lock_);
-    intptr_t num_attempts = 0;
-    while (number_threads_not_at_safepoint_ > 0) {
-      Monitor::WaitResult retval = sl.Wait(1000);
-      if (retval == Monitor::kTimedOut) {
-        num_attempts += 1;
-        if (FLAG_trace_safepoint && num_attempts > 10) {
-          // We have been waiting too long, start logging this as we might
-          // have an issue where a thread is not checking in for a safepoint.
-          for (Thread* current =
-                   isolate_group()->thread_registry()->active_list();
-               current != NULL; current = current->next()) {
-            if (!current->IsAtSafepoint()) {
-              OS::PrintErr("Attempt:%" Pd
-                           " waiting for thread %s to check in\n",
-                           num_attempts, current->os_thread()->name());
-            }
+    MonitorLocker sl(threads_lock());
+
+    ASSERT(handlers_[level].SafepointInProgress());
+    ASSERT(handlers_[level].owner_ == T);
+    AssertWeOwnLowerLevelSafepoints(T, level);
+
+    // We allow recursive safepoint operations.
+    if (handlers_[level].operation_count_ > 1) {
+      handlers_[level].operation_count_--;
+      return;
+    }
+
+    ReleaseLowerLevelSafepoints(T, level);
+    handlers_[level].NotifyThreadsToContinue(T);
+    handlers_[level].ResetSafepointInProgress(T);
+    sl.NotifyAll();
+  }
+  ExitSafepointUsingLock(T);
+}
+
+void SafepointHandler::LevelHandler::WaitUntilThreadsReachedSafepointLevel() {
+  MonitorLocker sl(&parked_lock_);
+  intptr_t num_attempts = 0;
+  while (num_threads_not_parked_ > 0) {
+    Monitor::WaitResult retval = sl.Wait(1000);
+    if (retval == Monitor::kTimedOut) {
+      num_attempts += 1;
+      if (FLAG_trace_safepoint && num_attempts > 10) {
+        for (auto current = isolate_group()->thread_registry()->active_list();
+             current != nullptr; current = current->next()) {
+          if (!current->IsAtSafepoint(level_)) {
+            OS::PrintErr("Attempt:%" Pd " waiting for thread %s to check in\n",
+                         num_attempts, current->os_thread()->name());
           }
         }
       }
@@ -170,80 +204,96 @@
   }
 }
 
-void SafepointHandler::ResumeThreads(Thread* T) {
-  // First resume all the threads which are blocked for the safepoint
-  // operation.
-  MonitorLocker sl(threads_lock());
-
-  // First check if we are in a recursive safepoint operation, in that case
-  // we just decrement safepoint_operation_count and return.
-  ASSERT(SafepointInProgress());
-  if (safepoint_operation_count() > 1) {
-    decrement_safepoint_operation_count();
-    return;
+void SafepointHandler::AcquireLowerLevelSafepoints(Thread* T,
+                                                   SafepointLevel level) {
+  MonitorLocker tl(threads_lock());
+  ASSERT(handlers_[level].owner_ == T);
+  for (intptr_t lower_level = level - 1; lower_level >= 0; --lower_level) {
+    while (handlers_[lower_level].SafepointInProgress()) {
+      tl.Wait();
+    }
+    handlers_[lower_level].SetSafepointInProgress(T);
+    ASSERT(handlers_[lower_level].owner_ == T);
   }
-  Thread* current = isolate_group()->thread_registry()->active_list();
-  while (current != NULL) {
+}
+
+void SafepointHandler::ReleaseLowerLevelSafepoints(Thread* T,
+                                                   SafepointLevel level) {
+  for (intptr_t lower_level = 0; lower_level < level; ++lower_level) {
+    handlers_[lower_level].ResetSafepointInProgress(T);
+  }
+}
+
+void SafepointHandler::LevelHandler::NotifyThreadsToContinue(Thread* T) {
+  for (auto current = isolate_group()->thread_registry()->active_list();
+       current != nullptr; current = current->next()) {
     MonitorLocker tl(current->thread_lock());
-    if (!current->BypassSafepoints()) {
-      if (current == T) {
-        current->SetAtSafepoint(false);
-      } else {
-        uint32_t state = current->SetSafepointRequested(false);
-        if (Thread::IsBlockedForSafepoint(state)) {
-          tl.Notify();
+    if (!current->BypassSafepoints() && current != T) {
+      bool resume = false;
+      for (intptr_t lower_level = level_; lower_level >= 0; --lower_level) {
+        if (Thread::IsBlockedForSafepoint(current->SetSafepointRequested(
+                static_cast<SafepointLevel>(lower_level), false))) {
+          resume = true;
         }
       }
+      if (resume) {
+        tl.Notify();
+      }
     }
-    current = current->next();
   }
-  // Now reset the safepoint_in_progress_ state and notify all threads
-  // that are waiting to enter the isolate or waiting to start another
-  // safepoint operation.
-  ResetSafepointInProgress(T);
-  sl.NotifyAll();
 }
 
 void SafepointHandler::EnterSafepointUsingLock(Thread* T) {
   MonitorLocker tl(T->thread_lock());
-  T->SetAtSafepoint(true);
-  if (T->IsSafepointRequested()) {
-    MonitorLocker sl(&safepoint_lock_);
-    ASSERT(number_threads_not_at_safepoint_ > 0);
-    number_threads_not_at_safepoint_ -= 1;
-    sl.Notify();
-  }
+  EnterSafepointLocked(T, &tl);
 }
 
 void SafepointHandler::ExitSafepointUsingLock(Thread* T) {
   MonitorLocker tl(T->thread_lock());
   ASSERT(T->IsAtSafepoint());
-  while (T->IsSafepointRequested()) {
-    T->SetBlockedForSafepoint(true);
-    tl.Wait();
-    T->SetBlockedForSafepoint(false);
-  }
-  T->SetAtSafepoint(false);
+  ExitSafepointLocked(T, &tl);
+  ASSERT(!T->IsSafepointRequestedLocked());
 }
 
 void SafepointHandler::BlockForSafepoint(Thread* T) {
   ASSERT(!T->BypassSafepoints());
   MonitorLocker tl(T->thread_lock());
-  if (T->IsSafepointRequested()) {
-    T->SetAtSafepoint(true);
-    {
-      MonitorLocker sl(&safepoint_lock_);
-      ASSERT(number_threads_not_at_safepoint_ > 0);
-      number_threads_not_at_safepoint_ -= 1;
-      sl.Notify();
-    }
-    while (T->IsSafepointRequested()) {
-      T->SetBlockedForSafepoint(true);
-      tl.Wait();
-      T->SetBlockedForSafepoint(false);
-    }
-    T->SetAtSafepoint(false);
+  // This takes into account the safepoint level the thread can participate in.
+  if (T->IsSafepointRequestedLocked()) {
+    EnterSafepointLocked(T, &tl);
+    ExitSafepointLocked(T, &tl);
+    ASSERT(!T->IsSafepointRequestedLocked());
   }
 }
 
+void SafepointHandler::EnterSafepointLocked(Thread* T, MonitorLocker* tl) {
+  T->SetAtSafepoint(true);
+
+  for (intptr_t level = T->current_safepoint_level(); level >= 0; --level) {
+    if (T->IsSafepointLevelRequestedLocked(
+            static_cast<SafepointLevel>(level))) {
+      handlers_[level].NotifyWeAreParked(T);
+    }
+  }
+}
+
+void SafepointHandler::LevelHandler::NotifyWeAreParked(Thread* T) {
+  ASSERT(owner_ != nullptr);
+  MonitorLocker sl(&parked_lock_);
+  ASSERT(num_threads_not_parked_ > 0);
+  num_threads_not_parked_ -= 1;
+  if (num_threads_not_parked_ == 0) {
+    sl.Notify();
+  }
+}
+
+void SafepointHandler::ExitSafepointLocked(Thread* T, MonitorLocker* tl) {
+  while (T->IsSafepointRequestedLocked()) {
+    T->SetBlockedForSafepoint(true);
+    tl->Wait();
+    T->SetBlockedForSafepoint(false);
+  }
+  T->SetAtSafepoint(false);
+}
+
 }  // namespace dart
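
Note: to summarize the ownership rules the handler now enforces, a hedged usage sketch written against the scopes this change introduces in safepoint.h below (NestingExample is illustrative, not code from the change):

#include "vm/heap/safepoint.h"

void NestingExample(dart::Thread* T) {
  // OK: the deopt operation owns kGCAndDeopt and, via
  // AcquireLowerLevelSafepoints, kGC as well; the nested GC-level operation
  // is treated as recursive and only bumps operation_count_.
  dart::DeoptSafepointOperationScope deopt_op(T);
  dart::GcSafepointOperationScope gc_op(T);

  // Not OK, rejected by AssertWeDoNotOwnLowerLevelSafepoints (taking the
  // weaker level first and then the stronger one could deadlock):
  //   dart::GcSafepointOperationScope gc_first(T);
  //   dart::DeoptSafepointOperationScope deopt_then(T);  // RELEASE_ASSERT
}
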
diff --git a/runtime/vm/heap/safepoint.h b/runtime/vm/heap/safepoint.h
index faf9c46..13552b7 100644
--- a/runtime/vm/heap/safepoint.h
+++ b/runtime/vm/heap/safepoint.h
@@ -17,23 +17,48 @@
 // all threads to a safepoint. At the end of the operation all the threads are
 // resumed.
 class SafepointOperationScope : public ThreadStackResource {
- public:
-  explicit SafepointOperationScope(Thread* T);
+ protected:
+  SafepointOperationScope(Thread* T, SafepointLevel level);
   ~SafepointOperationScope();
 
  private:
+  SafepointLevel level_;
+
   DISALLOW_COPY_AND_ASSIGN(SafepointOperationScope);
 };
 
+// Gets all mutators to a safepoint where GC is allowed.
+class GcSafepointOperationScope : public SafepointOperationScope {
+ public:
+  explicit GcSafepointOperationScope(Thread* T)
+      : SafepointOperationScope(T, SafepointLevel::kGC) {}
+  ~GcSafepointOperationScope() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(GcSafepointOperationScope);
+};
+
+// Gets all mutators to a safepoint where GC and deopt are allowed.
+class DeoptSafepointOperationScope : public SafepointOperationScope {
+ public:
+  explicit DeoptSafepointOperationScope(Thread* T)
+      : SafepointOperationScope(T, SafepointLevel::kGCAndDeopt) {}
+  ~DeoptSafepointOperationScope() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DeoptSafepointOperationScope);
+};
+
 // A stack based scope that can be used to perform an operation after getting
 // all threads to a safepoint. At the end of the operation all the threads are
 // resumed. Allocations in the scope will force heap growth.
 class ForceGrowthSafepointOperationScope : public ThreadStackResource {
  public:
-  explicit ForceGrowthSafepointOperationScope(Thread* T);
+  ForceGrowthSafepointOperationScope(Thread* T, SafepointLevel level);
   ~ForceGrowthSafepointOperationScope();
 
  private:
+  SafepointLevel level_;
   bool current_growth_controller_state_;
 
   DISALLOW_COPY_AND_ASSIGN(ForceGrowthSafepointOperationScope);
@@ -48,65 +73,105 @@
 
   void EnterSafepointUsingLock(Thread* T);
   void ExitSafepointUsingLock(Thread* T);
-
   void BlockForSafepoint(Thread* T);
 
-  bool IsOwnedByTheThread(Thread* thread) { return owner_ == thread; }
+  bool IsOwnedByTheThread(Thread* thread) {
+    for (intptr_t level = 0; level < SafepointLevel::kNumLevels; ++level) {
+      if (handlers_[level].owner_ == thread) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  bool AnySafepointInProgress() {
+    for (intptr_t level = 0; level < SafepointLevel::kNumLevels; ++level) {
+      if (handlers_[level].SafepointInProgress()) {
+        return true;
+      }
+    }
+    return false;
+  }
 
  private:
-  void SafepointThreads(Thread* T);
-  void ResumeThreads(Thread* T);
+  class LevelHandler {
+   public:
+    LevelHandler(IsolateGroup* isolate_group, SafepointLevel level)
+        : isolate_group_(isolate_group), level_(level) {}
+
+    bool SafepointInProgress() const {
+      ASSERT(threads_lock()->IsOwnedByCurrentThread());
+      ASSERT((operation_count_ > 0) == (owner_ != nullptr));
+      return ((operation_count_ > 0) && (owner_ != nullptr));
+    }
+    void SetSafepointInProgress(Thread* T) {
+      ASSERT(threads_lock()->IsOwnedByCurrentThread());
+      ASSERT(owner_ == nullptr);
+      ASSERT(operation_count_ == 0);
+      operation_count_ = 1;
+      owner_ = T;
+    }
+    void ResetSafepointInProgress(Thread* T) {
+      ASSERT(threads_lock()->IsOwnedByCurrentThread());
+      ASSERT(owner_ == T);
+      ASSERT(operation_count_ == 1);
+      operation_count_ = 0;
+      owner_ = nullptr;
+    }
+    void NotifyWeAreParked(Thread* T);
+
+    IsolateGroup* isolate_group() const { return isolate_group_; }
+    Monitor* threads_lock() const { return isolate_group_->threads_lock(); }
+
+   private:
+    friend class SafepointHandler;
+
+    // Helper methods for [SafepointThreads]
+    void NotifyThreadsToGetToSafepointLevel(Thread* T);
+    void WaitUntilThreadsReachedSafepointLevel();
+
+    // Helper methods for [ResumeThreads]
+    void NotifyThreadsToContinue(Thread* T);
+
+    IsolateGroup* isolate_group_;
+    SafepointLevel level_;
+
+    // Monitor used by thread initiating a safepoint operation to track threads
+    // not at a safepoint and wait for these threads to reach a safepoint.
+    Monitor parked_lock_;
+
+    // If a safepoint operation is currently in progress, this field contains
+    // the thread that initiated the safepoint operation, otherwise nullptr.
+    Thread* owner_ = nullptr;
+
+    // The number of nested safepoint operations currently held.
+    int32_t operation_count_ = 0;
+
+    // The number of threads that the currently in-progress safepoint
+    // operation is still waiting on to check in.
+    int32_t num_threads_not_parked_ = 0;
+  };
+
+  void SafepointThreads(Thread* T, SafepointLevel level);
+  void ResumeThreads(Thread* T, SafepointLevel level);
+
+  // Helper methods for [SafepointThreads]
+  void AssertWeOwnLowerLevelSafepoints(Thread* T, SafepointLevel level);
+  void AssertWeDoNotOwnLowerLevelSafepoints(Thread* T, SafepointLevel level);
+  void AcquireLowerLevelSafepoints(Thread* T, SafepointLevel level);
+
+  // Helper methods for [ResumeThreads]
+  void ReleaseLowerLevelSafepoints(Thread* T, SafepointLevel level);
+
+  void EnterSafepointLocked(Thread* T, MonitorLocker* tl);
+  void ExitSafepointLocked(Thread* T, MonitorLocker* tl);
 
   IsolateGroup* isolate_group() const { return isolate_group_; }
   Monitor* threads_lock() const { return isolate_group_->threads_lock(); }
-  bool SafepointInProgress() const {
-    ASSERT(threads_lock()->IsOwnedByCurrentThread());
-    return ((safepoint_operation_count_ > 0) && (owner_ != NULL));
-  }
-  void SetSafepointInProgress(Thread* T) {
-    ASSERT(threads_lock()->IsOwnedByCurrentThread());
-    ASSERT(owner_ == NULL);
-    ASSERT(safepoint_operation_count_ == 0);
-    safepoint_operation_count_ = 1;
-    owner_ = T;
-  }
-  void ResetSafepointInProgress(Thread* T) {
-    ASSERT(threads_lock()->IsOwnedByCurrentThread());
-    ASSERT(owner_ == T);
-    ASSERT(safepoint_operation_count_ == 1);
-    safepoint_operation_count_ = 0;
-    owner_ = NULL;
-  }
-  int32_t safepoint_operation_count() const {
-    ASSERT(threads_lock()->IsOwnedByCurrentThread());
-    return safepoint_operation_count_;
-  }
-  void increment_safepoint_operation_count() {
-    ASSERT(threads_lock()->IsOwnedByCurrentThread());
-    ASSERT(safepoint_operation_count_ < kMaxInt32);
-    safepoint_operation_count_ += 1;
-  }
-  void decrement_safepoint_operation_count() {
-    ASSERT(threads_lock()->IsOwnedByCurrentThread());
-    ASSERT(safepoint_operation_count_ > 0);
-    safepoint_operation_count_ -= 1;
-  }
 
   IsolateGroup* isolate_group_;
 
-  // Monitor used by thread initiating a safepoint operation to track threads
-  // not at a safepoint and wait for these threads to reach a safepoint.
-  Monitor safepoint_lock_;
-  int32_t number_threads_not_at_safepoint_;
-
-  // Count that indicates if a safepoint operation is currently in progress
-  // and also tracks the number of recursive safepoint operations on the
-  // same thread.
-  int32_t safepoint_operation_count_;
-
-  // If a safepoint operation is currently in progress, this field contains
-  // the thread that initiated the safepoint operation, otherwise it is NULL.
-  Thread* owner_;
+  LevelHandler handlers_[SafepointLevel::kNumLevels];
 
   friend class Isolate;
   friend class IsolateGroup;
@@ -186,7 +251,7 @@
     // We do the more expensive operation of blocking the thread
     // only if a safepoint is requested.
     if (T->IsSafepointRequested()) {
-      handler()->BlockForSafepoint(T);
+      T->BlockForSafepoint();
     }
   }
 
@@ -291,10 +356,8 @@
     ASSERT(thread()->execution_state() == Thread::kThreadInGenerated);
     thread()->set_execution_state(Thread::kThreadInVM);
     // Fast check to see if a safepoint is requested or not.
-    // We do the more expensive operation of blocking the thread
-    // only if a safepoint is requested.
     if (thread()->IsSafepointRequested()) {
-      handler()->BlockForSafepoint(thread());
+      thread()->BlockForSafepoint();
     }
   }
 
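Note: the transition scopes above now call Thread::BlockForSafepoint directly rather than going through the handler. A hedged sketch of the mutator fast path, using only calls that appear in this diff (the wrapper name is assumed):

#include "vm/thread.h"

void CheckInIfRequested(dart::Thread* T) {
  // Cheap flag test; locks are only taken if some safepoint operation has
  // actually requested this thread, at a level it can participate in.
  if (T->IsSafepointRequested()) {
    T->BlockForSafepoint();  // enter + exit the safepoint, wait for resume
  }
}
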
diff --git a/runtime/vm/heap/safepoint_test.cc b/runtime/vm/heap/safepoint_test.cc
new file mode 100644
index 0000000..15eedc5
--- /dev/null
+++ b/runtime/vm/heap/safepoint_test.cc
@@ -0,0 +1,537 @@
+// Copyright (c) 2021, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "platform/assert.h"
+
+#include "vm/heap/safepoint.h"
+#include "vm/isolate.h"
+#include "vm/lockers.h"
+#include "vm/random.h"
+#include "vm/thread_pool.h"
+#include "vm/unit_test.h"
+
+namespace dart {
+
+class StateMachineTask : public ThreadPool::Task {
+ public:
+  enum State {
+    kInitialized = 0,
+    kEntered,
+    kPleaseExit,
+    kExited,
+    kNext,
+  };
+  struct Data {
+    explicit Data(IsolateGroup* isolate_group)
+        : isolate_group_(isolate_group) {}
+
+    void WaitUntil(intptr_t target_state) {
+      MonitorLocker ml(&monitor_);
+      while (state != target_state) {
+        ml.Wait();
+      }
+    }
+    void MarkAndNotify(intptr_t target_state) {
+      MonitorLocker ml(&monitor_);
+      state = target_state;
+      ml.Notify();
+    }
+    void AssertIsIn(intptr_t expected_state) {
+      MonitorLocker ml(&monitor_);
+      EXPECT_EQ(expected_state, state);
+    }
+    void AssertIsNotIn(intptr_t unexpected_state) {
+      MonitorLocker ml(&monitor_);
+      EXPECT_NE(unexpected_state, state);
+    }
+    bool IsIn(intptr_t expected_state) {
+      MonitorLocker ml(&monitor_);
+      return expected_state == state;
+    }
+
+    intptr_t state = kInitialized;
+    IsolateGroup* isolate_group_;
+
+   private:
+    Monitor monitor_;
+  };
+
+  explicit StateMachineTask(std::shared_ptr<Data> data)
+      : data_(std::move(data)) {}
+
+  virtual void Run() {
+    const bool kBypassSafepoint = false;
+    Thread::EnterIsolateGroupAsHelper(data_->isolate_group_,
+                                      Thread::kUnknownTask, kBypassSafepoint);
+    thread_ = Thread::Current();
+    data_->MarkAndNotify(kEntered);
+    RunInternal();
+    data_->WaitUntil(kPleaseExit);
+    Thread::ExitIsolateGroupAsHelper(kBypassSafepoint);
+    thread_ = nullptr;
+    data_->MarkAndNotify(kExited);
+  }
+
+ protected:
+  virtual void RunInternal() = 0;
+
+  std::shared_ptr<Data> data_;
+  Thread* thread_ = nullptr;
+};
+
+class DeoptTask : public StateMachineTask {
+ public:
+  enum State {
+    kStartDeoptOperation = StateMachineTask::kNext,
+    kFinishedDeoptOperation,
+  };
+
+  explicit DeoptTask(std::shared_ptr<Data> data)
+      : StateMachineTask(std::move(data)) {}
+
+ protected:
+  virtual void RunInternal() {
+    data_->WaitUntil(kStartDeoptOperation);
+    { DeoptSafepointOperationScope safepoint_operation(thread_); }
+    data_->MarkAndNotify(kFinishedDeoptOperation);
+  }
+};
+
+class GcWithoutDeoptTask : public StateMachineTask {
+ public:
+  enum State {
+    kStartSafepointOperation = StateMachineTask::kNext,
+    kEndSafepointOperation,
+    kJoinDeoptOperation,
+    kDeoptOperationDone,
+  };
+
+  explicit GcWithoutDeoptTask(std::shared_ptr<Data> data)
+      : StateMachineTask(std::move(data)) {}
+
+ protected:
+  virtual void RunInternal() {
+    data_->WaitUntil(kStartSafepointOperation);
+    {
+      RuntimeCallDeoptScope no_deopt(thread_,
+                                     RuntimeCallDeoptAbility::kCannotLazyDeopt);
+      GcSafepointOperationScope safepoint_operation(thread_);
+    }
+    data_->MarkAndNotify(kEndSafepointOperation);
+
+    data_->WaitUntil(kJoinDeoptOperation);
+    EXPECT(thread_->IsSafepointRequested());
+    thread_->BlockForSafepoint();
+    data_->MarkAndNotify(kDeoptOperationDone);
+  }
+};
+
+// This test ensures that while a "deopt safepoint operation" is about to
+// start but is still waiting for some threads to hit a "deopt safepoint",
+// another safepoint operation can successfully start and finish.
+ISOLATE_UNIT_TEST_CASE(
+    SafepointOperation_SafepointOpWhileDeoptSafepointOpBlocked) {
+  auto isolate_group = thread->isolate_group();
+
+  std::shared_ptr<DeoptTask::Data> deopt(new DeoptTask::Data(isolate_group));
+  std::shared_ptr<GcWithoutDeoptTask::Data> gc(
+      new GcWithoutDeoptTask::Data(isolate_group));
+
+  thread->EnterSafepoint();
+  {
+    // Will join outstanding threads on destruction.
+    ThreadPool pool;
+
+    pool.Run<DeoptTask>(deopt);
+    pool.Run<GcWithoutDeoptTask>(gc);
+
+    // Wait until both threads entered the isolate group.
+    deopt->WaitUntil(DeoptTask::kEntered);
+    gc->WaitUntil(GcWithoutDeoptTask::kEntered);
+
+    // Let the deopt task start a deopt operation scope (it will block in the
+    // [SafepointOperationScope] constructor until all threads have checked in).
+    deopt->MarkAndNotify(DeoptTask::kStartDeoptOperation);
+    OS::Sleep(200);  // Give it time to actually start the deopt operation.
+
+    // Now let the other thread do a full safepoint operation and wait until
+    // it's done: We want to ensure that we can do normal safepoint operations
+    // while a deopt operation is being started and is waiting for all mutators
+    // to reach an appropriate place where they can be deopted.
+    gc->MarkAndNotify(GcWithoutDeoptTask::kStartSafepointOperation);
+    gc->WaitUntil(GcWithoutDeoptTask::kEndSafepointOperation);
+
+    // We successfully performed a safepoint operation; now ensure the first
+    // thread is still stuck starting its deopt operation.
+    deopt->AssertIsIn(DeoptTask::kStartDeoptOperation);
+
+    // Now we'll let the other thread check in and ensure the deopt operation
+    // proceeded and finished.
+    gc->MarkAndNotify(GcWithoutDeoptTask::kJoinDeoptOperation);
+    gc->WaitUntil(GcWithoutDeoptTask::kDeoptOperationDone);
+    deopt->WaitUntil(DeoptTask::kFinishedDeoptOperation);
+
+    // Make both threads exit the isolate group.
+    deopt->MarkAndNotify(DeoptTask::kPleaseExit);
+    gc->MarkAndNotify(GcWithoutDeoptTask::kPleaseExit);
+
+    deopt->WaitUntil(DeoptTask::kExited);
+    gc->WaitUntil(GcWithoutDeoptTask::kExited);
+  }
+  thread->ExitSafepoint();
+}
+
+class LongDeoptTask : public StateMachineTask {
+ public:
+  enum State {
+    kStartDeoptOperation = StateMachineTask::kNext,
+    kInsideDeoptOperation,
+    kFinishDeoptOperation,
+    kFinishedDeoptOperation,
+  };
+
+  explicit LongDeoptTask(std::shared_ptr<Data> data)
+      : StateMachineTask(std::move(data)) {}
+
+ protected:
+  virtual void RunInternal() {
+    data_->WaitUntil(kStartDeoptOperation);
+    {
+      DeoptSafepointOperationScope safepoint_operation(thread_);
+      data_->MarkAndNotify(kInsideDeoptOperation);
+      data_->WaitUntil(kFinishDeoptOperation);
+    }
+    data_->MarkAndNotify(kFinishedDeoptOperation);
+  }
+};
+
+class WaiterTask : public StateMachineTask {
+ public:
+  enum State {
+    kEnterSafepoint = StateMachineTask::kNext,
+    kInsideSafepoint,
+    kPleaseExitSafepoint,
+    kExitedSafepoint,
+  };
+
+  explicit WaiterTask(std::shared_ptr<Data> data)
+      : StateMachineTask(std::move(data)) {}
+
+ protected:
+  virtual void RunInternal() {
+    data_->WaitUntil(kEnterSafepoint);
+    thread_->EnterSafepoint();
+    data_->MarkAndNotify(kInsideSafepoint);
+    data_->WaitUntil(kPleaseExitSafepoint);
+    thread_->ExitSafepoint();
+    data_->MarkAndNotify(kExitedSafepoint);
+  }
+};
+
+// This test ensures that while a "deopt safepoint operation" is in progress,
+// other threads cannot perform a normal "safepoint operation".
+ISOLATE_UNIT_TEST_CASE(
+    SafepointOperation_SafepointOpBlockedWhileDeoptSafepointOp) {
+  auto isolate_group = thread->isolate_group();
+
+  std::shared_ptr<LongDeoptTask::Data> deopt(
+      new LongDeoptTask::Data(isolate_group));
+  std::shared_ptr<WaiterTask::Data> gc(new WaiterTask::Data(isolate_group));
+
+  thread->EnterSafepoint();
+  {
+    // Will join outstanding threads on destruction.
+    ThreadPool pool;
+
+    pool.Run<LongDeoptTask>(deopt);
+    pool.Run<WaiterTask>(gc);
+
+    // Wait until both threads entered the isolate group.
+    deopt->WaitUntil(LongDeoptTask::kEntered);
+    gc->WaitUntil(WaiterTask::kEntered);
+
+    // Let gc task enter safepoint.
+    gc->MarkAndNotify(WaiterTask::kEnterSafepoint);
+    gc->WaitUntil(WaiterTask::kInsideSafepoint);
+
+    // Now let the "deopt operation" run and block.
+    deopt->MarkAndNotify(LongDeoptTask::kStartDeoptOperation);
+    deopt->WaitUntil(LongDeoptTask::kInsideDeoptOperation);
+
+    // Now let the gc task try to exit the safepoint and do its own safepoint
+    // operation: we expect it to block on exiting the safepoint, since the
+    // deopt operation is still ongoing.
+    gc->MarkAndNotify(WaiterTask::kPleaseExitSafepoint);
+    OS::Sleep(200);
+    gc->AssertIsNotIn(WaiterTask::kExitedSafepoint);
+
+    // Now let's finish the deopt operation & ensure the waiter thread made
+    // progress.
+    deopt->MarkAndNotify(LongDeoptTask::kFinishDeoptOperation);
+    gc->WaitUntil(WaiterTask::kExitedSafepoint);
+
+    // Make both threads exit the isolate group.
+    deopt->MarkAndNotify(LongDeoptTask::kPleaseExit);
+    gc->MarkAndNotify(WaiterTask::kPleaseExit);
+
+    deopt->WaitUntil(LongDeoptTask::kExited);
+    gc->WaitUntil(WaiterTask::kExited);
+  }
+  thread->ExitSafepoint();
+}
+
+class CheckinTask : public StateMachineTask {
+ public:
+  enum State {
+    kStartLoop = StateMachineTask::kNext,
+  };
+
+  struct Data : public StateMachineTask::Data {
+    Data(IsolateGroup* isolate_group,
+         SafepointLevel level,
+         std::atomic<intptr_t>* gc_only_checkins,
+         std::atomic<intptr_t>* deopt_checkin)
+        : StateMachineTask::Data(isolate_group),
+          level(level),
+          gc_only_checkins(gc_only_checkins),
+          deopt_checkin(deopt_checkin) {}
+
+    SafepointLevel level;
+    std::atomic<intptr_t>* gc_only_checkins;
+    std::atomic<intptr_t>* deopt_checkin;
+  };
+
+  explicit CheckinTask(std::shared_ptr<Data> data) : StateMachineTask(data) {}
+
+ protected:
+  Data* data() { return reinterpret_cast<Data*>(data_.get()); }
+
+  virtual void RunInternal() {
+    data_->WaitUntil(kStartLoop);
+
+    uword last_sync = OS::GetCurrentTimeMillis();
+    while (!data()->IsIn(kPleaseExit)) {
+      switch (data()->level) {
+        case SafepointLevel::kGC: {
+          // This thread should join only GC safepoint operations.
+          RuntimeCallDeoptScope no_deopt(
+              Thread::Current(), RuntimeCallDeoptAbility::kCannotLazyDeopt);
+          SafepointIfRequested(thread_, data()->gc_only_checkins);
+          break;
+        }
+        case SafepointLevel::kGCAndDeopt: {
+          // This thread should join any safepoint operation.
+          SafepointIfRequested(thread_, data()->deopt_checkin);
+          break;
+        }
+        case SafepointLevel::kNumLevels:
+          UNREACHABLE();
+      }
+
+      // If we are asked to join a deopt safepoint operation that we cannot
+      // check in to above, comply with it here, but only every 200 ms.
+      const auto now = OS::GetCurrentTimeMillis();
+      if ((now - last_sync) > 200) {
+        thread_->EnterSafepoint();
+        thread_->ExitSafepoint();
+        last_sync = now;
+      }
+    }
+  }
+
+  void SafepointIfRequested(Thread* thread, std::atomic<intptr_t>* checkins) {
+    OS::SleepMicros(10);
+    if (thread->IsSafepointRequested()) {
+      // Cooperate by checking in to the safepoint.
+      thread->BlockForSafepoint();
+      (*checkins)++;
+    }
+  }
+};
+
+// Test that mutators will not check in to "deopt safepoint operations" at
+// places where the mutator cannot deopt (which is indicated by the
+// Thread::runtime_call_deopt_ability_ value).
+ISOLATE_UNIT_TEST_CASE(SafepointOperation_SafepointPointTest) {
+  auto isolate_group = thread->isolate_group();
+
+  const intptr_t kTaskCount = 5;
+  std::atomic<intptr_t> gc_only_checkins[kTaskCount];
+  std::atomic<intptr_t> deopt_checkin[kTaskCount];
+  for (intptr_t i = 0; i < kTaskCount; ++i) {
+    gc_only_checkins[i] = 0;
+    deopt_checkin[i] = 0;
+  }
+
+  std::vector<std::shared_ptr<CheckinTask::Data>> threads;
+  for (intptr_t i = 0; i < kTaskCount; ++i) {
+    const auto level =
+        (i % 2) == 0 ? SafepointLevel::kGC : SafepointLevel::kGCAndDeopt;
+    std::unique_ptr<CheckinTask::Data> data(new CheckinTask::Data(
+        isolate_group, level, &gc_only_checkins[i], &deopt_checkin[i]));
+    threads.push_back(std::move(data));
+  }
+
+  {
+    // Will join outstanding threads on destruction.
+    ThreadPool pool;
+
+    for (intptr_t i = 0; i < kTaskCount; i++) {
+      pool.Run<CheckinTask>(threads[i]);
+    }
+    for (intptr_t i = 0; i < kTaskCount; i++) {
+      threads[i]->WaitUntil(CheckinTask::kEntered);
+    }
+    for (intptr_t i = 0; i < kTaskCount; i++) {
+      threads[i]->MarkAndNotify(CheckinTask::kStartLoop);
+    }
+    {
+      { GcSafepointOperationScope safepoint_operation(thread); }
+      OS::SleepMicros(500);
+      { DeoptSafepointOperationScope safepoint_operation(thread); }
+      OS::SleepMicros(500);
+      { GcSafepointOperationScope safepoint_operation(thread); }
+      OS::SleepMicros(500);
+      { DeoptSafepointOperationScope safepoint_operation(thread); }
+    }
+    for (intptr_t i = 0; i < kTaskCount; i++) {
+      threads[i]->MarkAndNotify(CheckinTask::kPleaseExit);
+    }
+    for (intptr_t i = 0; i < kTaskCount; i++) {
+      threads[i]->WaitUntil(CheckinTask::kExited);
+    }
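+    // Four safepoint operations ran above: two GC and two deopt. A thread at
+    // SafepointLevel::kGC checks in only to the two GC operations; a thread
+    // at kGCAndDeopt checks in to all four.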
+    for (intptr_t i = 0; i < kTaskCount; ++i) {
+      const auto level =
+          (i % 2) == 0 ? SafepointLevel::kGC : SafepointLevel::kGCAndDeopt;
+      switch (level) {
+        case SafepointLevel::kGC:
+          EXPECT_EQ(0, deopt_checkin[i]);
+          EXPECT_EQ(2, gc_only_checkins[i]);
+          break;
+        case SafepointLevel::kGCAndDeopt:
+          EXPECT_EQ(4, deopt_checkin[i]);
+          EXPECT_EQ(0, gc_only_checkins[i]);
+          break;
+        case SafepointLevel::kNumLevels:
+          UNREACHABLE();
+      }
+    }
+  }
+}
+
+class StressTask : public StateMachineTask {
+ public:
+  enum State {
+    kStart = StateMachineTask::kNext,
+  };
+
+  explicit StressTask(std::shared_ptr<Data> data) : StateMachineTask(data) {}
+
+ protected:
+  Data* data() { return reinterpret_cast<Data*>(data_.get()); }
+
+  virtual void RunInternal() {
+    data_->WaitUntil(kStart);
+
+    Random random(thread_->isolate_group()->random()->NextUInt64());
+    while (!data()->IsIn(kPleaseExit)) {
+      const auto us = random.NextUInt32() % 3;
+      switch (random.NextUInt32() % 5) {
+        case 0: {
+          DeoptSafepointOperationScope safepoint_op(thread_);
+          OS::SleepMicros(us);
+          break;
+        }
+        case 1: {
+          GcSafepointOperationScope safepoint_op(thread_);
+          OS::SleepMicros(us);
+          break;
+        }
+        case 2: {
+          const bool kBypassSafepoint = false;
+          Thread::ExitIsolateGroupAsHelper(kBypassSafepoint);
+          OS::SleepMicros(us);
+          Thread::EnterIsolateGroupAsHelper(
+              data_->isolate_group_, Thread::kUnknownTask, kBypassSafepoint);
+          thread_ = Thread::Current();
+          break;
+        }
+        case 3: {
+          thread_->EnterSafepoint();
+          OS::SleepMicros(us);
+          thread_->ExitSafepoint();
+          break;
+        }
+        case 4: {
+          if (thread_->IsSafepointRequested()) {
+            thread_->BlockForSafepoint();
+          }
+          break;
+        }
+      }
+    }
+  }
+};
+
+ISOLATE_UNIT_TEST_CASE(SafepointOperation_StressTest) {
+  auto isolate_group = thread->isolate_group();
+
+  const intptr_t kTaskCount = 5;
+
+  std::vector<std::shared_ptr<StressTask::Data>> threads;
+  for (intptr_t i = 0; i < kTaskCount; ++i) {
+    std::unique_ptr<StressTask::Data> data(new StressTask::Data(isolate_group));
+    threads.push_back(std::move(data));
+  }
+
+  thread->EnterSafepoint();
+  {
+    // Will join outstanding threads on destruction.
+    ThreadPool pool;
+
+    for (intptr_t i = 0; i < kTaskCount; i++) {
+      pool.Run<StressTask>(threads[i]);
+    }
+    for (intptr_t i = 0; i < kTaskCount; i++) {
+      threads[i]->WaitUntil(StressTask::kEntered);
+    }
+    for (intptr_t i = 0; i < kTaskCount; i++) {
+      threads[i]->MarkAndNotify(StressTask::kStart);
+    }
+    OS::Sleep(3 * 1000);
+    for (intptr_t i = 0; i < kTaskCount; i++) {
+      threads[i]->MarkAndNotify(StressTask::kPleaseExit);
+    }
+    for (intptr_t i = 0; i < kTaskCount; i++) {
+      threads[i]->WaitUntil(StressTask::kExited);
+    }
+  }
+  thread->ExitSafepoint();
+}
+
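+// Deopt safepoint operations are stronger than GC safepoint operations: GC
+// scopes may be nested inside deopt scopes (first test below), but nesting a
+// deopt scope inside a GC scope is expected to crash (second test below).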
+ISOLATE_UNIT_TEST_CASE(SafepointOperation_DeoptAndNonDeoptNesting) {
+  {
+    DeoptSafepointOperationScope safepoint_scope(thread);
+    DeoptSafepointOperationScope safepoint_scope2(thread);
+    GcSafepointOperationScope safepoint_scope3(thread);
+    GcSafepointOperationScope safepoint_scope4(thread);
+  }
+  {
+    DeoptSafepointOperationScope safepoint_scope(thread);
+    GcSafepointOperationScope safepoint_scope2(thread);
+  }
+}
+
+ISOLATE_UNIT_TEST_CASE_WITH_EXPECTATION(
+    SafepointOperation_NonDeoptAndDeoptNesting,
+    "Crash") {
+  GcSafepointOperationScope safepoint_scope(thread);
+  DeoptSafepointOperationScope safepoint_scope2(thread);
+}
+
+}  // namespace dart
diff --git a/runtime/vm/heap/scavenger.cc b/runtime/vm/heap/scavenger.cc
index fa584f9..ebdc4c0 100644
--- a/runtime/vm/heap/scavenger.cc
+++ b/runtime/vm/heap/scavenger.cc
@@ -1546,7 +1546,7 @@
   // TODO(koda): Consider moving SafepointThreads into allocation failure/retry
   // logic to avoid needless collections.
   Thread* thread = Thread::Current();
-  SafepointOperationScope safepoint_scope(thread);
+  GcSafepointOperationScope safepoint_scope(thread);
 
   int64_t safe_point = OS::GetCurrentMonotonicMicros();
   heap_->RecordTime(kSafePoint, safe_point - start);
@@ -1784,7 +1784,7 @@
   // The latter means even if the scavenge promotes every object in the new
   // space, the new allocation means the space is not empty,
   // causing the assertion below to fail.
-  SafepointOperationScope scope(Thread::Current());
+  GcSafepointOperationScope scope(Thread::Current());
 
   // Forces the next scavenge to promote all the objects in the new space.
   early_tenure_ = true;
diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc
index 69ae961..34ac317 100644
--- a/runtime/vm/isolate.cc
+++ b/runtime/vm/isolate.cc
@@ -564,9 +564,9 @@
   Thread* thread = nullptr;
   OSThread* os_thread = OSThread::Current();
   if (os_thread != nullptr) {
-    // If a safepoint operation is in progress wait for it
-    // to finish before scheduling this thread in.
-    while (!bypass_safepoint && safepoint_handler()->SafepointInProgress()) {
+    // If a safepoint operation is in progress wait for it to finish before
+    // scheduling this thread.
+    while (!bypass_safepoint && safepoint_handler()->AnySafepointInProgress()) {
       ml->Wait();
     }
 
@@ -653,7 +653,8 @@
   thread->heap_ = nullptr;
   thread->set_os_thread(nullptr);
   thread->set_execution_state(Thread::kThreadInNative);
-  thread->set_safepoint_state(Thread::SetAtSafepoint(true, 0));
+  thread->set_safepoint_state(Thread::AtSafepointField::encode(true) |
+                              Thread::AtDeoptSafepointField::encode(true));
   thread->clear_pending_functions();
   ASSERT(thread->no_safepoint_scope_depth() == 0);
   if (is_mutator) {
@@ -923,7 +924,7 @@
   if (need_to_grow_backing_store) {
     // We have to stop other isolates from accessing their field state, since
     // we'll have to grow the backing store.
-    SafepointOperationScope ops(Thread::Current());
+    GcSafepointOperationScope scope(Thread::Current());
     for (auto isolate : isolates_) {
       auto field_table = isolate->field_table();
       if (field_table->IsReadyToUse()) {
@@ -2106,7 +2107,7 @@
 
 void IsolateGroup::DeleteReloadContext() {
   // Another thread may be in the middle of GetClassForHeapWalkAt.
-  SafepointOperationScope safepoint_scope(Thread::Current());
+  GcSafepointOperationScope safepoint_scope(Thread::Current());
   group_reload_context_.reset();
 
   delete program_reload_context_;
@@ -2795,10 +2796,11 @@
   // all other threads, including auxiliary threads are at a safepoint), even
   // though we only need to ensure that the mutator threads are stopped.
   if (use_force_growth_in_otherwise) {
-    ForceGrowthSafepointOperationScope safepoint_scope(thread);
+    ForceGrowthSafepointOperationScope safepoint_scope(thread,
+                                                       SafepointLevel::kGC);
     otherwise->Call();
   } else {
-    SafepointOperationScope safepoint_scope(thread);
+    GcSafepointOperationScope safepoint_scope(thread);
     otherwise->Call();
   }
 }
diff --git a/runtime/vm/lockers.cc b/runtime/vm/lockers.cc
index c3b35f3..6cee868 100644
--- a/runtime/vm/lockers.cc
+++ b/runtime/vm/lockers.cc
@@ -27,8 +27,7 @@
     // Fast update failed which means we could potentially be in the middle
     // of a safepoint operation and need to block for it.
     monitor_->Exit();
-    SafepointHandler* handler = thread->isolate_group()->safepoint_handler();
-    handler->ExitSafepointUsingLock(thread);
+    thread->ExitSafepointUsingLock();
     monitor_->Enter();
   }
   thread->set_execution_state(Thread::kThreadInVM);
diff --git a/runtime/vm/native_api_impl.cc b/runtime/vm/native_api_impl.cc
index 9ed7641..e472564 100644
--- a/runtime/vm/native_api_impl.cc
+++ b/runtime/vm/native_api_impl.cc
@@ -263,7 +263,7 @@
     Thread::EnterIsolateAsHelper(args->isolate, Thread::TaskKind::kUnknownTask);
     Thread* const thread = Thread::Current();
     {
-      SafepointOperationScope scope(thread);
+      GcSafepointOperationScope scope(thread);
       args->isolate->group()->heap()->WriteProtectCode(/*read_only=*/false);
       (*args->callback)();
       args->isolate->group()->heap()->WriteProtectCode(/*read_only=*/true);
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index b013c63..343bf91 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -3931,9 +3931,10 @@
 }
 
 #if defined(DEBUG)
-static bool IsMutatorOrAtSafepoint() {
+static bool IsMutatorOrAtDeoptSafepoint() {
   Thread* thread = Thread::Current();
-  return thread->IsMutatorThread() || thread->IsAtSafepoint();
+  return thread->IsMutatorThread() ||
+         thread->IsAtSafepoint(SafepointLevel::kGCAndDeopt);
 }
 #endif
 
@@ -3978,7 +3979,7 @@
               Function::Handle(code.function()).ToQualifiedCString(),
               ToCString());
   }
-  DEBUG_ASSERT(IsMutatorOrAtSafepoint());
+  DEBUG_ASSERT(IsMutatorOrAtDeoptSafepoint());
   ASSERT(code.is_optimized());
   CHACodeArray a(*this);
   a.Register(code);
@@ -7060,7 +7061,7 @@
 void Function::SetInstructions(const Code& value) const {
   // Ensure that nobody is executing this function when we install it.
   if (untag()->code() != Code::null() && HasCode()) {
-    SafepointOperationScope safepoint(Thread::Current());
+    GcSafepointOperationScope safepoint(Thread::Current());
     SetInstructionsSafe(value);
   } else {
     ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter());
@@ -7189,7 +7190,7 @@
 #if defined(DART_PRECOMPILED_RUNTIME)
   UNREACHABLE();
 #else
-  DEBUG_ASSERT(IsMutatorOrAtSafepoint());
+  DEBUG_ASSERT(IsMutatorOrAtDeoptSafepoint());
   ASSERT(value.IsNull() || !value.is_optimized());
   untag()->set_unoptimized_code(value.ptr());
 #endif
@@ -10813,7 +10814,7 @@
 
 void Field::RegisterDependentCode(const Code& code) const {
   ASSERT(IsOriginal());
-  DEBUG_ASSERT(IsMutatorOrAtSafepoint());
+  DEBUG_ASSERT(IsMutatorOrAtDeoptSafepoint());
   ASSERT(code.is_optimized());
   FieldDependentArray a(*this);
   a.Register(code);
@@ -17270,7 +17271,7 @@
 }
 
 void Code::DisableDartCode() const {
-  SafepointOperationScope safepoint(Thread::Current());
+  GcSafepointOperationScope safepoint(Thread::Current());
   ASSERT(IsFunctionCode());
   ASSERT(instructions() == active_instructions());
   const Code& new_code = StubCode::FixCallersTarget();
@@ -17279,7 +17280,7 @@
 }
 
 void Code::DisableStubCode() const {
-  SafepointOperationScope safepoint(Thread::Current());
+  GcSafepointOperationScope safepoint(Thread::Current());
   ASSERT(IsAllocationStubCode());
   ASSERT(instructions() == active_instructions());
   const Code& new_code = StubCode::FixAllocationStubTarget();
diff --git a/runtime/vm/object_graph_test.cc b/runtime/vm/object_graph_test.cc
index 6e968dd..cfb022e 100644
--- a/runtime/vm/object_graph_test.cc
+++ b/runtime/vm/object_graph_test.cc
@@ -61,7 +61,7 @@
   intptr_t d_size = d.ptr()->untag()->HeapSize();
   {
     // No more allocation; raw pointers ahead.
-    SafepointOperationScope safepoint(thread);
+    GcSafepointOperationScope safepoint(thread);
     ObjectPtr b_raw = b.ptr();
     // Clear handles to cut unintended retained paths.
     b = Array::null();
diff --git a/runtime/vm/runtime_entry.cc b/runtime/vm/runtime_entry.cc
index 1e2b0de..cc62255 100644
--- a/runtime/vm/runtime_entry.cc
+++ b/runtime/vm/runtime_entry.cc
@@ -1116,7 +1116,7 @@
                                   caller_frame->pc(), caller_code));
   if (target_code.ptr() !=
       CodePatcher::GetStaticCallTargetAt(caller_frame->pc(), caller_code)) {
-    SafepointOperationScope safepoint(thread);
+    GcSafepointOperationScope safepoint(thread);
     if (target_code.ptr() !=
         CodePatcher::GetStaticCallTargetAt(caller_frame->pc(), caller_code)) {
       CodePatcher::PatchStaticCallAt(caller_frame->pc(), caller_code,
diff --git a/runtime/vm/runtime_entry.h b/runtime/vm/runtime_entry.h
index 3120254..bbcca7d 100644
--- a/runtime/vm/runtime_entry.h
+++ b/runtime/vm/runtime_entry.h
@@ -94,7 +94,7 @@
 
 // Helper macros for declaring and defining runtime entries.
 
-#define DEFINE_RUNTIME_ENTRY(name, argument_count)                             \
+#define DEFINE_RUNTIME_ENTRY_IMPL(name, argument_count, can_lazy_deopt)        \
   extern void DRT_##name(NativeArguments arguments);                           \
   extern const RuntimeEntry k##name##RuntimeEntry(                             \
       "DRT_" #name, &DRT_##name, argument_count, false, false);                \
@@ -109,6 +109,9 @@
     {                                                                          \
       Thread* thread = arguments.thread();                                     \
       ASSERT(thread == Thread::Current());                                     \
+      RuntimeCallDeoptScope runtime_call_deopt_scope(                          \
+          thread, can_lazy_deopt ? RuntimeCallDeoptAbility::kCanLazyDeopt      \
+                                 : RuntimeCallDeoptAbility::kCannotLazyDeopt); \
       Isolate* isolate = thread->isolate();                                    \
       TransitionGeneratedToVM transition(thread);                              \
       StackZone zone(thread);                                                  \
@@ -123,6 +126,12 @@
   static void DRT_Helper##name(Isolate* isolate, Thread* thread, Zone* zone,   \
                                NativeArguments arguments)
 
+#define DEFINE_RUNTIME_ENTRY(name, argument_count)                             \
+  DEFINE_RUNTIME_ENTRY_IMPL(name, argument_count, /*can_lazy_deopt=*/true)
+
+#define DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(name, argument_count)               \
+  DEFINE_RUNTIME_ENTRY_IMPL(name, argument_count, /*can_lazy_deopt=*/false)
+
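+// A usage sketch (with a hypothetical entry name): runtime calls that must
+// not trigger a lazy deopt of their caller use the _NO_LAZY_DEOPT variant,
+// which makes the generated wrapper install a RuntimeCallDeoptScope with
+// RuntimeCallDeoptAbility::kCannotLazyDeopt around the body:
+//
+//   DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(ExampleEntry, 0) {
+//     // Body runs with the thread's safepoint level lowered to kGC.
+//   }
+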
 #define DECLARE_RUNTIME_ENTRY(name)                                            \
   extern const RuntimeEntry k##name##RuntimeEntry;                             \
   extern void DRT_##name(NativeArguments arguments);
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index abbddba..dcf8d0b 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -25,6 +25,7 @@
 #include "vm/runtime_entry_list.h"
 #include "vm/thread_stack_resource.h"
 #include "vm/thread_state.h"
+
 namespace dart {
 
 class AbstractType;
@@ -233,6 +234,28 @@
   kDontValidateFrames = 1,
 };
 
+enum class RuntimeCallDeoptAbility {
+  // There was no leaf call, or there was a leaf call after which the VM may
+  // lazily deoptimize the caller.
+  kCanLazyDeopt,
+  // There was a leaf call after which the VM must not lazily deoptimize the
+  // caller.
+  kCannotLazyDeopt,
+};
+
+// The safepoint level a thread is on, or that a safepoint operation is
+// requested for.
+//
+// The higher the number, the stronger the guarantees:
+//   * the time-to-safepoint latency increases with the level
+//   * the frequency of hitting possible safe points decreases with the level
+enum SafepointLevel {
+  // Safe to GC
+  kGC,
+  // Safe to GC as well as Deopt.
+  kGCAndDeopt,
+  // Number of levels.
+  kNumLevels,
+};
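+// For example, a mutator inside a RuntimeCallDeoptScope created with
+// RuntimeCallDeoptAbility::kCannotLazyDeopt runs at SafepointLevel::kGC: it
+// checks in to GC safepoint operations but not to deopt safepoint operations
+// (see Thread::current_safepoint_level()).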
+
 // A VM thread; may be executing Dart code or performing helper tasks like
 // garbage collection or compilation. The Thread structure associated with
 // a thread is allocated by EnsureInit before entering an isolate, and destroyed
@@ -727,9 +750,14 @@
    * - Bit 0 of the safepoint_state_ field is used to indicate if the thread is
    *   already at a safepoint,
    * - Bit 1 of the safepoint_state_ field is used to indicate if a safepoint
-   *   operation is requested for this thread.
-   * - Bit 2 of the safepoint_state_ field is used to indicate that the thread
-   *   is blocked for the safepoint operation to complete.
+   *   is requested for this thread.
+   * - Bit 2 of the safepoint_state_ field is used to indicate if the thread is
+   *   already at a deopt safepoint,
+   * - Bit 3 of the safepoint_state_ field is used to indicate if a deopt
+   *   safepoint is requested for this thread.
+   * - Bit 4 of the safepoint_state_ field is used to indicate that the thread
+   *   is blocked at a (deopt) safepoint and has to be woken up once the
+   *   (deopt) safepoint operation is complete.
    *
    * The safepoint execution state (described above) for a thread is stored in
    * in the execution_state_ field.
@@ -739,35 +767,68 @@
    *   kThreadInNative - The thread is running native code.
    *   kThreadInBlockedState - The thread is blocked waiting for a resource.
    */
-  static bool IsAtSafepoint(uword state) {
-    return AtSafepointField::decode(state);
+  static bool IsAtSafepoint(SafepointLevel level, uword state) {
+    const uword mask = AtSafepointBits(level);
+    return (state & mask) == mask;
   }
   bool IsAtSafepoint() const {
-    return AtSafepointField::decode(safepoint_state_);
+    return IsAtSafepoint(current_safepoint_level());
   }
-  static uword SetAtSafepoint(bool value, uword state) {
-    return AtSafepointField::update(value, state);
+  bool IsAtSafepoint(SafepointLevel level) const {
+    return IsAtSafepoint(level, safepoint_state_.load());
   }
   void SetAtSafepoint(bool value) {
     ASSERT(thread_lock()->IsOwnedByCurrentThread());
-    safepoint_state_ = AtSafepointField::update(value, safepoint_state_);
+    if (value) {
+      safepoint_state_ |= AtSafepointBits(current_safepoint_level());
+    } else {
+      safepoint_state_ &= ~AtSafepointBits(current_safepoint_level());
+    }
+  }
+  bool IsSafepointRequestedLocked() const {
+    ASSERT(thread_lock()->IsOwnedByCurrentThread());
+    return IsSafepointRequested();
   }
   bool IsSafepointRequested() const {
-    return SafepointRequestedField::decode(safepoint_state_);
+    const uword state = safepoint_state_.load();
+    for (intptr_t level = current_safepoint_level(); level >= 0; --level) {
+      if (IsSafepointLevelRequested(state, static_cast<SafepointLevel>(level)))
+        return true;
+    }
+    return false;
   }
-  static uword SetSafepointRequested(bool value, uword state) {
-    return SafepointRequestedField::update(value, state);
-  }
-  uword SetSafepointRequested(bool value) {
+  bool IsSafepointLevelRequestedLocked(SafepointLevel level) const {
     ASSERT(thread_lock()->IsOwnedByCurrentThread());
+    if (level > current_safepoint_level()) return false;
+    const uword state = safepoint_state_.load();
+    return IsSafepointLevelRequested(state, level);
+  }
+
+  static bool IsSafepointLevelRequested(uword state, SafepointLevel level) {
+    switch (level) {
+      case SafepointLevel::kGC:
+        return (state & SafepointRequestedField::mask_in_place()) != 0;
+      case SafepointLevel::kGCAndDeopt:
+        return (state & DeoptSafepointRequestedField::mask_in_place()) != 0;
+      case SafepointLevel::kNumLevels:
+        UNREACHABLE();
+    }
+  }
+
+  void BlockForSafepoint();
+  uword SetSafepointRequested(SafepointLevel level, bool value) {
+    ASSERT(thread_lock()->IsOwnedByCurrentThread());
+
+    const uword mask = level == SafepointLevel::kGC
+                           ? SafepointRequestedField::mask_in_place()
+                           : DeoptSafepointRequestedField::mask_in_place();
+
     if (value) {
       // acquire pulls from the release in TryEnterSafepoint.
-      return safepoint_state_.fetch_or(SafepointRequestedField::encode(true),
-                                       std::memory_order_acquire);
+      return safepoint_state_.fetch_or(mask, std::memory_order_acquire);
     } else {
       // release pushes to the acquire in TryExitSafepoint.
-      return safepoint_state_.fetch_and(~SafepointRequestedField::encode(true),
-                                        std::memory_order_release);
+      return safepoint_state_.fetch_and(~mask, std::memory_order_release);
     }
   }
   static bool IsBlockedForSafepoint(uword state) {
@@ -815,12 +876,21 @@
            (execution_state() == kThreadInGenerated);
   }
 
-  static uword safepoint_state_unacquired() { return SetAtSafepoint(false, 0); }
-  static uword safepoint_state_acquired() { return SetAtSafepoint(true, 0); }
+  static uword full_safepoint_state_unacquired() {
+    return (0 << AtSafepointField::shift()) |
+           (0 << AtDeoptSafepointField::shift());
+  }
+  static uword full_safepoint_state_acquired() {
+    return (1 << AtSafepointField::shift()) |
+           (1 << AtDeoptSafepointField::shift());
+  }
 
   bool TryEnterSafepoint() {
     uword old_state = 0;
-    uword new_state = SetAtSafepoint(true, 0);
+    uword new_state = AtSafepointField::encode(true);
+    if (current_safepoint_level() == SafepointLevel::kGCAndDeopt) {
+      new_state |= AtDeoptSafepointField::encode(true);
+    }
     return safepoint_state_.compare_exchange_strong(old_state, new_state,
                                                     std::memory_order_release);
   }
@@ -837,7 +907,10 @@
   }
 
   bool TryExitSafepoint() {
-    uword old_state = SetAtSafepoint(true, 0);
+    uword old_state = AtSafepointField::encode(true);
+    if (current_safepoint_level() == SafepointLevel::kGCAndDeopt) {
+      old_state |= AtDeoptSafepointField::encode(true);
+    }
     uword new_state = 0;
     return safepoint_state_.compare_exchange_strong(old_state, new_state,
                                                     std::memory_order_acquire);
@@ -854,6 +927,8 @@
   }
 
   void CheckForSafepoint() {
+    // If we are in a runtime call that doesn't support lazy deopt, we will
+    // only respond to GC safepoint requests.
     ASSERT(no_safepoint_scope_depth() == 0);
     if (IsSafepointRequested()) {
       BlockForSafepoint();
@@ -918,6 +993,13 @@
 
   PendingDeopts& pending_deopts() { return pending_deopts_; }
 
+  SafepointLevel current_safepoint_level() const {
+    return runtime_call_deopt_ability_ ==
+                   RuntimeCallDeoptAbility::kCannotLazyDeopt
+               ? SafepointLevel::kGC
+               : SafepointLevel::kGCAndDeopt;
+  }
+
  private:
   template <class T>
   T* AllocateReusableHandle();
@@ -1025,6 +1107,8 @@
   uint32_t runtime_call_count_ = 0;
 
   // Deoptimization of stack frames.
+  RuntimeCallDeoptAbility runtime_call_deopt_ability_ =
+      RuntimeCallDeoptAbility::kCanLazyDeopt;
   PendingDeopts pending_deopts_;
 
   // Compiler state:
@@ -1054,11 +1138,32 @@
 #undef REUSABLE_HANDLE_SCOPE_VARIABLE
 #endif  // defined(DEBUG)
 
-  // Generated code assumes that AtSafepointField is the LSB.
   class AtSafepointField : public BitField<uword, bool, 0, 1> {};
-  class SafepointRequestedField : public BitField<uword, bool, 1, 1> {};
-  class BlockedForSafepointField : public BitField<uword, bool, 2, 1> {};
-  class BypassSafepointsField : public BitField<uword, bool, 3, 1> {};
+  class SafepointRequestedField
+      : public BitField<uword, bool, AtSafepointField::kNextBit, 1> {};
+  class AtDeoptSafepointField
+      : public BitField<uword, bool, SafepointRequestedField::kNextBit, 1> {};
+  class DeoptSafepointRequestedField
+      : public BitField<uword, bool, AtDeoptSafepointField::kNextBit, 1> {};
+  class BlockedForSafepointField
+      : public BitField<uword,
+                        bool,
+                        DeoptSafepointRequestedField::kNextBit,
+                        1> {};
+  class BypassSafepointsField
+      : public BitField<uword, bool, BlockedForSafepointField::kNextBit, 1> {};
+
+  static uword AtSafepointBits(SafepointLevel level) {
+    switch (level) {
+      case SafepointLevel::kGC:
+        return AtSafepointField::mask_in_place();
+      case SafepointLevel::kGCAndDeopt:
+        return AtSafepointField::mask_in_place() |
+               AtDeoptSafepointField::mask_in_place();
+      case SafepointLevel::kNumLevels:
+        UNREACHABLE();
+    }
+  }
 
 #if defined(USING_SAFE_STACK)
   uword saved_safestack_limit_;
@@ -1085,7 +1190,6 @@
   void set_safepoint_state(uint32_t value) { safepoint_state_ = value; }
   void EnterSafepointUsingLock();
   void ExitSafepointUsingLock();
-  void BlockForSafepoint();
 
   void FinishEntering(TaskKind kind);
   void PrepareLeaving();
@@ -1116,12 +1220,38 @@
   friend class CompilerState;
   friend class compiler::target::Thread;
   friend class FieldTable;
+  friend class RuntimeCallDeoptScope;
+  friend class
+      TransitionGeneratedToVM;  // IsSafepointRequested/BlockForSafepoint
+  friend class
+      TransitionVMToGenerated;  // IsSafepointRequested/BlockForSafepoint
+  friend class MonitorLocker;   // ExitSafepointUsingLock
   friend Isolate* CreateWithinExistingIsolateGroup(IsolateGroup*,
                                                    const char*,
                                                    char**);
   DISALLOW_COPY_AND_ASSIGN(Thread);
 };
 
+class RuntimeCallDeoptScope : public StackResource {
+ public:
+  RuntimeCallDeoptScope(Thread* thread, RuntimeCallDeoptAbility kind)
+      : StackResource(thread) {
+    // We cannot have nested calls into the VM without deopt support.
+    ASSERT(thread->runtime_call_deopt_ability_ ==
+           RuntimeCallDeoptAbility::kCanLazyDeopt);
+    thread->runtime_call_deopt_ability_ = kind;
+  }
+  virtual ~RuntimeCallDeoptScope() {
+    thread()->runtime_call_deopt_ability_ =
+        RuntimeCallDeoptAbility::kCanLazyDeopt;
+  }
+
+ private:
+  Thread* thread() {
+    return reinterpret_cast<Thread*>(StackResource::thread());
+  }
+};
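+// A minimal sketch of the intended use (mirrors DEFINE_RUNTIME_ENTRY_IMPL in
+// runtime_entry.h): wrap code that must not observe a lazy deopt, which
+// lowers Thread::current_safepoint_level() to SafepointLevel::kGC for the
+// scope's lifetime:
+//
+//   {
+//     RuntimeCallDeoptScope scope(Thread::Current(),
+//                                 RuntimeCallDeoptAbility::kCannotLazyDeopt);
+//     // ... may run a GC safepoint operation here, but not a deopt one ...
+//   }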
+
 #if defined(HOST_OS_WINDOWS)
 // Clears the state of the current thread and frees the allocation.
 void WindowsThreadCleanUp();
diff --git a/runtime/vm/thread_test.cc b/runtime/vm/thread_test.cc
index 636aaa3..24cb5b9 100644
--- a/runtime/vm/thread_test.cc
+++ b/runtime/vm/thread_test.cc
@@ -675,13 +675,13 @@
 ISOLATE_UNIT_TEST_CASE(RecursiveSafepointTest1) {
   intptr_t count = 0;
   {
-    SafepointOperationScope safepoint_scope(thread);
+    GcSafepointOperationScope safepoint_scope(thread);
     count += 1;
     {
-      SafepointOperationScope safepoint_scope(thread);
+      GcSafepointOperationScope safepoint_scope(thread);
       count += 1;
       {
-        SafepointOperationScope safepoint_scope(thread);
+        GcSafepointOperationScope safepoint_scope(thread);
         count += 1;
       }
     }
@@ -785,7 +785,7 @@
   }
   bool all_helpers = false;
   do {
-    SafepointOperationScope safepoint_scope(thread);
+    GcSafepointOperationScope safepoint_scope(thread);
     {
       MonitorLocker ml(&monitor);
       if (expected_count == SafepointTestTask::kTaskCount) {
@@ -816,9 +816,9 @@
   }
   bool all_helpers = false;
   do {
-    SafepointOperationScope safepoint_scope(thread);
+    GcSafepointOperationScope safepoint_scope(thread);
     {
-      SafepointOperationScope safepoint_scope(thread);
+      GcSafepointOperationScope safepoint_scope(thread);
       MonitorLocker ml(&monitor);
       if (expected_count == SafepointTestTask::kTaskCount) {
         all_helpers = true;
@@ -830,9 +830,9 @@
   isolate->set_current_tag(tag);
   bool all_exited = false;
   do {
-    SafepointOperationScope safepoint_scope(thread);
+    GcSafepointOperationScope safepoint_scope(thread);
     {
-      SafepointOperationScope safepoint_scope(thread);
+      GcSafepointOperationScope safepoint_scope(thread);
       MonitorLocker ml(&monitor);
       if (exited == SafepointTestTask::kTaskCount) {
         all_exited = true;
diff --git a/tools/VERSION b/tools/VERSION
index 7aae026..400d307 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
 MAJOR 2
 MINOR 14
 PATCH 0
-PRERELEASE 98
+PRERELEASE 99
 PRERELEASE_PATCH 0
\ No newline at end of file