[vm, gc] Incremental compaction.

At the beginning of a major GC cycle, select some mostly-empty pages to be evacuated. Mark these pages and the objects on them as evacuation candidates. Apply a write barrier to stores that create old -> evacuation-candidate pointers, and discover any such pointers that already exist during marking.
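
A self-contained toy model of the combined barrier test may make this concrete (the bit positions and the helper name here are illustrative, not the VM's actual layout; the real fast path is the tag-mask check documented in runtime/docs/gc.md, updated below):

```c++
#include <cstdint>

// Toy header-bit layout satisfying the same overlap invariants as the VM's.
enum HeaderBits {
  kNotMarkedBit = 0,                 // Incremental barrier target.
  kNewOrEvacuationCandidateBit = 1,  // Generational barrier target.
  kAlwaysSetBit = 2,                 // Incremental barrier source.
  kOldAndNotRememberedBit = 3,       // Generational barrier source.
};
constexpr uintptr_t kBarrierOverlapShift = 2;
static_assert(kNotMarkedBit + kBarrierOverlapShift == kAlwaysSetBit,
              "source/target bits must overlap after the shift");
static_assert(kNewOrEvacuationCandidateBit + kBarrierOverlapShift ==
                  kOldAndNotRememberedBit,
              "source/target bits must overlap after the shift");

// A store needs the barrier slow path when a "source" bit in the storing
// object lines up (after the overlap shift) with a "target" bit in the stored
// value, filtered by the thread's current barrier mask. Because the former
// kNewBit now also covers old objects on evacuation-candidate pages, the same
// test catches stores that create old -> evacuation-candidate pointers.
bool NeedsBarrierSlowPath(uintptr_t source_tags,
                          uintptr_t target_tags,
                          uintptr_t barrier_mask) {
  return ((source_tags >> kBarrierOverlapShift) & target_tags &
          barrier_mask) != 0;
}
```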

At the end of a major GC cycle, evacuate objects from these pages. Forward pointers of objects in the remembered set and new-space. Free the evacuated pages.
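
These steps map onto the new GCIncrementalCompactor::Epilogue roughly as follows (a condensed sketch; the verification passes and the parallel task plumbing in incremental_compactor.cc are omitted):

```c++
// Condensed sketch of the stop-the-world epilogue, not the full code.
bool Epilogue(PageSpace* old_space) {
  if (!HasEvacuationCandidates(old_space)) return false;
  old_space->MakeIterable();
  // Copy each marked object off its candidate page, leaving a forwarding
  // corpse at the old address, then fix up the only places that can still
  // point at the old copies: the remembered set, remembered cards, new-space,
  // roots, and weak handles/tables.
  Evacuate(old_space);
  // Candidate pages now contain only corpses and free space.
  FreeEvacuatedPages(old_space);
  return true;
}
```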

This compaction is incremental in the sense that creating the remembered set is interleaved with mutator execution. The evacuation step, however, is stop-the-world.
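
Schematically, the hooks sit in a major GC cycle as sketched below (the driver function is illustrative, not an actual call site; the abort path is the one added to heap.cc in this change):

```c++
// Illustrative timeline of one major GC cycle with incremental compaction.
void MajorGCCycle(Heap* heap) {
  PageSpace* old_space = heap->old_space();
  GCIncrementalCompactor::Prologue(old_space);  // Select candidate pages.
  // ... concurrent marking runs here; the mutator keeps executing while the
  // write barrier records stores that create old -> candidate pointers ...
  GCIncrementalCompactor::Epilogue(old_space);  // Stop-the-world evacuation.
}

// If the store buffer grows too large mid-cycle, the triggered scavenge also
// abandons the in-progress incremental compaction:
//   GCIncrementalCompactor::Abort(old_space());
```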

Write-barrier elimination for x.slot = x is removed. Write-barrier elimination for x.slot = constant is removed in the JIT; it is kept for AOT, where pages created by snapshot loading are marked as never-evacuate so constants cannot move.
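
As a toy illustration of why the self-store elimination is no longer sound (plain C++ structs, not VM types):

```c++
// If x lives on an evacuation-candidate page, the self-pointer written into
// x->slot refers to x's current address. The write barrier must add x to the
// remembered set; otherwise, after x is evacuated, nothing forwards the
// copied slot and it keeps pointing at the forwarding corpse left behind.
struct Node {
  Node* slot = nullptr;
};

void SelfStore(Node* x) {
  x->slot = x;  // In the VM, this store now goes through the write barrier.
}
```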

TEST=ci
Bug: https://github.com/dart-lang/sdk/issues/52513
Change-Id: Icbc29ef7cb662ef8759b8c1d7a63b7af60766281
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/357760
Reviewed-by: Alexander Aprelev <aam@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
diff --git a/runtime/docs/gc.md b/runtime/docs/gc.md
index 819696d..fa233c4 100644
--- a/runtime/docs/gc.md
+++ b/runtime/docs/gc.md
@@ -95,18 +95,18 @@
 ```c++
 enum HeaderBits {
   ...
-  kNotMarkedBit,            // Incremental barrier target.
-  kNewBit,                  // Generational barrier target.
-  kAlwaysSetBit,            // Incremental barrier source.
-  kOldAndNotRememberedBit,  // Generational barrier source.
+  kNotMarkedBit,                 // Incremental barrier target.
+  kNewOrEvacuationCandidateBit,  // Generational barrier target.
+  kAlwaysSetBit,                 // Incremental barrier source.
+  kOldAndNotRememberedBit,       // Generational barrier source.
   ...
 };
 
-static constexpr intptr_t kGenerationalBarrierMask = 1 << kNewBit;
+static constexpr intptr_t kGenerationalBarrierMask = 1 << kNewOrEvacuationCandidateBit;
 static constexpr intptr_t kIncrementalBarrierMask = 1 << kNotMarkedBit;
 static constexpr intptr_t kBarrierOverlapShift = 2;
 COMPILE_ASSERT(kNotMarkedBit + kBarrierOverlapShift == kAlwaysSetBit);
-COMPILE_ASSERT(kNewBit + kBarrierOverlapShift == kOldAndNotRememberedBit);
+COMPILE_ASSERT(kNewOrEvacuationCandidateBit + kBarrierOverlapShift == kOldAndNotRememberedBit);
 
 StorePointer(ObjectPtr source, ObjectPtr* slot, ObjectPtr target) {
   *slot = target;
@@ -178,7 +178,6 @@
 * `value` is a constant. Constants are always old, and they will be marked via the constant pools even if we fail to mark them via `container`.
 * `value` has the static type bool. All possible values of the bool type (null, false, true) are constants.
 * `value` is known to be a Smi. Smis are not heap objects.
-* `container` is the same object as `value`. The GC never needs to retain an additional object if it sees a self-reference, so ignoring a self-reference cannot cause us to free a reachable object.
 * `container` is known to be a new object or known to be an old object that is in the remembered set and is marked if marking is in progress.
 
 We can know that `container` meets the last property if `container` is the result of an allocation (instead of a heap load), and there is no instruction that can trigger a GC between the allocation and the store. This is because the allocation stubs ensure the result of AllocateObject is either a new-space object (common case, bump pointer allocation succeeds), or has been preemptively added to the remembered set and marking worklist (uncommon case, entered runtime to allocate object, possibly triggering GC).
diff --git a/runtime/platform/atomic.h b/runtime/platform/atomic.h
index 8ca44ea..cbba54e 100644
--- a/runtime/platform/atomic.h
+++ b/runtime/platform/atomic.h
@@ -80,8 +80,8 @@
   }
   T operator+=(T arg) { return fetch_add(arg) + arg; }
   T operator-=(T arg) { return fetch_sub(arg) - arg; }
-  T& operator++() { return fetch_add(1) + 1; }
-  T& operator--() { return fetch_sub(1) - 1; }
+  T operator++() { return fetch_add(1) + 1; }
+  T operator--() { return fetch_sub(1) - 1; }
   T operator++(int) { return fetch_add(1); }
   T operator--(int) { return fetch_sub(1); }
 
diff --git a/runtime/vm/app_snapshot.cc b/runtime/vm/app_snapshot.cc
index 29b2ae7..1d24501 100644
--- a/runtime/vm/app_snapshot.cc
+++ b/runtime/vm/app_snapshot.cc
@@ -893,7 +893,7 @@
   tags = UntaggedObject::AlwaysSetBit::update(true, tags);
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(true, tags);
-  tags = UntaggedObject::NewBit::update(false, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(false, tags);
   tags = UntaggedObject::ImmutableBit::update(is_immutable, tags);
   raw->untag()->tags_ = tags;
 }
diff --git a/runtime/vm/bitfield.h b/runtime/vm/bitfield.h
index 4fe3be1..5e73e1f 100644
--- a/runtime/vm/bitfield.h
+++ b/runtime/vm/bitfield.h
@@ -36,6 +36,9 @@
   }
 
   T load(std::memory_order order) const { return field_.load(order); }
+  NO_SANITIZE_THREAD T load_ignore_race() const {
+    return *reinterpret_cast<const T*>(&field_);
+  }
   void store(T value, std::memory_order order) { field_.store(value, order); }
 
   bool compare_exchange_weak(T old_tags, T new_tags, std::memory_order order) {
@@ -48,11 +51,6 @@
     return TargetBitField::decode(field_.load(order));
   }
 
-  template <class TargetBitField>
-  NO_SANITIZE_THREAD typename TargetBitField::Type ReadIgnoreRace() const {
-    return TargetBitField::decode(*reinterpret_cast<const T*>(&field_));
-  }
-
   template <class TargetBitField,
             std::memory_order order = std::memory_order_relaxed>
   void UpdateBool(bool value) {
diff --git a/runtime/vm/compiler/assembler/assembler_arm.cc b/runtime/vm/compiler/assembler/assembler_arm.cc
index 888d397..a4258ca 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm.cc
@@ -1933,7 +1933,7 @@
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   ldrb(TMP, FieldAddress(value, target::Object::tags_offset()));
-  tst(TMP, Operand(1 << target::UntaggedObject::kNewBit));
+  tst(TMP, Operand(1 << target::UntaggedObject::kNewOrEvacuationCandidateBit));
   b(&done, ZERO);
   ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
   tst(TMP, Operand(1 << target::UntaggedObject::kOldAndNotRememberedBit));
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc
index a3adb29..d00ddc3 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -1231,7 +1231,7 @@
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   ldr(TMP, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
-  tbz(&done, TMP, target::UntaggedObject::kNewBit);
+  tbz(&done, TMP, target::UntaggedObject::kNewOrEvacuationCandidateBit);
   ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
   tbz(&done, TMP, target::UntaggedObject::kOldAndNotRememberedBit);
   Stop("Write barrier is required");
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.cc b/runtime/vm/compiler/assembler/assembler_ia32.cc
index 90c5177..1836678 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.cc
+++ b/runtime/vm/compiler/assembler/assembler_ia32.cc
@@ -2207,7 +2207,7 @@
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   testb(FieldAddress(value, target::Object::tags_offset()),
-        Immediate(1 << target::UntaggedObject::kNewBit));
+        Immediate(1 << target::UntaggedObject::kNewOrEvacuationCandidateBit));
   j(ZERO, &done, Assembler::kNearJump);
   testb(FieldAddress(object, target::Object::tags_offset()),
         Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
diff --git a/runtime/vm/compiler/assembler/assembler_riscv.cc b/runtime/vm/compiler/assembler/assembler_riscv.cc
index 6b8eb2e..aa0f6a1 100644
--- a/runtime/vm/compiler/assembler/assembler_riscv.cc
+++ b/runtime/vm/compiler/assembler/assembler_riscv.cc
@@ -3518,7 +3518,7 @@
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
-  andi(TMP2, TMP2, 1 << target::UntaggedObject::kNewBit);
+  andi(TMP2, TMP2, 1 << target::UntaggedObject::kNewOrEvacuationCandidateBit);
   beqz(TMP2, &done, kNearJump);
   lbu(TMP2, FieldAddress(object, target::Object::tags_offset()));
   andi(TMP2, TMP2, 1 << target::UntaggedObject::kOldAndNotRememberedBit);
diff --git a/runtime/vm/compiler/assembler/assembler_x64.cc b/runtime/vm/compiler/assembler/assembler_x64.cc
index f0298b8..ac49fc1 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64.cc
@@ -1684,7 +1684,7 @@
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   testb(FieldAddress(value, target::Object::tags_offset()),
-        Immediate(1 << target::UntaggedObject::kNewBit));
+        Immediate(1 << target::UntaggedObject::kNewOrEvacuationCandidateBit));
   j(ZERO, &done, Assembler::kNearJump);
   testb(FieldAddress(object, target::Object::tags_offset()),
         Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 4ad918f..6cd4919 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -1392,10 +1392,17 @@
 
     // Strictly speaking, the incremental barrier can only be skipped for
     // immediate objects (Smis) or permanent objects (vm-isolate heap or
-    // image pages). Here we choose to skip the barrier for any constant on
-    // the assumption it will remain reachable through the object pool.
+    // image pages). For AOT, we choose to skip the barrier for any constant on
+    // the assumptions that it will remain reachable through the object pool and
+    // that it is on a page created by snapshot loading that is marked so as to
+    // never be evacuated.
     if (value->BindsToConstant()) {
-      return false;
+      if (FLAG_precompiled_mode) {
+        return false;
+      } else {
+        const Object& constant = value->BoundConstant();
+        return constant.ptr()->IsHeapObject() && !constant.InVMIsolateHeap();
+      }
     }
 
     // Follow the chain of redefinitions as redefined value could have a more
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index 0f78136..63d8c5d 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -6417,11 +6417,6 @@
       // The target field is native and unboxed, so not traversed by the GC.
       return false;
     }
-    if (instance()->definition() == value()->definition()) {
-      // `x.slot = x` cannot create an old->new or old&marked->old&unmarked
-      // reference.
-      return false;
-    }
 
     if (value()->definition()->Type()->IsBool()) {
       return false;
@@ -7074,12 +7069,6 @@
   bool aligned() const { return alignment_ == kAlignedAccess; }
 
   bool ShouldEmitStoreBarrier() const {
-    if (array()->definition() == value()->definition()) {
-      // `x[slot] = x` cannot create an old->new or old&marked->old&unmarked
-      // reference.
-      return false;
-    }
-
     if (value()->definition()->Type()->IsBool()) {
       return false;
     }
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
index 1fb6ad84..b269383 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
@@ -522,11 +522,9 @@
     StoreBarrierType emit_store_barrier /* = kEmitStoreBarrier */,
     compiler::Assembler::MemoryOrder memory_order /* = kRelaxed */) {
   Value* value = Pop();
-  if (value->BindsToConstant()) {
-    emit_store_barrier = kNoStoreBarrier;
-  }
+  Value* instance = Pop();
   StoreFieldInstr* store = new (Z)
-      StoreFieldInstr(slot, Pop(), value, emit_store_barrier,
+      StoreFieldInstr(slot, instance, value, emit_store_barrier,
                       stores_inner_pointer, InstructionSource(position), kind);
   return Fragment(store);
 }
diff --git a/runtime/vm/compiler/runtime_api.cc b/runtime/vm/compiler/runtime_api.cc
index cbe1b18..3779a4a 100644
--- a/runtime/vm/compiler/runtime_api.cc
+++ b/runtime/vm/compiler/runtime_api.cc
@@ -361,7 +361,7 @@
   return dart::UntaggedObject::SizeTag::encode(
              TranslateOffsetInWordsToHost(instance_size)) |
          dart::UntaggedObject::ClassIdTag::encode(cid) |
-         dart::UntaggedObject::NewBit::encode(true) |
+         dart::UntaggedObject::NewOrEvacuationCandidateBit::encode(true) |
          dart::UntaggedObject::AlwaysSetBit::encode(true) |
          dart::UntaggedObject::NotMarkedBit::encode(true) |
          dart::UntaggedObject::ImmutableBit::encode(
@@ -377,7 +377,8 @@
 
 const word UntaggedObject::kCanonicalBit = dart::UntaggedObject::kCanonicalBit;
 
-const word UntaggedObject::kNewBit = dart::UntaggedObject::kNewBit;
+const word UntaggedObject::kNewOrEvacuationCandidateBit =
+    dart::UntaggedObject::kNewOrEvacuationCandidateBit;
 
 const word UntaggedObject::kOldAndNotRememberedBit =
     dart::UntaggedObject::kOldAndNotRememberedBit;
diff --git a/runtime/vm/compiler/runtime_api.h b/runtime/vm/compiler/runtime_api.h
index 2e044f9..65bcae04 100644
--- a/runtime/vm/compiler/runtime_api.h
+++ b/runtime/vm/compiler/runtime_api.h
@@ -418,7 +418,7 @@
  public:
   static const word kCardRememberedBit;
   static const word kCanonicalBit;
-  static const word kNewBit;
+  static const word kNewOrEvacuationCandidateBit;
   static const word kOldAndNotRememberedBit;
   static const word kNotMarkedBit;
   static const word kImmutableBit;
diff --git a/runtime/vm/flag_list.h b/runtime/vm/flag_list.h
index 2fc71af..2686638 100644
--- a/runtime/vm/flag_list.h
+++ b/runtime/vm/flag_list.h
@@ -127,8 +127,8 @@
   R(log_marker_tasks, false, bool, false,                                      \
     "Log debugging information for old gen GC marking tasks.")                 \
   P(scavenger_tasks, int, 2,                                                   \
-    "The number of tasks to spawn during scavenging (0 means "                 \
-    "perform all marking on main thread).")                                    \
+    "The number of tasks to spawn during scavenging and incremental "          \
+    "compaction (0 means perform all work on the main thread).")               \
   P(mark_when_idle, bool, false,                                               \
     "The Dart thread will assist in concurrent marking during idle time and "  \
     "is counted as one marker task")                                           \
@@ -216,6 +216,8 @@
   P(truncating_left_shift, bool, true,                                         \
     "Optimize left shift to truncate if possible")                             \
   P(use_compactor, bool, false, "Compact the heap during old-space GC.")       \
+  P(use_incremental_compactor, bool, true,                                     \
+    "Incrementally compact the heap during old-space GC.")                     \
   P(use_cha_deopt, bool, true,                                                 \
     "Use class hierarchy analysis even if it can cause deoptimization.")       \
   P(use_field_guards, bool, true, "Use field guards and track field types")    \
diff --git a/runtime/vm/heap/become.cc b/runtime/vm/heap/become.cc
index e219084..f93a4f2 100644
--- a/runtime/vm/heap/become.cc
+++ b/runtime/vm/heap/become.cc
@@ -29,7 +29,7 @@
   bool is_old = (addr & kNewObjectAlignmentOffset) == kOldObjectAlignmentOffset;
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(is_old, tags);
-  tags = UntaggedObject::NewBit::update(!is_old, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(!is_old, tags);
 
   result->tags_ = tags;
   if (size > UntaggedObject::SizeTag::kMaxSizeTag) {
diff --git a/runtime/vm/heap/compactor.cc b/runtime/vm/heap/compactor.cc
index aa83b9a..118850f 100644
--- a/runtime/vm/heap/compactor.cc
+++ b/runtime/vm/heap/compactor.cc
@@ -6,9 +6,9 @@
 
 #include "platform/atomic.h"
 #include "vm/globals.h"
-#include "vm/heap/become.h"
 #include "vm/heap/heap.h"
 #include "vm/heap/pages.h"
+#include "vm/heap/sweeper.h"
 #include "vm/thread_barrier.h"
 #include "vm/timeline.h"
 
@@ -184,18 +184,52 @@
 void GCCompactor::Compact(Page* pages, FreeList* freelist, Mutex* pages_lock) {
   SetupImagePageBoundaries();
 
-  // Divide the heap.
+  Page* fixed_head = nullptr;
+  Page* fixed_tail = nullptr;
+
+  // Divide the heap, and set aside never-evacuate pages.
   // TODO(30978): Try to divide based on live bytes or with work stealing.
   intptr_t num_pages = 0;
-  for (Page* page = pages; page != nullptr; page = page->next()) {
-    num_pages++;
+  Page* page = pages;
+  Page* prev = nullptr;
+  while (page != nullptr) {
+    Page* next = page->next();
+    if (page->is_never_evacuate()) {
+      if (prev != nullptr) {
+        prev->set_next(next);
+      } else {
+        pages = next;
+      }
+      if (fixed_tail == nullptr) {
+        fixed_tail = page;
+      }
+      page->set_next(fixed_head);
+      fixed_head = page;
+    } else {
+      prev = page;
+      num_pages++;
+    }
+    page = next;
   }
+  fixed_pages_ = fixed_head;
 
   intptr_t num_tasks = FLAG_compactor_tasks;
   RELEASE_ASSERT(num_tasks >= 1);
   if (num_pages < num_tasks) {
     num_tasks = num_pages;
   }
+  if (num_tasks == 0) {
+    ASSERT(pages == nullptr);
+
+    // Move pages to sweeper work lists.
+    heap_->old_space()->pages_ = nullptr;
+    heap_->old_space()->pages_tail_ = nullptr;
+    heap_->old_space()->sweep_regular_ = fixed_head;
+
+    heap_->old_space()->Sweep(/*exclusive*/ true);
+    heap_->old_space()->SweepLarge();
+    return;
+  }
 
   Partition* partitions = new Partition[num_tasks];
 
@@ -206,6 +240,7 @@
     Page* page = pages;
     Page* prev = nullptr;
     while (task_index < num_tasks) {
+      ASSERT(!page->is_never_evacuate());
       if (page_index % pages_per_task == 0) {
         partitions[task_index].head = page;
         partitions[task_index].tail = nullptr;
@@ -352,6 +387,12 @@
     partitions[num_tasks - 1].tail->set_next(nullptr);
     heap_->old_space()->pages_ = pages = partitions[0].head;
     heap_->old_space()->pages_tail_ = partitions[num_tasks - 1].tail;
+    if (fixed_head != nullptr) {
+      fixed_tail->set_next(heap_->old_space()->pages_);
+      heap_->old_space()->pages_ = fixed_head;
+
+      ASSERT(heap_->old_space()->pages_tail_ != nullptr);
+    }
 
     delete[] partitions;
   }
@@ -486,6 +527,7 @@
 }
 
 void CompactorTask::PlanPage(Page* page) {
+  ASSERT(!page->is_never_evacuate());
   uword current = page->object_start();
   uword end = page->object_end();
 
@@ -498,6 +540,7 @@
 }
 
 void CompactorTask::SlidePage(Page* page) {
+  ASSERT(!page->is_never_evacuate());
   uword current = page->object_start();
   uword end = page->object_end();
 
@@ -667,6 +710,11 @@
   if (forwarding_page == nullptr) {
     return;  // Not moved (VM isolate, large page, code page).
   }
+  if (page->is_never_evacuate()) {
+    // Forwarding page is non-NULL since one is still reserved for use as a
+    // counting page, but it doesn't have forwarding information.
+    return;
+  }
 
   ObjectPtr new_target =
       UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));
@@ -703,6 +751,11 @@
   if (forwarding_page == nullptr) {
     return;  // Not moved (VM isolate, large page, code page).
   }
+  if (page->is_never_evacuate()) {
+    // Forwarding page is non-NULL since one is still reserved for use as a
+    // counting page, but it doesn't have forwarding information.
+    return;
+  }
 
   ObjectPtr new_target =
       UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));
@@ -796,6 +849,24 @@
     page->VisitObjectPointers(this);
     ml.Lock();
   }
+  while (fixed_pages_ != nullptr) {
+    Page* page = fixed_pages_;
+    fixed_pages_ = page->next();
+    ml.Unlock();
+
+    GCSweeper sweeper;
+    FreeList* freelist = heap_->old_space()->DataFreeList(0);
+    bool page_in_use;
+    {
+      MutexLocker ml(freelist->mutex());
+      page_in_use = sweeper.SweepPage(page, freelist);
+    }
+    ASSERT(page_in_use);
+
+    page->VisitObjectPointers(this);
+
+    ml.Lock();
+  }
 }
 
 void GCCompactor::ForwardStackPointers() {
diff --git a/runtime/vm/heap/compactor.h b/runtime/vm/heap/compactor.h
index 9f4b0a8..c9f57b1 100644
--- a/runtime/vm/heap/compactor.h
+++ b/runtime/vm/heap/compactor.h
@@ -74,6 +74,7 @@
 
   Mutex large_pages_mutex_;
   Page* large_pages_ = nullptr;
+  Page* fixed_pages_ = nullptr;
 
   // The typed data views whose inner pointer must be updated after sliding is
   // complete.
diff --git a/runtime/vm/heap/freelist.cc b/runtime/vm/heap/freelist.cc
index ee5229a..91534cc 100644
--- a/runtime/vm/heap/freelist.cc
+++ b/runtime/vm/heap/freelist.cc
@@ -28,7 +28,7 @@
   tags = UntaggedObject::AlwaysSetBit::update(true, tags);
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(true, tags);
-  tags = UntaggedObject::NewBit::update(false, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(false, tags);
   result->tags_ = tags;
 
   if (size > UntaggedObject::SizeTag::kMaxSizeTag) {
@@ -53,7 +53,7 @@
   tags = UntaggedObject::AlwaysSetBit::update(true, tags);
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(false, tags);
-  tags = UntaggedObject::NewBit::update(true, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(true, tags);
   result->tags_ = tags;
 
   if (size > UntaggedObject::SizeTag::kMaxSizeTag) {
diff --git a/runtime/vm/heap/freelist.h b/runtime/vm/heap/freelist.h
index a950410..33807f1 100644
--- a/runtime/vm/heap/freelist.h
+++ b/runtime/vm/heap/freelist.h
@@ -224,6 +224,9 @@
   // The largest available small size in bytes, or negative if there is none.
   intptr_t last_free_small_size_;
 
+  friend class GCIncrementalCompactor;
+  friend class PrologueTask;
+
   DISALLOW_COPY_AND_ASSIGN(FreeList);
 };
 
diff --git a/runtime/vm/heap/heap.cc b/runtime/vm/heap/heap.cc
index 00abe66..c9713c0 100644
--- a/runtime/vm/heap/heap.cc
+++ b/runtime/vm/heap/heap.cc
@@ -12,6 +12,7 @@
 #include "vm/compiler/jit/compiler.h"
 #include "vm/dart.h"
 #include "vm/flags.h"
+#include "vm/heap/incremental_compactor.h"
 #include "vm/heap/pages.h"
 #include "vm/heap/safepoint.h"
 #include "vm/heap/scavenger.h"
@@ -467,6 +468,16 @@
       VMTagScope tagScope(thread, reason == GCReason::kIdle
                                       ? VMTag::kGCIdleTagId
                                       : VMTag::kGCNewSpaceTagId);
+      if (reason == GCReason::kStoreBuffer) {
+        // The remembered set may become too full, increasing the time of
+        // stop-the-world phases, if new-space or to-be-evacuated objects are
+        // pointed to by too many objects. This is resolved by evacuating
+        // new-space (so there are no old->new pointers) and aborting an
+        // incremental compaction (so there are no old->to-be-evacuated
+        // pointers). If we had separate remembered sets, we could do these
+        // actions separately.
+        GCIncrementalCompactor::Abort(old_space());
+      }
       TIMELINE_FUNCTION_GC_DURATION(thread, "CollectNewGeneration");
       new_space_.Scavenge(thread, type, reason);
       RecordAfterGC(type);
diff --git a/runtime/vm/heap/heap.h b/runtime/vm/heap/heap.h
index 95b0db4..fc384df 100644
--- a/runtime/vm/heap/heap.h
+++ b/runtime/vm/heap/heap.h
@@ -387,6 +387,7 @@
   friend class Serializer;            // VisitObjectsImagePages
   friend class HeapTestHelper;
   friend class GCTestHelper;
+  friend class GCIncrementalCompactor;
 
   DISALLOW_COPY_AND_ASSIGN(Heap);
 };
diff --git a/runtime/vm/heap/heap_sources.gni b/runtime/vm/heap/heap_sources.gni
index 9009a1b..5685b09 100644
--- a/runtime/vm/heap/heap_sources.gni
+++ b/runtime/vm/heap/heap_sources.gni
@@ -13,6 +13,8 @@
   "gc_shared.h",
   "heap.cc",
   "heap.h",
+  "incremental_compactor.cc",
+  "incremental_compactor.h",
   "marker.cc",
   "marker.h",
   "page.cc",
diff --git a/runtime/vm/heap/incremental_compactor.cc b/runtime/vm/heap/incremental_compactor.cc
new file mode 100644
index 0000000..3a4c160
--- /dev/null
+++ b/runtime/vm/heap/incremental_compactor.cc
@@ -0,0 +1,1013 @@
+// Copyright (c) 2024, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/heap/incremental_compactor.h"
+
+#include "platform/assert.h"
+#include "vm/dart_api_state.h"
+#include "vm/globals.h"
+#include "vm/heap/become.h"
+#include "vm/heap/freelist.h"
+#include "vm/heap/heap.h"
+#include "vm/heap/pages.h"
+#include "vm/log.h"
+#include "vm/thread_barrier.h"
+#include "vm/timeline.h"
+#include "vm/visitor.h"
+
+namespace dart {
+
+void GCIncrementalCompactor::Prologue(PageSpace* old_space) {
+  ASSERT(Thread::Current()->OwnsGCSafepoint());
+  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "StartIncrementalCompact");
+  if (!SelectEvacuationCandidates(old_space)) {
+    return;
+  }
+  CheckFreeLists(old_space);
+}
+
+bool GCIncrementalCompactor::Epilogue(PageSpace* old_space) {
+  ASSERT(Thread::Current()->OwnsGCSafepoint());
+  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "FinishIncrementalCompact");
+  if (!HasEvacuationCandidates(old_space)) {
+    return false;
+  }
+  old_space->MakeIterable();
+  CheckFreeLists(old_space);
+  CheckPreEvacuate(old_space);
+  Evacuate(old_space);
+  CheckPostEvacuate(old_space);
+  CheckFreeLists(old_space);
+  FreeEvacuatedPages(old_space);
+  VerifyAfterIncrementalCompaction(old_space);
+  return true;
+}
+
+void GCIncrementalCompactor::Abort(PageSpace* old_space) {
+  ASSERT(Thread::Current()->OwnsGCSafepoint());
+  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "AbortIncrementalCompact");
+
+  {
+    MonitorLocker ml(old_space->tasks_lock());
+    switch (old_space->phase()) {
+      case PageSpace::kDone:
+        return;  // No incremental compact in progress.
+      case PageSpace::kSweepingRegular:
+      case PageSpace::kSweepingLarge:
+        // No incremental compact in progress, the page list is incomplete, and
+        // accessing page->next is a data race.
+        return;
+      case PageSpace::kMarking:
+      case PageSpace::kAwaitingFinalization:
+        break;  // Incremental compact may be in progress.
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  old_space->PauseConcurrentMarking();
+
+  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
+    if (!page->is_evacuation_candidate()) continue;
+
+    page->set_evacuation_candidate(false);
+
+    uword start = page->object_start();
+    uword end = page->object_end();
+    uword current = start;
+    while (current < end) {
+      ObjectPtr obj = UntaggedObject::FromAddr(current);
+      obj->untag()->ClearIsEvacuationCandidateUnsynchronized();
+      current += obj->untag()->HeapSize();
+    }
+  }
+
+  old_space->ResumeConcurrentMarking();
+}
+
+struct LiveBytes {
+  Page* page;
+  intptr_t live_bytes;
+};
+
+struct PrologueState {
+  MallocGrowableArray<LiveBytes> pages;
+  RelaxedAtomic<intptr_t> page_cursor;
+  intptr_t page_limit;
+  RelaxedAtomic<intptr_t> freelist_cursor;
+  intptr_t freelist_limit;
+};
+
+class PrologueTask : public ThreadPool::Task {
+ public:
+  PrologueTask(ThreadBarrier* barrier,
+               IsolateGroup* isolate_group,
+               PageSpace* old_space,
+               PrologueState* state)
+      : barrier_(barrier),
+        isolate_group_(isolate_group),
+        old_space_(old_space),
+        state_(state) {}
+
+  void Run() {
+    if (!barrier_->TryEnter()) {
+      barrier_->Release();
+      return;
+    }
+
+    bool result = Thread::EnterIsolateGroupAsHelper(
+        isolate_group_, Thread::kIncrementalCompactorTask,
+        /*bypass_safepoint=*/true);
+    ASSERT(result);
+
+    RunEnteredIsolateGroup();
+
+    Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);
+
+    barrier_->Sync();
+    barrier_->Release();
+  }
+
+  void RunEnteredIsolateGroup() {
+    MarkEvacuationCandidates();
+    PruneFreeLists();
+  }
+
+  void MarkEvacuationCandidates() {
+    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
+                                  "MarkEvacuationCandidates");
+    for (;;) {
+      intptr_t page_index = state_->page_cursor.fetch_add(1);
+      if (page_index >= state_->page_limit) break;
+      Page* page = state_->pages[page_index].page;
+
+      // Already set, otherwise a barrier would be needed before moving onto
+      // freelists.
+      ASSERT(page->is_evacuation_candidate());
+
+      uword start = page->object_start();
+      uword end = page->object_end();
+      uword current = start;
+      while (current < end) {
+        ObjectPtr obj = UntaggedObject::FromAddr(current);
+        intptr_t cid = obj->untag()->GetClassId();
+        if (cid != kFreeListElement && cid != kForwardingCorpse) {
+          obj->untag()->SetIsEvacuationCandidateUnsynchronized();
+        }
+        current += obj->untag()->HeapSize();
+      }
+    }
+  }
+
+  void PruneFreeLists() {
+    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "PruneFreeLists");
+    for (;;) {
+      intptr_t chunk = state_->freelist_cursor.fetch_add(1);
+      if (chunk >= state_->freelist_limit) break;
+      intptr_t list_index = chunk / (FreeList::kNumLists + 1);
+      intptr_t size_class_index = chunk % (FreeList::kNumLists + 1);
+      FreeList* freelist = &old_space_->freelists_[list_index];
+
+      // Empty bump-region, no need to prune this.
+      ASSERT(freelist->top_ == freelist->end_);
+
+      FreeListElement* current = freelist->free_lists_[size_class_index];
+      freelist->free_lists_[size_class_index] = nullptr;
+      while (current != nullptr) {
+        FreeListElement* next = current->next();
+        if (!Page::Of(current)->is_evacuation_candidate()) {
+          current->set_next(freelist->free_lists_[size_class_index]);
+          freelist->free_lists_[size_class_index] = current;
+        }
+        current = next;
+      }
+    }
+  }
+
+ private:
+  ThreadBarrier* barrier_;
+  IsolateGroup* isolate_group_;
+  PageSpace* old_space_;
+  PrologueState* state_;
+
+  DISALLOW_COPY_AND_ASSIGN(PrologueTask);
+};
+
+bool GCIncrementalCompactor::SelectEvacuationCandidates(PageSpace* old_space) {
+  // Only evacuate pages that are at least half empty.
+  constexpr intptr_t kEvacuationThreshold = kPageSize / 2;
+
+  // Evacuate no more than this many bytes. This puts a bound on the
+  // stop-the-world evacuate step that is similar to the existing longest
+  // stop-the-world step of the scavenger.
+  const intptr_t kMaxEvacuatedBytes =
+      (old_space->heap_->new_space()->ThresholdInWords() << kWordSizeLog2) / 4;
+
+  PrologueState state;
+  {
+    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
+                                  "SelectEvacuationCandidates");
+    for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
+      if (page->is_never_evacuate()) continue;
+
+      intptr_t live_bytes = page->live_bytes();
+      if (live_bytes > kEvacuationThreshold) continue;
+
+      state.pages.Add({page, live_bytes});
+    }
+    state.pages.Sort([](const LiveBytes* a, const LiveBytes* b) -> int {
+      if (a->live_bytes < b->live_bytes) return -1;
+      if (a->live_bytes > b->live_bytes) return 1;
+      return 0;
+    });
+
+    intptr_t num_candidates = 0;
+    intptr_t cumulative_live_bytes = 0;
+    for (intptr_t i = 0; i < state.pages.length(); i++) {
+      intptr_t live_bytes = state.pages[i].live_bytes;
+      if (cumulative_live_bytes + live_bytes <= kMaxEvacuatedBytes) {
+        num_candidates++;
+        cumulative_live_bytes += live_bytes;
+        state.pages[i].page->set_evacuation_candidate(true);
+      }
+    }
+
+#if defined(SUPPORT_TIMELINE)
+    tbes.SetNumArguments(2);
+    tbes.FormatArgument(0, "cumulative_live_bytes", "%" Pd,
+                        cumulative_live_bytes);
+    tbes.FormatArgument(1, "num_candidates", "%" Pd, num_candidates);
+#endif
+
+    state.page_cursor = 0;
+    state.page_limit = num_candidates;
+    state.freelist_cursor =
+        PageSpace::kDataFreelist * (FreeList::kNumLists + 1);
+    state.freelist_limit =
+        old_space->num_freelists_ * (FreeList::kNumLists + 1);
+
+    if (num_candidates == 0) return false;
+  }
+
+  old_space->ReleaseBumpAllocation();
+
+  const intptr_t num_tasks = Utils::Maximum(1, FLAG_scavenger_tasks);
+  RELEASE_ASSERT(num_tasks > 0);
+  ThreadBarrier* barrier = new ThreadBarrier(num_tasks, 1);
+  IsolateGroup* isolate_group = IsolateGroup::Current();
+  for (intptr_t i = 0; i < num_tasks; i++) {
+    if (i < (num_tasks - 1)) {
+      // Begin compacting on a helper thread.
+      bool result = Dart::thread_pool()->Run<PrologueTask>(
+          barrier, isolate_group, old_space, &state);
+      ASSERT(result);
+    } else {
+      // Last worker is the main thread.
+      PrologueTask task(barrier, isolate_group, old_space, &state);
+      task.RunEnteredIsolateGroup();
+      barrier->Sync();
+      barrier->Release();
+    }
+  }
+
+  for (intptr_t i = PageSpace::kDataFreelist, n = old_space->num_freelists_;
+       i < n; i++) {
+    FreeList* freelist = &old_space->freelists_[i];
+    ASSERT(freelist->top_ == freelist->end_);
+    freelist->free_map_.Reset();
+    for (intptr_t j = 0; j < FreeList::kNumLists; j++) {
+      freelist->free_map_.Set(j, freelist->free_lists_[j] != nullptr);
+    }
+  }
+
+  return true;
+}
+
+// Free lists should not contain any evacuation candidates.
+void GCIncrementalCompactor::CheckFreeLists(PageSpace* old_space) {
+#if defined(DEBUG)
+  for (intptr_t i = 0, n = old_space->num_freelists_; i < n; i++) {
+    FreeList* freelist = &old_space->freelists_[i];
+    if (freelist->top_ < freelist->end_) {
+      Page* page = Page::Of(freelist->top_);
+      ASSERT(!page->is_evacuation_candidate());
+    }
+    for (intptr_t j = 0; j <= FreeList::kNumLists; j++) {
+      FreeListElement* current = freelist->free_lists_[j];
+      while (current != nullptr) {
+        Page* page = Page::Of(reinterpret_cast<uword>(current));
+        ASSERT(!page->is_evacuation_candidate());
+        current = current->next();
+      }
+    }
+  }
+#endif
+}
+
+static void objcpy(void* dst, const void* src, size_t size) {
+  uword* __restrict dst_cursor = reinterpret_cast<uword*>(dst);
+  const uword* __restrict src_cursor = reinterpret_cast<const uword*>(src);
+  do {
+    uword a = *src_cursor++;
+    uword b = *src_cursor++;
+    *dst_cursor++ = a;
+    *dst_cursor++ = b;
+    size -= (2 * sizeof(uword));
+  } while (size > 0);
+}
+
+bool GCIncrementalCompactor::HasEvacuationCandidates(PageSpace* old_space) {
+  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
+    if (page->is_evacuation_candidate()) return true;
+  }
+  return false;
+}
+
+void GCIncrementalCompactor::CheckPreEvacuate(PageSpace* old_space) {
+  if (!FLAG_verify_before_gc) return;
+
+  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "CheckPreEvacuate");
+
+  // Check that evacuation candidate pages contain only evacuation candidate
+  // objects or free space, i.e., that we didn't allocate into them after
+  // selecting them as evacuation candidates.
+  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
+    if (page->is_evacuation_candidate()) {
+      uword start = page->object_start();
+      uword end = page->object_end();
+      uword current = start;
+      while (current < end) {
+        ObjectPtr obj = UntaggedObject::FromAddr(current);
+        intptr_t size = obj->untag()->HeapSize();
+        ASSERT(obj->untag()->IsEvacuationCandidate() ||
+               obj->untag()->GetClassId() == kFreeListElement ||
+               obj->untag()->GetClassId() == kForwardingCorpse);
+        current += size;
+      }
+    }
+  }
+
+  // Check non-evac pages don't have evac candidates.
+  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
+    if (!page->is_evacuation_candidate()) {
+      uword start = page->object_start();
+      uword end = page->object_end();
+      uword current = start;
+      while (current < end) {
+        ObjectPtr obj = UntaggedObject::FromAddr(current);
+        intptr_t size = obj->untag()->HeapSize();
+        ASSERT(!obj->untag()->IsEvacuationCandidate());
+        current += size;
+      }
+    }
+  }
+}
+
+class IncrementalForwardingVisitor : public ObjectPointerVisitor,
+                                     public PredicateObjectPointerVisitor,
+                                     public ObjectVisitor,
+                                     public HandleVisitor {
+ public:
+  explicit IncrementalForwardingVisitor(Thread* thread)
+      : ObjectPointerVisitor(thread->isolate_group()), HandleVisitor(thread) {}
+
+  void VisitObject(ObjectPtr obj) override {
+    if (obj->untag()->IsMarked()) {
+      obj->untag()->VisitPointers(this);
+    }
+  }
+
+  void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
+    PredicateVisitPointers(first, last);
+  }
+  bool PredicateVisitPointers(ObjectPtr* first, ObjectPtr* last) override {
+    bool has_new_target = false;
+    for (ObjectPtr* ptr = first; ptr <= last; ptr++) {
+      ObjectPtr target = *ptr;
+      if (target->IsImmediateObject()) continue;
+      if (target->IsNewObject()) {
+        has_new_target = true;
+        continue;
+      }
+
+      if (target->IsForwardingCorpse()) {
+        ASSERT(!target->untag()->IsMarked());
+        ASSERT(!target->untag()->IsEvacuationCandidate());
+        uword addr = UntaggedObject::ToAddr(target);
+        ForwardingCorpse* forwarder = reinterpret_cast<ForwardingCorpse*>(addr);
+        *ptr = forwarder->target();
+      } else {
+        ASSERT(target->untag()->IsMarked());
+        ASSERT(!target->untag()->IsEvacuationCandidate());
+      }
+    }
+    return has_new_target;
+  }
+
+#if defined(DART_COMPRESSED_POINTERS)
+  void VisitCompressedPointers(uword heap_base,
+                               CompressedObjectPtr* first,
+                               CompressedObjectPtr* last) override {
+    PredicateVisitCompressedPointers(heap_base, first, last);
+  }
+  bool PredicateVisitCompressedPointers(uword heap_base,
+                                        CompressedObjectPtr* first,
+                                        CompressedObjectPtr* last) override {
+    bool has_new_target = false;
+    for (CompressedObjectPtr* ptr = first; ptr <= last; ptr++) {
+      ObjectPtr target = ptr->Decompress(heap_base);
+      if (target->IsImmediateObject()) continue;
+      if (target->IsNewObject()) {
+        has_new_target = true;
+        continue;
+      }
+
+      if (target->IsForwardingCorpse()) {
+        ASSERT(!target->untag()->IsMarked());
+        ASSERT(!target->untag()->IsEvacuationCandidate());
+        uword addr = UntaggedObject::ToAddr(target);
+        ForwardingCorpse* forwarder = reinterpret_cast<ForwardingCorpse*>(addr);
+        *ptr = forwarder->target();
+      } else {
+        ASSERT(target->untag()->IsMarked());
+        ASSERT(!target->untag()->IsEvacuationCandidate());
+      }
+    }
+    return has_new_target;
+  }
+#endif
+
+  void VisitHandle(uword addr) override {
+    FinalizablePersistentHandle* handle =
+        reinterpret_cast<FinalizablePersistentHandle*>(addr);
+    ObjectPtr target = handle->ptr();
+    if (target->IsHeapObject() && target->IsForwardingCorpse()) {
+      uword addr = UntaggedObject::ToAddr(target);
+      ForwardingCorpse* forwarder = reinterpret_cast<ForwardingCorpse*>(addr);
+      *handle->ptr_addr() = forwarder->target();
+    }
+  }
+
+  void VisitTypedDataViewPointers(TypedDataViewPtr view,
+                                  CompressedObjectPtr* first,
+                                  CompressedObjectPtr* last) override {
+    ObjectPtr old_backing = view->untag()->typed_data();
+    VisitCompressedPointers(view->heap_base(), first, last);
+    ObjectPtr new_backing = view->untag()->typed_data();
+
+    const bool backing_moved = old_backing != new_backing;
+    if (backing_moved) {
+      typed_data_views_.Add(view);
+    }
+  }
+
+  bool CanVisitSuspendStatePointers(SuspendStatePtr suspend_state) override {
+    if ((suspend_state->untag()->pc() != 0) && !can_visit_stack_frames_) {
+      // Visiting pointers of SuspendState objects with copied stack frame
+      // needs to query stack map, which can touch other Dart objects
+      // (such as GrowableObjectArray of InstructionsTable).
+      // Those objects may have an inconsistent state during compaction,
+      // so processing of SuspendState objects is postponed to the later
+      // stage of compaction.
+      suspend_states_.Add(suspend_state);
+      return false;
+    }
+    return true;
+  }
+
+  void UpdateViews() {
+    const intptr_t length = typed_data_views_.length();
+    for (intptr_t i = 0; i < length; ++i) {
+      auto raw_view = typed_data_views_[i];
+      const classid_t cid =
+          raw_view->untag()->typed_data()->GetClassIdMayBeSmi();
+      // If we have external typed data we can simply return, since the backing
+      // store lives in C-heap and will not move. Otherwise we have to update
+      // the inner pointer.
+      if (IsTypedDataClassId(cid)) {
+        raw_view->untag()->RecomputeDataFieldForInternalTypedData();
+      } else {
+        ASSERT(IsExternalTypedDataClassId(cid));
+      }
+    }
+  }
+
+  void UpdateSuspendStates() {
+    can_visit_stack_frames_ = true;
+    const intptr_t length = suspend_states_.length();
+    for (intptr_t i = 0; i < length; ++i) {
+      auto suspend_state = suspend_states_[i];
+      suspend_state->untag()->VisitPointers(this);
+    }
+  }
+
+ private:
+  bool can_visit_stack_frames_ = false;
+  MallocGrowableArray<TypedDataViewPtr> typed_data_views_;
+  MallocGrowableArray<SuspendStatePtr> suspend_states_;
+
+  DISALLOW_COPY_AND_ASSIGN(IncrementalForwardingVisitor);
+};
+
+class StoreBufferForwardingVisitor : public ObjectPointerVisitor {
+ public:
+  StoreBufferForwardingVisitor(IsolateGroup* isolate_group,
+                               IncrementalForwardingVisitor* visitor)
+      : ObjectPointerVisitor(isolate_group), visitor_(visitor) {}
+
+  void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
+    for (ObjectPtr* ptr = first; ptr <= last; ptr++) {
+      ObjectPtr obj = *ptr;
+      ASSERT(!obj->IsImmediateOrNewObject());
+
+      if (obj->IsForwardingCorpse()) {
+        ASSERT(!obj->untag()->IsMarked());
+        ASSERT(!obj->untag()->IsEvacuationCandidate());
+        uword addr = UntaggedObject::ToAddr(obj);
+        ForwardingCorpse* forwarder = reinterpret_cast<ForwardingCorpse*>(addr);
+        obj = forwarder->target();
+        *ptr = obj;
+      } else {
+        ASSERT(obj->untag()->IsMarked());
+        ASSERT(!obj->untag()->IsEvacuationCandidate());
+      }
+
+      visitor_->VisitObject(obj);
+    }
+  }
+
+#if defined(DART_COMPRESSED_POINTERS)
+  void VisitCompressedPointers(uword heap_base,
+                               CompressedObjectPtr* first,
+                               CompressedObjectPtr* last) override {
+    UNREACHABLE();  // Store buffer blocks are not compressed.
+  }
+#endif
+
+ private:
+  IncrementalForwardingVisitor* visitor_;
+
+  DISALLOW_COPY_AND_ASSIGN(StoreBufferForwardingVisitor);
+};
+
+class EpilogueState {
+ public:
+  EpilogueState(Page* evac_page,
+                StoreBufferBlock* block,
+                Page* new_page,
+                Mutex* pages_lock)
+      : evac_page_(evac_page),
+        block_(block),
+        new_page_(new_page),
+        pages_lock_(pages_lock) {}
+
+  bool NextEvacPage(Page** page) {
+    // Needs to be the old_space pages lock since evacuation may also allocate
+    // new pages and race with page->next_.
+    MutexLocker ml(pages_lock_);
+    while (evac_page_ != nullptr) {
+      Page* current = evac_page_;
+      evac_page_ = current->next();
+      if (current->is_evacuation_candidate()) {
+        *page = current;
+        return true;
+      }
+    }
+    return false;
+  }
+
+  bool NextBlock(StoreBufferBlock** block) {
+    MutexLocker ml(pages_lock_);
+    if (block_ != nullptr) {
+      StoreBufferBlock* current = block_;
+      block_ = current->next();
+      current->set_next(nullptr);
+      *block = current;
+      return true;
+    }
+    return false;
+  }
+
+  bool NextNewPage(Page** page) {
+    MutexLocker ml(pages_lock_);
+    if (new_page_ != nullptr) {
+      Page* current = new_page_;
+      new_page_ = current->next();
+      *page = current;
+      return true;
+    }
+    return false;
+  }
+
+  bool TakeOOM() { return oom_slice_.exchange(false); }
+  bool TakeWeakHandles() { return weak_handles_slice_.exchange(false); }
+  bool TakeWeakTables() { return weak_tables_slice_.exchange(false); }
+  bool TakeIdRing() { return id_ring_slice_.exchange(false); }
+  bool TakeRoots() { return roots_slice_.exchange(false); }
+  bool TakeResetProgressBars() {
+    return reset_progress_bars_slice_.exchange(false);
+  }
+
+  void AddNewFreeSize(intptr_t size) { new_free_size_ += size; }
+  intptr_t NewFreeSize() { return new_free_size_; }
+
+ private:
+  Page* evac_page_;
+  StoreBufferBlock* block_;
+  Page* new_page_;
+  Mutex* pages_lock_;
+
+  RelaxedAtomic<bool> oom_slice_ = {true};
+  RelaxedAtomic<bool> weak_handles_slice_ = {true};
+  RelaxedAtomic<bool> weak_tables_slice_ = {true};
+  RelaxedAtomic<bool> id_ring_slice_ = {true};
+  RelaxedAtomic<bool> roots_slice_ = {true};
+  RelaxedAtomic<bool> reset_progress_bars_slice_ = {true};
+  RelaxedAtomic<intptr_t> new_free_size_ = {0};
+};
+
+class EpilogueTask : public ThreadPool::Task {
+ public:
+  EpilogueTask(ThreadBarrier* barrier,
+               IsolateGroup* isolate_group,
+               PageSpace* old_space,
+               FreeList* freelist,
+               EpilogueState* state)
+      : barrier_(barrier),
+        isolate_group_(isolate_group),
+        old_space_(old_space),
+        freelist_(freelist),
+        state_(state) {}
+
+  void Run() {
+    bool result = Thread::EnterIsolateGroupAsHelper(
+        isolate_group_, Thread::kIncrementalCompactorTask,
+        /*bypass_safepoint=*/true);
+    ASSERT(result);
+
+    RunEnteredIsolateGroup();
+
+    Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);
+
+    barrier_->Sync();
+    barrier_->Release();
+  }
+
+  void RunEnteredIsolateGroup() {
+    Thread* thread = Thread::Current();
+
+    Evacuate();
+
+    barrier_->Sync();
+
+    IncrementalForwardingVisitor visitor(thread);
+    if (state_->TakeOOM()) {
+      old_space_->VisitRoots(&visitor);  // OOM reservation.
+    }
+    ForwardStoreBuffer(&visitor);
+    ForwardRememberedCards(&visitor);
+    ForwardNewSpace(&visitor);
+    if (state_->TakeWeakHandles()) {
+      TIMELINE_FUNCTION_GC_DURATION(thread, "WeakPersistentHandles");
+      isolate_group_->VisitWeakPersistentHandles(&visitor);
+    }
+    if (state_->TakeWeakTables()) {
+      TIMELINE_FUNCTION_GC_DURATION(thread, "WeakTables");
+      isolate_group_->heap()->ForwardWeakTables(&visitor);
+    }
+#ifndef PRODUCT
+    if (state_->TakeIdRing()) {
+      TIMELINE_FUNCTION_GC_DURATION(thread, "IdRing");
+      isolate_group_->ForEachIsolate(
+          [&](Isolate* isolate) {
+            ObjectIdRing* ring = isolate->object_id_ring();
+            if (ring != nullptr) {
+              ring->VisitPointers(&visitor);
+            }
+          },
+          /*at_safepoint=*/true);
+    }
+#endif  // !PRODUCT
+
+    barrier_->Sync();
+
+    {
+      // After forwarding the heap; this visits each view's underlying buffer.
+      TIMELINE_FUNCTION_GC_DURATION(thread, "Views");
+      visitor.UpdateViews();
+    }
+
+    if (state_->TakeRoots()) {
+      // After forwarding the heap because visiting the stack requires stackmaps
+      // to already be forwarded.
+      TIMELINE_FUNCTION_GC_DURATION(thread, "Roots");
+      isolate_group_->VisitObjectPointers(
+          &visitor, ValidationPolicy::kDontValidateFrames);
+    }
+
+    barrier_->Sync();
+
+    {
+      // After processing the object store because of the dependency on
+      // canonicalized_stack_map_entries.
+      TIMELINE_FUNCTION_GC_DURATION(thread, "SuspendStates");
+      visitor.UpdateSuspendStates();
+    }
+
+    if (state_->TakeResetProgressBars()) {
+      // After ForwardRememberedCards.
+      old_space_->ResetProgressBars();
+    }
+  }
+
+  void Evacuate() {
+    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "Evacuate");
+
+    old_space_->AcquireLock(freelist_);
+
+    bool any_failed = false;
+    intptr_t bytes_evacuated = 0;
+    Page* page;
+    while (state_->NextEvacPage(&page)) {
+      ASSERT(page->is_evacuation_candidate());
+
+      bool page_failed = false;
+      uword start = page->object_start();
+      uword end = page->object_end();
+      uword current = start;
+      while (current < end) {
+        ObjectPtr obj = UntaggedObject::FromAddr(current);
+        intptr_t size = obj->untag()->HeapSize();
+
+        if (obj->untag()->IsMarked()) {
+          uword copied = old_space_->TryAllocatePromoLocked(freelist_, size);
+          if (copied == 0) {
+            obj->untag()->ClearIsEvacuationCandidateUnsynchronized();
+            page_failed = true;
+            any_failed = true;
+          } else {
+            ASSERT(!Page::Of(copied)->is_evacuation_candidate());
+            bytes_evacuated += size;
+            objcpy(reinterpret_cast<void*>(copied),
+                   reinterpret_cast<const void*>(current), size);
+            ObjectPtr copied_obj = UntaggedObject::FromAddr(copied);
+
+            copied_obj->untag()->ClearIsEvacuationCandidateUnsynchronized();
+            if (IsTypedDataClassId(copied_obj->GetClassId())) {
+              static_cast<TypedDataPtr>(copied_obj)
+                  ->untag()
+                  ->RecomputeDataField();
+            }
+
+            ForwardingCorpse::AsForwarder(current, size)
+                ->set_target(copied_obj);
+          }
+        }
+
+        current += size;
+      }
+
+      if (page_failed) {
+        page->set_evacuation_candidate(false);
+      }
+    }
+
+    old_space_->ReleaseLock(freelist_);
+    old_space_->usage_.used_in_words -= (bytes_evacuated >> kWordSizeLog2);
+#if defined(SUPPORT_TIMELINE)
+    tbes.SetNumArguments(1);
+    tbes.FormatArgument(0, "bytes_evacuated", "%" Pd, bytes_evacuated);
+#endif
+
+    if (any_failed) {
+      OS::PrintErr("evacuation failed\n");
+    }
+  }
+
+  void ForwardStoreBuffer(IncrementalForwardingVisitor* visitor) {
+    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ForwardStoreBuffer");
+
+    StoreBufferForwardingVisitor store_visitor(isolate_group_, visitor);
+    StoreBuffer* store_buffer = isolate_group_->store_buffer();
+    StoreBufferBlock* block;
+    while (state_->NextBlock(&block)) {
+      // Generated code appends to store buffers; tell MemorySanitizer.
+      MSAN_UNPOISON(block, sizeof(*block));
+
+      block->VisitObjectPointers(&store_visitor);
+
+      store_buffer->PushBlock(block, StoreBuffer::kIgnoreThreshold);
+    }
+  }
+
+  void ForwardRememberedCards(IncrementalForwardingVisitor* visitor) {
+    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ForwardRememberedCards");
+    for (Page* page = old_space_->large_pages_; page != nullptr;
+         page = page->next()) {
+      page->VisitRememberedCards(visitor, /*only_marked*/ true);
+    }
+  }
+
+  void ForwardNewSpace(IncrementalForwardingVisitor* visitor) {
+    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ForwardNewSpace");
+    Page* page;
+    while (state_->NextNewPage(&page)) {
+      intptr_t free = ForwardAndSweepNewPage(visitor, page);
+      state_->AddNewFreeSize(free);
+    }
+  }
+
+  DART_NOINLINE
+  intptr_t ForwardAndSweepNewPage(IncrementalForwardingVisitor* visitor,
+                                  Page* page) {
+    ASSERT(!page->is_image());
+    ASSERT(!page->is_old());
+    ASSERT(!page->is_executable());
+
+    uword start = page->object_start();
+    uword end = page->object_end();
+    uword current = start;
+    intptr_t free = 0;
+    while (current < end) {
+      ObjectPtr raw_obj = UntaggedObject::FromAddr(current);
+      ASSERT(Page::Of(raw_obj) == page);
+      uword tags = raw_obj->untag()->tags();
+      intptr_t obj_size = raw_obj->untag()->HeapSize(tags);
+      if (UntaggedObject::IsMarked(tags)) {
+        raw_obj->untag()->ClearMarkBitUnsynchronized();
+        ASSERT(IsAllocatableInNewSpace(obj_size));
+        raw_obj->untag()->VisitPointers(visitor);
+      } else {
+        uword free_end = current + obj_size;
+        while (free_end < end) {
+          ObjectPtr next_obj = UntaggedObject::FromAddr(free_end);
+          tags = next_obj->untag()->tags();
+          if (UntaggedObject::IsMarked(tags)) {
+            // Reached the end of the free block.
+            break;
+          }
+          // Expand the free block by the size of this object.
+          free_end += next_obj->untag()->HeapSize(tags);
+        }
+        obj_size = free_end - current;
+#if defined(DEBUG)
+        memset(reinterpret_cast<void*>(current), Heap::kZapByte, obj_size);
+#endif  // DEBUG
+        FreeListElement::AsElementNew(current, obj_size);
+        free += obj_size;
+      }
+      current += obj_size;
+    }
+    return free;
+  }
+
+ private:
+  ThreadBarrier* barrier_;
+  IsolateGroup* isolate_group_;
+  PageSpace* old_space_;
+  FreeList* freelist_;
+  EpilogueState* state_;
+};
+
+void GCIncrementalCompactor::Evacuate(PageSpace* old_space) {
+  IsolateGroup* isolate_group = IsolateGroup::Current();
+  isolate_group->ReleaseStoreBuffers();
+  EpilogueState state(
+      old_space->pages_, isolate_group->store_buffer()->PopAll(),
+      old_space->heap_->new_space()->head(), &old_space->pages_lock_);
+
+  // This must use FLAG_scavenger_tasks because that determines the number of
+  // freelists available for workers.
+  const intptr_t num_tasks = Utils::Maximum(1, FLAG_scavenger_tasks);
+  RELEASE_ASSERT(num_tasks > 0);
+  ThreadBarrier* barrier = new ThreadBarrier(num_tasks, num_tasks);
+  for (intptr_t i = 0; i < num_tasks; i++) {
+    // Begin compacting on a helper thread.
+    FreeList* freelist = old_space->DataFreeList(i);
+    if (i < (num_tasks - 1)) {
+      bool result = Dart::thread_pool()->Run<EpilogueTask>(
+          barrier, isolate_group, old_space, freelist, &state);
+      ASSERT(result);
+    } else {
+      // Last worker is the main thread.
+      EpilogueTask task(barrier, isolate_group, old_space, freelist, &state);
+      task.RunEnteredIsolateGroup();
+      barrier->Sync();
+      barrier->Release();
+    }
+  }
+
+  old_space->heap_->new_space()->set_freed_in_words(state.NewFreeSize() >>
+                                                    kWordSizeLog2);
+}
+
+void GCIncrementalCompactor::CheckPostEvacuate(PageSpace* old_space) {
+  if (!FLAG_verify_after_gc) return;
+
+  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "CheckPostEvacuate");
+
+  // Check there are no remaining evac candidates
+  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
+    uword start = page->object_start();
+    uword end = page->object_end();
+    uword current = start;
+    while (current < end) {
+      ObjectPtr obj = UntaggedObject::FromAddr(current);
+      intptr_t size = obj->untag()->HeapSize();
+      ASSERT(!obj->untag()->IsEvacuationCandidate() ||
+             !obj->untag()->IsMarked());
+      current += size;
+    }
+  }
+}
+
+void GCIncrementalCompactor::FreeEvacuatedPages(PageSpace* old_space) {
+  Page* prev_page = nullptr;
+  Page* page = old_space->pages_;
+  while (page != nullptr) {
+    Page* next_page = page->next();
+    if (page->is_evacuation_candidate()) {
+      old_space->FreePage(page, prev_page);
+    } else {
+      prev_page = page;
+    }
+    page = next_page;
+  }
+}
+
+class VerifyAfterIncrementalCompactionVisitor : public ObjectVisitor,
+                                                public ObjectPointerVisitor {
+ public:
+  VerifyAfterIncrementalCompactionVisitor()
+      : ObjectVisitor(), ObjectPointerVisitor(IsolateGroup::Current()) {}
+
+  void VisitObject(ObjectPtr obj) override {
+    // New-space has been swept, but old-space has not.
+    if (obj->IsNewObject()) {
+      if (obj->untag()->GetClassId() != kFreeListElement) {
+        current_ = obj;
+        obj->untag()->VisitPointers(this);
+      }
+    } else {
+      if (obj->untag()->IsMarked()) {
+        current_ = obj;
+        obj->untag()->VisitPointers(this);
+      }
+    }
+  }
+
+  void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
+    for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
+      ObjectPtr obj = *ptr;
+      if (!obj->IsHeapObject()) continue;
+      if (obj->IsForwardingCorpse() || obj->IsFreeListElement() ||
+          (obj->IsOldObject() && !obj->untag()->IsMarked())) {
+        OS::PrintErr("object=0x%" Px ", slot=0x%" Px ", value=0x%" Px "\n",
+                     static_cast<uword>(current_), reinterpret_cast<uword>(ptr),
+                     static_cast<uword>(obj));
+        failed_ = true;
+      }
+    }
+  }
+
+#if defined(DART_COMPRESSED_POINTERS)
+  void VisitCompressedPointers(uword heap_base,
+                               CompressedObjectPtr* from,
+                               CompressedObjectPtr* to) override {
+    for (CompressedObjectPtr* ptr = from; ptr <= to; ptr++) {
+      ObjectPtr obj = ptr->Decompress(heap_base);
+      if (!obj->IsHeapObject()) continue;
+      if (obj->IsForwardingCorpse() || obj->IsFreeListElement() ||
+          (obj->IsOldObject() && !obj->untag()->IsMarked())) {
+        OS::PrintErr("object=0x%" Px ", slot=0x%" Px ", value=0x%" Px "\n",
+                     static_cast<uword>(current_), reinterpret_cast<uword>(ptr),
+                     static_cast<uword>(obj));
+        failed_ = true;
+      }
+    }
+  }
+#endif
+
+  bool failed() const { return failed_; }
+
+ private:
+  ObjectPtr current_;
+  bool failed_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(VerifyAfterIncrementalCompactionVisitor);
+};
+
+void GCIncrementalCompactor::VerifyAfterIncrementalCompaction(
+    PageSpace* old_space) {
+  if (!FLAG_verify_after_gc) return;
+  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
+                                "VerifyAfterIncrementalCompaction");
+  VerifyAfterIncrementalCompactionVisitor visitor;
+  old_space->heap_->VisitObjects(&visitor);
+  if (visitor.failed()) {
+    FATAL("verify after incremental compact");
+  }
+}
+
+}  // namespace dart
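
GCIncrementalCompactor::Evacuate above fans the epilogue out over FLAG_scavenger_tasks workers, runs the last task on the calling thread, and synchronizes everyone on a ThreadBarrier. Below is a minimal sketch of that fork-join shape using C++20 std::barrier and std::thread in place of the VM's ThreadBarrier and thread pool; RunEpilogueSlice and kNumTasks are illustrative names, not VM API.

```c++
// Minimal sketch: N-1 helpers run on other threads, the caller acts as the
// last worker, and everybody meets at a barrier before the caller moves on.
#include <barrier>
#include <functional>
#include <thread>
#include <vector>

void RunEpilogueSlice(int worker_id, std::barrier<>& barrier) {
  // ... evacuate and forward the slice owned by worker_id ...
  barrier.arrive_and_wait();  // Rough equivalent of ThreadBarrier::Sync().
}

void Evacuate(int kNumTasks) {
  std::barrier<> barrier(kNumTasks);
  std::vector<std::thread> helpers;
  for (int i = 0; i < kNumTasks - 1; i++) {
    helpers.emplace_back(RunEpilogueSlice, i, std::ref(barrier));
  }
  RunEpilogueSlice(kNumTasks - 1, barrier);  // The caller is the last worker.
  for (std::thread& helper : helpers) {
    helper.join();  // The VM uses barrier refcounting instead of joining.
  }
}

int main() {
  Evacuate(4);
  return 0;
}
```
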
diff --git a/runtime/vm/heap/incremental_compactor.h b/runtime/vm/heap/incremental_compactor.h
new file mode 100644
index 0000000..d37ed4d
--- /dev/null
+++ b/runtime/vm/heap/incremental_compactor.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2024, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_HEAP_INCREMENTAL_COMPACTOR_H_
+#define RUNTIME_VM_HEAP_INCREMENTAL_COMPACTOR_H_
+
+#include "vm/allocation.h"
+
+namespace dart {
+
+// Forward declarations.
+class PageSpace;
+class ObjectVisitor;
+class IncrementalForwardingVisitor;
+
+// An evacuating compactor that is incremental in the sense that building the
+// remembered set is interleaved with the mutator. Evacuation and forwarding
+// are not interleaved with the mutator, as that would require a read barrier.
+class GCIncrementalCompactor : public AllStatic {
+ public:
+  static void Prologue(PageSpace* old_space);
+  static bool Epilogue(PageSpace* old_space);
+  static void Abort(PageSpace* old_space);
+
+ private:
+  static bool SelectEvacuationCandidates(PageSpace* old_space);
+  static void CheckFreeLists(PageSpace* old_space);
+
+  static bool HasEvacuationCandidates(PageSpace* old_space);
+  static void CheckPreEvacuate(PageSpace* old_space);
+  static void Evacuate(PageSpace* old_space);
+  static void CheckPostEvacuate(PageSpace* old_space);
+  static void FreeEvacuatedPages(PageSpace* old_space);
+  static void VerifyAfterIncrementalCompaction(PageSpace* old_space);
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_HEAP_INCREMENTAL_COMPACTOR_H_
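
For orientation, here is a stubbed sketch (not real VM control flow) of where the three entry points are intended to sit in a major GC cycle, mirroring the PageSpace changes later in this CL: Prologue when marking starts, Epilogue before old-space sweeping, with its return value deciding whether new-space still needs a separate sweep.

```c++
// Illustrative only; PageSpace is an opaque stand-in and the Sketch types are
// hypothetical, not the VM's classes.
struct PageSpace;

struct IncrementalCompactorSketch {
  static void Prologue(PageSpace*) { /* select candidates, mark their pages */ }
  static bool Epilogue(PageSpace*) { return true; /* evacuate and forward */ }
};

void MajorGCSketch(PageSpace* old_space) {
  IncrementalCompactorSketch::Prologue(old_space);  // When marking starts.
  // ... concurrent marking, interleaved with the mutator, adds objects that
  // reference evacuation candidates to the store buffer ...
  bool new_space_swept = IncrementalCompactorSketch::Epilogue(old_space);
  // ... sweep old-space; SweepNew() is skipped when the epilogue already
  // swept new-space ...
  (void)new_space_swept;
}
```
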
diff --git a/runtime/vm/heap/marker.cc b/runtime/vm/heap/marker.cc
index 2f326db..3987ae9 100644
--- a/runtime/vm/heap/marker.cc
+++ b/runtime/vm/heap/marker.cc
@@ -39,7 +39,8 @@
         deferred_work_list_(deferred_marking_stack),
         marked_bytes_(0),
         marked_micros_(0),
-        concurrent_(true) {}
+        concurrent_(true),
+        has_evacuation_candidate_(false) {}
   ~MarkingVisitorBase() { ASSERT(delayed_.IsEmpty()); }
 
   uintptr_t marked_bytes() const { return marked_bytes_; }
@@ -56,6 +57,11 @@
     return raw->untag()->IsMarked();
   }
 
+  void FinishedRoots() {
+    // Nothing to remember for roots. Don't carry over to objects.
+    has_evacuation_candidate_ = false;
+  }
+
   bool ProcessPendingWeakProperties() {
     bool more_to_mark = false;
     WeakPropertyPtr cur_weak = delayed_.weak_properties.Release();
@@ -74,6 +80,15 @@
         // The key is marked so we make sure to properly visit all pointers
         // originating from this weak property.
         cur_weak->untag()->VisitPointersNonvirtual(this);
+        if (has_evacuation_candidate_) {
+          has_evacuation_candidate_ = false;
+          if (!cur_weak->untag()->IsCardRemembered()) {
+            if (cur_weak->untag()->TryAcquireRememberedBit()) {
+              Thread::Current()->StoreBufferAddObjectGC(cur_weak);
+            }
+          }
+        }
+
       } else {
         // Requeue this weak property to be handled later.
         ASSERT(IsMarked(cur_weak));
@@ -85,10 +100,24 @@
     return more_to_mark;
   }
 
+  DART_NOINLINE
+  void YieldConcurrentMarking() {
+    work_list_.Flush();
+    new_work_list_.Flush();
+    deferred_work_list_.Flush();
+    Thread* thread = Thread::Current();
+    thread->StoreBufferReleaseGC();
+    page_space_->YieldConcurrentMarking();
+    thread->StoreBufferAcquireGC();
+  }
+
   void DrainMarkingStackWithPauseChecks() {
+    Thread* thread = Thread::Current();
     do {
       ObjectPtr obj;
       while (work_list_.Pop(&obj)) {
+        ASSERT(!has_evacuation_candidate_);
+
         if (obj->IsNewObject()) {
           Page* page = Page::Of(obj);
           uword top = page->original_top();
@@ -97,10 +126,7 @@
           if (top <= addr && addr < end) {
             new_work_list_.Push(obj);
             if (UNLIKELY(page_space_->pause_concurrent_marking())) {
-              work_list_.Flush();
-              new_work_list_.Flush();
-              deferred_work_list_.Flush();
-              page_space_->YieldConcurrentMarking();
+              YieldConcurrentMarking();
             }
             continue;
           }
@@ -124,18 +150,26 @@
           // Shape changing is not compatible with concurrent marking.
           deferred_work_list_.Push(obj);
           size = obj->untag()->HeapSize();
+        } else if (obj->untag()->IsCardRemembered()) {
+          ASSERT((class_id == kArrayCid) || (class_id == kImmutableArrayCid));
+          size = VisitCards(static_cast<ArrayPtr>(obj));
         } else {
           size = obj->untag()->VisitPointersNonvirtual(this);
         }
+        if (has_evacuation_candidate_) {
+          has_evacuation_candidate_ = false;
+          if (!obj->untag()->IsCardRemembered()) {
+            if (obj->untag()->TryAcquireRememberedBit()) {
+              thread->StoreBufferAddObjectGC(obj);
+            }
+          }
+        }
         if (!obj->IsNewObject()) {
           marked_bytes_ += size;
         }
 
         if (UNLIKELY(page_space_->pause_concurrent_marking())) {
-          work_list_.Flush();
-          new_work_list_.Flush();
-          deferred_work_list_.Flush();
-          page_space_->YieldConcurrentMarking();
+          YieldConcurrentMarking();
         }
       }
     } while (ProcessPendingWeakProperties());
@@ -146,6 +180,44 @@
     deferred_work_list_.Flush();
   }
 
+  intptr_t VisitCards(ArrayPtr obj) {
+    ASSERT(obj->IsArray() || obj->IsImmutableArray());
+    ASSERT(obj->untag()->IsCardRemembered());
+    CompressedObjectPtr* obj_from = obj->untag()->from();
+    CompressedObjectPtr* obj_to =
+        obj->untag()->to(Smi::Value(obj->untag()->length()));
+    uword heap_base = obj.heap_base();
+
+    Page* page = Page::Of(obj);
+    for (intptr_t i = 0, n = page->card_table_size(); i < n; i++) {
+      CompressedObjectPtr* card_from =
+          reinterpret_cast<CompressedObjectPtr*>(page) +
+          (i << Page::kSlotsPerCardLog2);
+      CompressedObjectPtr* card_to =
+          reinterpret_cast<CompressedObjectPtr*>(card_from) +
+          (1 << Page::kSlotsPerCardLog2) - 1;
+      // Minus 1 because to is inclusive.
+
+      if (card_from < obj_from) {
+        // First card overlaps with header.
+        card_from = obj_from;
+      }
+      if (card_to > obj_to) {
+        // Last card(s) may extend past the object. Array truncation can make
+        // this happen for more than one card.
+        card_to = obj_to;
+      }
+
+      VisitCompressedPointers(heap_base, card_from, card_to);
+      if (has_evacuation_candidate_) {
+        has_evacuation_candidate_ = false;
+        page->RememberCard(card_from);
+      }
+    }
+
+    return obj->untag()->HeapSize();
+  }
+
   void DrainMarkingStack() {
     while (ProcessMarkingStack(kIntptrMax)) {
     }
@@ -170,10 +242,13 @@
   }
 
   bool ProcessMarkingStack(intptr_t remaining_budget) {
+    Thread* thread = Thread::Current();
     do {
       // First drain the marking stacks.
       ObjectPtr obj;
       while (work_list_.Pop(&obj)) {
+        ASSERT(!has_evacuation_candidate_);
+
         if (sync && concurrent_ && obj->IsNewObject()) {
           Page* page = Page::Of(obj);
           uword top = page->original_top();
@@ -218,7 +293,19 @@
               return true;  // More to mark.
             }
           }
-          size = obj->untag()->VisitPointersNonvirtual(this);
+          if (obj->untag()->IsCardRemembered()) {
+            ASSERT((class_id == kArrayCid) || (class_id == kImmutableArrayCid));
+            size = VisitCards(static_cast<ArrayPtr>(obj));
+          } else {
+            size = obj->untag()->VisitPointersNonvirtual(this);
+          }
+        }
+        if (has_evacuation_candidate_) {
+          has_evacuation_candidate_ = false;
+          if (!obj->untag()->IsCardRemembered() &&
+              obj->untag()->TryAcquireRememberedBit()) {
+            thread->StoreBufferAddObjectGC(obj);
+          }
         }
         if (!obj->IsNewObject()) {
           marked_bytes_ += size;
@@ -254,19 +341,23 @@
   }
 
   void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
+    bool has_evacuation_candidate = false;
     for (ObjectPtr* current = first; current <= last; current++) {
-      MarkObject(LoadPointerIgnoreRace(current));
+      has_evacuation_candidate |= MarkObject(LoadPointerIgnoreRace(current));
     }
+    has_evacuation_candidate_ |= has_evacuation_candidate;
   }
 
 #if defined(DART_COMPRESSED_POINTERS)
   void VisitCompressedPointers(uword heap_base,
                                CompressedObjectPtr* first,
                                CompressedObjectPtr* last) override {
+    bool has_evacuation_candidate = false;
     for (CompressedObjectPtr* current = first; current <= last; current++) {
-      MarkObject(
+      has_evacuation_candidate |= MarkObject(
           LoadCompressedPointerIgnoreRace(current).Decompress(heap_base));
     }
+    has_evacuation_candidate_ |= has_evacuation_candidate;
   }
 #endif
 
@@ -291,17 +382,25 @@
     ObjectPtr raw_target =
         LoadCompressedPointerIgnoreRace(&raw_weak->untag()->target_)
             .Decompress(raw_weak->heap_base());
-    if (raw_target->IsHeapObject() && !raw_target->untag()->IsMarked()) {
-      // Target was white. Enqueue the weak reference. It is potentially dead.
-      // It might still be made alive by weak properties in next rounds.
-      ASSERT(IsMarked(raw_weak));
-      delayed_.weak_references.Enqueue(raw_weak);
+    if (raw_target->IsHeapObject()) {
+      if (!raw_target->untag()->IsMarked()) {
+        // Target was white. Enqueue the weak reference. It is potentially dead.
+        // It might still be made alive by weak properties in next rounds.
+        ASSERT(IsMarked(raw_weak));
+        delayed_.weak_references.Enqueue(raw_weak);
+      } else {
+        if (raw_target->untag()->IsEvacuationCandidate()) {
+          has_evacuation_candidate_ = true;
+        }
+      }
     }
     // Always visit the type argument.
     ObjectPtr raw_type_arguments =
         LoadCompressedPointerIgnoreRace(&raw_weak->untag()->type_arguments_)
             .Decompress(raw_weak->heap_base());
-    MarkObject(raw_type_arguments);
+    if (MarkObject(raw_type_arguments)) {
+      has_evacuation_candidate_ = true;
+    }
     return raw_weak->untag()->HeapSize();
   }
 
@@ -314,18 +413,24 @@
     ASSERT(IsMarked(raw_entry));
     delayed_.finalizer_entries.Enqueue(raw_entry);
     // Only visit token and next.
-    MarkObject(LoadCompressedPointerIgnoreRace(&raw_entry->untag()->token_)
-                   .Decompress(raw_entry->heap_base()));
-    MarkObject(LoadCompressedPointerIgnoreRace(&raw_entry->untag()->next_)
-                   .Decompress(raw_entry->heap_base()));
+    if (MarkObject(LoadCompressedPointerIgnoreRace(&raw_entry->untag()->token_)
+                       .Decompress(raw_entry->heap_base()))) {
+      has_evacuation_candidate_ = true;
+    }
+    if (MarkObject(LoadCompressedPointerIgnoreRace(&raw_entry->untag()->next_)
+                       .Decompress(raw_entry->heap_base()))) {
+      has_evacuation_candidate_ = true;
+    }
     return raw_entry->untag()->HeapSize();
   }
 
   void ProcessDeferredMarking() {
-    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ProcessDeferredMarking");
+    Thread* thread = Thread::Current();
+    TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessDeferredMarking");
 
     ObjectPtr obj;
     while (deferred_work_list_.Pop(&obj)) {
+      ASSERT(!has_evacuation_candidate_);
       ASSERT(obj->IsHeapObject());
       // We need to scan objects even if they were already scanned via ordinary
       // marking. An object may have changed since its ordinary scan and been
@@ -351,6 +456,13 @@
           marked_bytes_ += size;
         }
       }
+      if (has_evacuation_candidate_) {
+        has_evacuation_candidate_ = false;
+        if (!obj->untag()->IsCardRemembered() &&
+            obj->untag()->TryAcquireRememberedBit()) {
+          thread->StoreBufferAddObjectGC(obj);
+        }
+      }
     }
   }
 
@@ -425,6 +537,15 @@
     if (target->untag()->IsMarked()) {
       // Object already null (which is permanently marked) or has survived this
       // GC.
+      if (target->untag()->IsEvacuationCandidate()) {
+        if (parent->untag()->IsCardRemembered()) {
+          Page::Of(parent)->RememberCard(slot);
+        } else {
+          if (parent->untag()->TryAcquireRememberedBit()) {
+            Thread::Current()->StoreBufferAddObjectGC(parent);
+          }
+        }
+      }
       return false;
     }
     *slot = Object::null();
@@ -485,16 +606,16 @@
   }
 
   DART_FORCE_INLINE
-  void MarkObject(ObjectPtr obj) {
+  bool MarkObject(ObjectPtr obj) {
     if (obj->IsImmediateObject()) {
-      return;
+      return false;
     }
 
     if (sync && concurrent_ && obj->IsNewObject()) {
       if (TryAcquireMarkBit(obj)) {
         PushMarked(obj);
       }
-      return;
+      return false;
     }
 
     // While it might seem this is redundant with TryAcquireMarkBit, we must
@@ -507,26 +628,26 @@
     // was allocated after the concurrent marker started. It can read either a
     // zero or the header of an object allocated black, both of which appear
     // marked.
-    if (obj->untag()->IsMarkedIgnoreRace()) {
-      return;
+    uword tags = obj->untag()->tags_ignore_race();
+    if (UntaggedObject::IsMarked(tags)) {
+      return UntaggedObject::IsEvacuationCandidate(tags);
     }
 
-    intptr_t class_id = obj->GetClassId();
+    intptr_t class_id = UntaggedObject::ClassIdTag::decode(tags);
     ASSERT(class_id != kFreeListElement);
 
     if (sync && UNLIKELY(class_id == kInstructionsCid)) {
       // If this is the concurrent marker, this object may be non-writable due
       // to W^X (--write-protect-code).
       deferred_work_list_.Push(obj);
-      return;
+      return false;
     }
 
-    if (!TryAcquireMarkBit(obj)) {
-      // Already marked.
-      return;
+    if (TryAcquireMarkBit(obj)) {
+      PushMarked(obj);
     }
 
-    PushMarked(obj);
+    return UntaggedObject::IsEvacuationCandidate(tags);
   }
 
   PageSpace* page_space_;
@@ -537,6 +658,7 @@
   uintptr_t marked_bytes_;
   int64_t marked_micros_;
   bool concurrent_;
+  bool has_evacuation_candidate_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(MarkingVisitorBase);
 };
@@ -805,6 +927,7 @@
       num_busy_->fetch_add(1u);
       visitor_->set_concurrent(false);
       marker_->IterateRoots(visitor_);
+      visitor_->FinishedRoots();
 
       visitor_->ProcessDeferredMarking();
 
@@ -855,6 +978,8 @@
       // Don't MournFinalizerEntries here, do it on main thread, so that we
       // don't have to coordinate workers.
 
+      thread->ReleaseStoreBuffer();  // Ahead of IterateWeakRoots.
+      barrier_->Sync();
       marker_->IterateWeakRoots(thread);
       int64_t stop = OS::GetCurrentMonotonicMicros();
       visitor_->AddMicros(stop - start);
@@ -901,6 +1026,7 @@
       int64_t start = OS::GetCurrentMonotonicMicros();
 
       marker_->IterateRoots(visitor_);
+      visitor_->FinishedRoots();
 
       visitor_->DrainMarkingStackWithPauseChecks();
       int64_t stop = OS::GetCurrentMonotonicMicros();
@@ -1023,6 +1149,7 @@
       TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ConcurrentMark");
       int64_t start = OS::GetCurrentMonotonicMicros();
       IterateRoots(visitor);
+      visitor->FinishedRoots();
       int64_t stop = OS::GetCurrentMonotonicMicros();
       visitor->AddMicros(stop - start);
       if (FLAG_log_marker_tasks) {
@@ -1173,6 +1300,7 @@
       visitor.set_concurrent(false);
       ResetSlices();
       IterateRoots(&visitor);
+      visitor.FinishedRoots();
       visitor.ProcessDeferredMarking();
       visitor.DrainMarkingStack();
       visitor.ProcessDeferredMarking();
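
VisitCards above clamps each card's slot range because the first card of a large array also covers the object header, and the last card(s) can extend past a truncated array's end. The following standalone sketch shows that clamping arithmetic over plain integer slot indices; the 32-slots-per-card constant matches Page::kSlotsPerCardLog2, while the header size and array bounds are made up.

```c++
// Standalone sketch of the card-range clamping: clamp the first card to the
// array's first element slot and the last card to its last slot (inclusive).
#include <algorithm>
#include <cstdio>

int main() {
  constexpr int kSlotsPerCardLog2 = 5;  // 1 card = 32 slots.
  constexpr int kSlotsPerCard = 1 << kSlotsPerCardLog2;

  const int obj_from = 3;   // First element slot; slots 0..2 hold the header.
  const int obj_to = 1000;  // Last element slot, inclusive.

  const int num_cards = obj_to / kSlotsPerCard + 1;
  for (int i = 0; i < num_cards; i++) {
    int card_from = i << kSlotsPerCardLog2;
    int card_to = card_from + kSlotsPerCard - 1;  // Inclusive, like the VM's.
    card_from = std::max(card_from, obj_from);    // First card overlaps header.
    card_to = std::min(card_to, obj_to);          // Last card may pass the end.
    std::printf("card %2d: visit slots [%d, %d]\n", i, card_from, card_to);
  }
  return 0;
}
```
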
diff --git a/runtime/vm/heap/page.cc b/runtime/vm/heap/page.cc
index 08e998e..331fc23 100644
--- a/runtime/vm/heap/page.cc
+++ b/runtime/vm/heap/page.cc
@@ -114,6 +114,7 @@
   result->end_ = 0;
   result->survivor_end_ = 0;
   result->resolved_top_ = 0;
+  result->live_bytes_ = 0;
 
   if ((flags & kNew) != 0) {
     uword top = result->object_start();
@@ -171,7 +172,8 @@
 }
 
 void Page::VisitObjects(ObjectVisitor* visitor) const {
-  ASSERT(Thread::Current()->OwnsGCSafepoint());
+  ASSERT(Thread::Current()->OwnsGCSafepoint() ||
+         (Thread::Current()->task_kind() == Thread::kIncrementalCompactorTask));
   NoSafepointScope no_safepoint;
   uword obj_addr = object_start();
   uword end_addr = object_end();
@@ -207,9 +209,11 @@
   ASSERT(obj_addr == end_addr);
 }
 
-void Page::VisitRememberedCards(PredicateObjectPointerVisitor* visitor) {
+void Page::VisitRememberedCards(PredicateObjectPointerVisitor* visitor,
+                                bool only_marked) {
   ASSERT(Thread::Current()->OwnsGCSafepoint() ||
-         (Thread::Current()->task_kind() == Thread::kScavengerTask));
+         (Thread::Current()->task_kind() == Thread::kScavengerTask) ||
+         (Thread::Current()->task_kind() == Thread::kIncrementalCompactorTask));
   NoSafepointScope no_safepoint;
 
   if (card_table_ == nullptr) {
@@ -220,6 +224,7 @@
       static_cast<ArrayPtr>(UntaggedObject::FromAddr(object_start()));
   ASSERT(obj->IsArray() || obj->IsImmutableArray());
   ASSERT(obj->untag()->IsCardRemembered());
+  if (only_marked && !obj->untag()->IsMarked()) return;
   CompressedObjectPtr* obj_from = obj->untag()->from();
   CompressedObjectPtr* obj_to =
       obj->untag()->to(Smi::Value(obj->untag()->length()));
diff --git a/runtime/vm/heap/page.h b/runtime/vm/heap/page.h
index 9ecc2ea..8d106ec 100644
--- a/runtime/vm/heap/page.h
+++ b/runtime/vm/heap/page.h
@@ -72,6 +72,7 @@
     kVMIsolate = 1 << 3,
     kNew = 1 << 4,
     kEvacuationCandidate = 1 << 5,
+    kNeverEvacuate = 1 << 6,
   };
   bool is_executable() const { return (flags_ & kExecutable) != 0; }
   bool is_large() const { return (flags_ & kLarge) != 0; }
@@ -82,6 +83,21 @@
   bool is_evacuation_candidate() const {
     return (flags_ & kEvacuationCandidate) != 0;
   }
+  void set_evacuation_candidate(bool value) {
+    if (value) {
+      flags_ |= kEvacuationCandidate;
+    } else {
+      flags_ &= ~kEvacuationCandidate;
+    }
+  }
+  bool is_never_evacuate() const { return (flags_ & kNeverEvacuate) != 0; }
+  void set_never_evacuate(bool value) {
+    if (value) {
+      flags_ |= kNeverEvacuate;
+    } else {
+      flags_ &= ~kNeverEvacuate;
+    }
+  }
 
   Page* next() const { return next_; }
   void set_next(Page* next) { next_ = next; }
@@ -105,6 +121,11 @@
   }
   intptr_t used() const { return object_end() - object_start(); }
 
+  intptr_t live_bytes() const { return live_bytes_; }
+  void set_live_bytes(intptr_t value) { live_bytes_ = value; }
+  void add_live_bytes(intptr_t value) { live_bytes_ += value; }
+  void sub_live_bytes(intptr_t value) { live_bytes_ -= value; }
+
   ForwardingPage* forwarding_page() const { return forwarding_page_; }
   void RegisterUnwindingRecords();
   void UnregisterUnwindingRecords();
@@ -142,11 +163,12 @@
     ASSERT(obj->IsHeapObject());
     return reinterpret_cast<Page*>(static_cast<uword>(obj) & kPageMask);
   }
-
-  // Warning: This does not work for addresses on image pages or on large pages.
   static Page* Of(uword addr) {
     return reinterpret_cast<Page*>(addr & kPageMask);
   }
+  static Page* Of(void* addr) {
+    return reinterpret_cast<Page*>(reinterpret_cast<uword>(addr) & kPageMask);
+  }
 
   // 1 card = 32 slots.
   static constexpr intptr_t kSlotsPerCardLog2 = 5;
@@ -173,7 +195,8 @@
     return IsCardRemembered(reinterpret_cast<uword>(slot));
   }
 #endif
-  void VisitRememberedCards(PredicateObjectPointerVisitor* visitor);
+  void VisitRememberedCards(PredicateObjectPointerVisitor* visitor,
+                            bool only_marked = false);
   void ResetProgressBar();
 
   Thread* owner() const { return owner_; }
@@ -263,7 +286,8 @@
     intptr_t word_offset = index >> kBitsPerWordLog2;
     intptr_t bit_offset = index & (kBitsPerWord - 1);
     uword bit_mask = static_cast<uword>(1) << bit_offset;
-    card_table_[word_offset] |= bit_mask;
+    reinterpret_cast<std::atomic<uword>*>(&card_table_[word_offset])
+        ->fetch_or(bit_mask, std::memory_order_relaxed);
   }
   bool IsCardRemembered(uword slot) {
     ASSERT(Contains(slot));
@@ -316,7 +340,10 @@
   // value meets the allocation top. Called "SCAN" in the original Cheney paper.
   uword resolved_top_;
 
-  friend class CheckStoreBufferVisitor;
+  RelaxedAtomic<intptr_t> live_bytes_;
+
+  friend class CheckStoreBufferScavengeVisitor;
+  friend class CheckStoreBufferEvacuateVisitor;
   friend class GCCompactor;
   friend class PageSpace;
   template <bool>
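
Page::RememberCard now sets the card bit with an atomic fetch_or. The assumption behind the sketch below is that, with incremental compaction, GC threads can set bits in the same card-table word concurrently, where a plain non-atomic read-modify-write could drop an update. The demo shows only the atomic variant; names and sizes are illustrative.

```c++
// Two threads set different bits of the same word with fetch_or; no bit is
// lost.
#include <atomic>
#include <bitset>
#include <cstdint>
#include <cstdio>
#include <thread>

std::atomic<uint64_t> card_word{0};

void RememberCard(int bit) {
  card_word.fetch_or(uint64_t{1} << bit, std::memory_order_relaxed);
}

int main() {
  std::thread marker([] { for (int i = 0; i < 32; i++) RememberCard(i); });
  std::thread compactor([] { for (int i = 32; i < 64; i++) RememberCard(i); });
  marker.join();
  compactor.join();
  std::printf("bits set: %zu\n", std::bitset<64>(card_word.load()).count());
  return 0;
}
```
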
diff --git a/runtime/vm/heap/pages.cc b/runtime/vm/heap/pages.cc
index d63bf22..a95805b 100644
--- a/runtime/vm/heap/pages.cc
+++ b/runtime/vm/heap/pages.cc
@@ -10,6 +10,7 @@
 #include "vm/dart.h"
 #include "vm/heap/become.h"
 #include "vm/heap/compactor.h"
+#include "vm/heap/incremental_compactor.h"
 #include "vm/heap/marker.h"
 #include "vm/heap/safepoint.h"
 #include "vm/heap/sweeper.h"
@@ -346,6 +347,7 @@
     // Start of the newly allocated page is the allocated object.
     result = page->object_start();
     // Note: usage_.capacity_in_words is increased by AllocatePage.
+    Page::Of(result)->add_live_bytes(size);
     usage_.used_in_words += (size >> kWordSizeLog2);
     // Enqueue the remainder in the free list.
     uword free_start = result + size;
@@ -387,6 +389,7 @@
     if (page != nullptr) {
       result = page->object_start();
       // Note: usage_.capacity_in_words is increased by AllocateLargePage.
+      Page::Of(result)->add_live_bytes(size);
       usage_.used_in_words += (size >> kWordSizeLog2);
     }
   }
@@ -413,6 +416,9 @@
                                       is_locked);
       // usage_ is updated by the call above.
     } else {
+      if (!is_protected) {
+        Page::Of(result)->add_live_bytes(size);
+      }
       usage_.used_in_words += (size >> kWordSizeLog2);
     }
   } else {
@@ -1039,6 +1045,9 @@
   if (marker_ == nullptr) {
     ASSERT(phase() == kDone);
     marker_ = new GCMarker(isolate_group, heap_);
+    if (FLAG_use_incremental_compactor) {
+      GCIncrementalCompactor::Prologue(this);
+    }
   } else {
     ASSERT(phase() == kAwaitingFinalization);
   }
@@ -1059,15 +1068,26 @@
   delete marker_;
   marker_ = nullptr;
 
-  // Reset the freelists and setup sweeping.
-  for (intptr_t i = 0; i < num_freelists_; i++) {
-    freelists_[i].Reset();
+  if (FLAG_verify_store_buffer) {
+    VerifyStoreBuffers("Verifying remembered set after marking");
   }
 
   if (FLAG_verify_before_gc) {
     heap_->VerifyGC("Verifying before sweeping", kAllowMarked);
   }
 
+  bool has_reservation = MarkReservation();
+
+  bool new_space_is_swept = false;
+  if (FLAG_use_incremental_compactor) {
+    new_space_is_swept = GCIncrementalCompactor::Epilogue(this);
+  }
+
+  // Reset the freelists and set up sweeping.
+  for (intptr_t i = 0; i < num_freelists_; i++) {
+    freelists_[i].Reset();
+  }
+
   {
     // Executable pages are always swept immediately to simplify
     // code protection.
@@ -1090,8 +1110,6 @@
     }
   }
 
-  bool has_reservation = MarkReservation();
-
   {
     // Move pages to sweeper work lists.
     MutexLocker ml(&pages_lock_);
@@ -1105,23 +1123,24 @@
     }
   }
 
-  bool can_verify;
-  SweepNew();
+  if (!new_space_is_swept) {
+    SweepNew();
+  }
+  bool is_concurrent_sweep_running = false;
   if (compact) {
     Compact(thread);
     set_phase(kDone);
-    can_verify = true;
+    is_concurrent_sweep_running = true;
   } else if (FLAG_concurrent_sweep && has_reservation) {
     ConcurrentSweep(isolate_group);
-    can_verify = false;
+    is_concurrent_sweep_running = true;
   } else {
     SweepLarge();
     Sweep(/*exclusive*/ true);
     set_phase(kDone);
-    can_verify = true;
   }
 
-  if (FLAG_verify_after_gc && can_verify) {
+  if (FLAG_verify_after_gc && !is_concurrent_sweep_running) {
     heap_->VerifyGC("Verifying after sweeping", kForbidMarked);
   }
 
@@ -1149,6 +1168,163 @@
   }
 }
 
+class CollectStoreBufferEvacuateVisitor : public ObjectPointerVisitor {
+ public:
+  CollectStoreBufferEvacuateVisitor(ObjectSet* in_store_buffer, const char* msg)
+      : ObjectPointerVisitor(IsolateGroup::Current()),
+        in_store_buffer_(in_store_buffer),
+        msg_(msg) {}
+
+  void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
+    for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
+      ObjectPtr obj = *ptr;
+      RELEASE_ASSERT_WITH_MSG(obj->untag()->IsRemembered(), msg_);
+      RELEASE_ASSERT_WITH_MSG(obj->IsOldObject(), msg_);
+
+      RELEASE_ASSERT_WITH_MSG(!obj->untag()->IsCardRemembered(), msg_);
+      if (obj.GetClassId() == kArrayCid) {
+        const uword length =
+            Smi::Value(static_cast<UntaggedArray*>(obj.untag())->length());
+        RELEASE_ASSERT_WITH_MSG(!Array::UseCardMarkingForAllocation(length),
+                                msg_);
+      }
+      in_store_buffer_->Add(obj);
+    }
+  }
+
+#if defined(DART_COMPRESSED_POINTERS)
+  void VisitCompressedPointers(uword heap_base,
+                               CompressedObjectPtr* from,
+                               CompressedObjectPtr* to) override {
+    UNREACHABLE();  // Store buffer blocks are not compressed.
+  }
+#endif
+
+ private:
+  ObjectSet* const in_store_buffer_;
+  const char* msg_;
+
+  DISALLOW_COPY_AND_ASSIGN(CollectStoreBufferEvacuateVisitor);
+};
+
+class CheckStoreBufferEvacuateVisitor : public ObjectVisitor,
+                                        public ObjectPointerVisitor {
+ public:
+  CheckStoreBufferEvacuateVisitor(ObjectSet* in_store_buffer, const char* msg)
+      : ObjectVisitor(),
+        ObjectPointerVisitor(IsolateGroup::Current()),
+        in_store_buffer_(in_store_buffer),
+        msg_(msg) {}
+
+  void VisitObject(ObjectPtr obj) override {
+    if (obj->IsPseudoObject()) return;
+    RELEASE_ASSERT_WITH_MSG(obj->IsOldObject(), msg_);
+    if (!obj->untag()->IsMarked()) return;
+
+    if (obj->untag()->IsRemembered()) {
+      RELEASE_ASSERT_WITH_MSG(in_store_buffer_->Contains(obj), msg_);
+    } else {
+      RELEASE_ASSERT_WITH_MSG(!in_store_buffer_->Contains(obj), msg_);
+    }
+
+    visiting_ = obj;
+    is_remembered_ = obj->untag()->IsRemembered();
+    is_card_remembered_ = obj->untag()->IsCardRemembered();
+    if (is_card_remembered_) {
+      RELEASE_ASSERT_WITH_MSG(!is_remembered_, msg_);
+      RELEASE_ASSERT_WITH_MSG(Page::Of(obj)->progress_bar_ == 0, msg_);
+    }
+    obj->untag()->VisitPointers(this);
+  }
+
+  void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
+    for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
+      ObjectPtr obj = *ptr;
+      if (obj->IsHeapObject() && obj->untag()->IsEvacuationCandidate()) {
+        if (is_card_remembered_) {
+          if (!Page::Of(visiting_)->IsCardRemembered(ptr)) {
+            FATAL(
+                "%s: Old object %#" Px " references new object %#" Px
+                ", but the "
+                "slot's card is not remembered. Consider using rr to watch the "
+                "slot %p and reverse-continue to find the store with a missing "
+                "barrier.\n",
+                msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
+                ptr);
+          }
+        } else if (!is_remembered_) {
+          FATAL("%s: Old object %#" Px " references new object %#" Px
+                ", but it is "
+                "not in any store buffer. Consider using rr to watch the "
+                "slot %p and reverse-continue to find the store with a missing "
+                "barrier.\n",
+                msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
+                ptr);
+        }
+      }
+    }
+  }
+
+#if defined(DART_COMPRESSED_POINTERS)
+  void VisitCompressedPointers(uword heap_base,
+                               CompressedObjectPtr* from,
+                               CompressedObjectPtr* to) override {
+    for (CompressedObjectPtr* ptr = from; ptr <= to; ptr++) {
+      ObjectPtr obj = ptr->Decompress(heap_base);
+      if (obj->IsHeapObject() && obj->untag()->IsEvacuationCandidate()) {
+        if (is_card_remembered_) {
+          if (!Page::Of(visiting_)->IsCardRemembered(ptr)) {
+            FATAL(
+                "%s: Old object %#" Px " references new object %#" Px
+                ", but the "
+                "slot's card is not remembered. Consider using rr to watch the "
+                "slot %p and reverse-continue to find the store with a missing "
+                "barrier.\n",
+                msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
+                ptr);
+          }
+        } else if (!is_remembered_) {
+          FATAL("%s: Old object %#" Px " references new object %#" Px
+                ", but it is "
+                "not in any store buffer. Consider using rr to watch the "
+                "slot %p and reverse-continue to find the store with a missing "
+                "barrier.\n",
+                msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
+                ptr);
+        }
+      }
+    }
+  }
+#endif
+
+ private:
+  const ObjectSet* const in_store_buffer_;
+  ObjectPtr visiting_;
+  bool is_remembered_;
+  bool is_card_remembered_;
+  const char* msg_;
+};
+
+void PageSpace::VerifyStoreBuffers(const char* msg) {
+  ASSERT(msg != nullptr);
+  Thread* thread = Thread::Current();
+  StackZone stack_zone(thread);
+  Zone* zone = stack_zone.GetZone();
+
+  ObjectSet* in_store_buffer = new (zone) ObjectSet(zone);
+  heap_->AddRegionsToObjectSet(in_store_buffer);
+
+  {
+    CollectStoreBufferEvacuateVisitor visitor(in_store_buffer, msg);
+    heap_->isolate_group()->store_buffer()->VisitObjectPointers(&visitor);
+  }
+
+  {
+    CheckStoreBufferEvacuateVisitor visitor(in_store_buffer, msg);
+    heap_->old_space()->VisitObjects(&visitor);
+  }
+}
+
 void PageSpace::SweepNew() {
   // TODO(rmacnak): Run in parallel with SweepExecutable.
   TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "SweepNew");
@@ -1279,6 +1455,7 @@
     intptr_t block_size = block->HeapSize();
     if (remaining > 0) {
       usage_.used_in_words -= (remaining >> kWordSizeLog2);
+      Page::Of(freelist->top())->add_live_bytes(remaining);
       freelist->FreeLocked(freelist->top(), remaining);
     }
     freelist->set_top(reinterpret_cast<uword>(block));
@@ -1287,6 +1464,7 @@
     // the size of the whole bump area here and subtract the remaining size
     // when switching to a new area.
     usage_.used_in_words += (block_size >> kWordSizeLog2);
+    Page::Of(block)->add_live_bytes(block_size);
     remaining = block_size;
   }
   ASSERT(remaining >= size);
@@ -1307,6 +1485,7 @@
 uword PageSpace::TryAllocatePromoLockedSlow(FreeList* freelist, intptr_t size) {
   uword result = freelist->TryAllocateSmallLocked(size);
   if (result != 0) {
+    Page::Of(result)->add_live_bytes(size);
     freelist->AddUnaccountedSize(size);
     return result;
   }
@@ -1349,6 +1528,7 @@
   page->end_ = memory->end();
   page->survivor_end_ = 0;
   page->resolved_top_ = 0;
+  page->live_bytes_ = 0;
 
   MutexLocker ml(&pages_lock_);
   page->next_ = image_pages_;
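
PageSpace::VerifyStoreBuffers above follows the same two-pass shape as the scavenger's checker: first collect everything currently in the store buffer into a set, then walk marked old objects and require that any pointer to an evacuation candidate comes from a remembered object (or a remembered card). A hedged sketch of that shape with simplified stand-in types, no card table, and no real marking:

```c++
#include <cstdio>
#include <unordered_set>
#include <vector>

struct Obj {
  bool is_evac_candidate = false;
  bool remembered = false;
  std::vector<Obj*> slots;
};

bool CheckStoreBuffer(const std::vector<Obj*>& heap,
                      const std::unordered_set<Obj*>& in_store_buffer) {
  for (Obj* obj : heap) {
    // The remembered bit and store-buffer membership must agree.
    if (obj->remembered != (in_store_buffer.count(obj) != 0)) return false;
    for (Obj* target : obj->slots) {
      if (target != nullptr && target->is_evac_candidate && !obj->remembered) {
        std::printf("missing barrier: %p -> %p\n",
                    static_cast<void*>(obj), static_cast<void*>(target));
        return false;
      }
    }
  }
  return true;
}

int main() {
  Obj candidate;
  candidate.is_evac_candidate = true;
  Obj parent;
  parent.slots.push_back(&candidate);  // Pointer to a candidate...
  parent.remembered = true;            // ...so the parent must be remembered.
  std::unordered_set<Obj*> store_buffer = {&parent};
  std::printf("ok=%d\n", CheckStoreBuffer({&parent, &candidate}, store_buffer));
  return 0;
}
```
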
diff --git a/runtime/vm/heap/pages.h b/runtime/vm/heap/pages.h
index fe0bc18..1d7e42c 100644
--- a/runtime/vm/heap/pages.h
+++ b/runtime/vm/heap/pages.h
@@ -184,6 +184,11 @@
   }
   void EvaluateAfterLoading() {
     page_space_controller_.EvaluateAfterLoading(usage_);
+
+    MutexLocker ml(&pages_lock_);
+    for (Page* page = pages_; page != nullptr; page = page->next()) {
+      page->set_never_evacuate(true);
+    }
   }
 
   intptr_t UsedInWords() const { return usage_.used_in_words; }
@@ -412,6 +417,7 @@
   void FreePages(Page* pages);
 
   void CollectGarbageHelper(Thread* thread, bool compact, bool finalize);
+  void VerifyStoreBuffers(const char* msg);
   void SweepNew();
   void SweepLarge();
   void Sweep(bool exclusive);
@@ -496,6 +502,9 @@
   friend class PageSpaceController;
   friend class ConcurrentSweeperTask;
   friend class GCCompactor;
+  friend class GCIncrementalCompactor;
+  friend class PrologueTask;
+  friend class EpilogueTask;
   friend class CompactorTask;
   friend class Code;
 
diff --git a/runtime/vm/heap/pointer_block.cc b/runtime/vm/heap/pointer_block.cc
index e9c05c3..6eb1102 100644
--- a/runtime/vm/heap/pointer_block.cc
+++ b/runtime/vm/heap/pointer_block.cc
@@ -266,6 +266,9 @@
   }
 }
 
+template class PointerBlock<kStoreBufferBlockSize>;
+template class PointerBlock<kMarkingStackBlockSize>;
+
 template class BlockStack<kStoreBufferBlockSize>;
 template class BlockStack<kMarkingStackBlockSize>;
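
The explicit PointerBlock instantiations added above are a generic C++ pattern: when a class template's members are defined only in the .cc file, an explicit instantiation definition there emits the code that other translation units (here, presumably the incremental compactor walking store-buffer blocks directly) link against. A self-contained illustration with hypothetical names:

```c++
// --- block_sketch.h ---
template <int kSize>
class BlockSketch {
 public:
  void Push(void* ptr);  // Defined only in the .cc file.

 private:
  void* pointers_[kSize];
  int top_ = 0;
};

// --- block_sketch.cc ---
template <int kSize>
void BlockSketch<kSize>::Push(void* ptr) {
  pointers_[top_++] = ptr;
}

template class BlockSketch<1024>;  // Emits BlockSketch<1024>::Push here.
```
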
 
diff --git a/runtime/vm/heap/pointer_block.h b/runtime/vm/heap/pointer_block.h
index 2bb8aec0e..bd9a760 100644
--- a/runtime/vm/heap/pointer_block.h
+++ b/runtime/vm/heap/pointer_block.h
@@ -129,7 +129,7 @@
 
    private:
     Block* head_;
-    intptr_t length_;
+    RelaxedAtomic<intptr_t> length_;
     DISALLOW_COPY_AND_ASSIGN(List);
   };
 
diff --git a/runtime/vm/heap/scavenger.cc b/runtime/vm/heap/scavenger.cc
index 64e96f2..6e20d52 100644
--- a/runtime/vm/heap/scavenger.cc
+++ b/runtime/vm/heap/scavenger.cc
@@ -220,8 +220,11 @@
   }
 
   void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
+#if !defined(TARGET_ARCH_IA32)
+    // On IA32, pointers embedded in Instructions may be unaligned.
     ASSERT(Utils::IsAligned(first, sizeof(*first)));
     ASSERT(Utils::IsAligned(last, sizeof(*last)));
+#endif
     for (ObjectPtr* current = first; current <= last; current++) {
       ScavengePointer(current);
     }
@@ -379,9 +382,12 @@
     // ScavengePointer cannot be called recursively.
     ObjectPtr obj = *p;
 
-    if (obj->IsImmediateOrOldObject()) {
+    if (obj->IsImmediateObject()) {
       return false;
     }
+    if (obj->IsOldObject()) {
+      return obj->untag()->IsEvacuationCandidate();
+    }
 
     ObjectPtr new_obj = ScavengeObject(obj);
 
@@ -408,10 +414,12 @@
     // ScavengePointer cannot be called recursively.
     ObjectPtr obj = p->Decompress(heap_base);
 
-    // Could be tested without decompression.
-    if (obj->IsImmediateOrOldObject()) {
+    if (obj->IsImmediateObject()) {
       return false;
     }
+    if (obj->IsOldObject()) {
+      return obj->untag()->IsEvacuationCandidate();
+    }
 
     ObjectPtr new_obj = ScavengeObject(obj);
 
@@ -483,7 +491,7 @@
         // Promoted: update age/barrier tags.
         uword tags = static_cast<uword>(header);
         tags = UntaggedObject::OldAndNotRememberedBit::update(true, tags);
-        tags = UntaggedObject::NewBit::update(false, tags);
+        tags = UntaggedObject::NewOrEvacuationCandidateBit::update(false, tags);
         new_obj->untag()->tags_.store(tags, std::memory_order_relaxed);
       }
 
@@ -506,6 +514,7 @@
         if (new_obj->IsOldObject()) {
           // Abandon as a free list element.
           FreeListElement::AsElement(new_addr, size);
+          Page::Of(new_addr)->sub_live_bytes(size);
           bytes_promoted_ -= size;
         } else {
           // Undo to-space allocation.
@@ -862,9 +871,9 @@
   return old_size_in_words;
 }
 
-class CollectStoreBufferVisitor : public ObjectPointerVisitor {
+class CollectStoreBufferScavengeVisitor : public ObjectPointerVisitor {
  public:
-  CollectStoreBufferVisitor(ObjectSet* in_store_buffer, const char* msg)
+  CollectStoreBufferScavengeVisitor(ObjectSet* in_store_buffer, const char* msg)
       : ObjectPointerVisitor(IsolateGroup::Current()),
         in_store_buffer_(in_store_buffer),
         msg_(msg) {}
@@ -897,14 +906,16 @@
  private:
   ObjectSet* const in_store_buffer_;
   const char* msg_;
+
+  DISALLOW_COPY_AND_ASSIGN(CollectStoreBufferScavengeVisitor);
 };
 
-class CheckStoreBufferVisitor : public ObjectVisitor,
-                                public ObjectPointerVisitor {
+class CheckStoreBufferScavengeVisitor : public ObjectVisitor,
+                                        public ObjectPointerVisitor {
  public:
-  CheckStoreBufferVisitor(ObjectSet* in_store_buffer,
-                          const SemiSpace* to,
-                          const char* msg)
+  CheckStoreBufferScavengeVisitor(ObjectSet* in_store_buffer,
+                                  const SemiSpace* to,
+                                  const char* msg)
       : ObjectVisitor(),
         ObjectPointerVisitor(IsolateGroup::Current()),
         in_store_buffer_(in_store_buffer),
@@ -915,8 +926,11 @@
     if (obj->IsPseudoObject()) return;
     RELEASE_ASSERT_WITH_MSG(obj->IsOldObject(), msg_);
 
-    RELEASE_ASSERT_WITH_MSG(
-        obj->untag()->IsRemembered() == in_store_buffer_->Contains(obj), msg_);
+    if (obj->untag()->IsRemembered()) {
+      RELEASE_ASSERT_WITH_MSG(in_store_buffer_->Contains(obj), msg_);
+    } else {
+      RELEASE_ASSERT_WITH_MSG(!in_store_buffer_->Contains(obj), msg_);
+    }
 
     visiting_ = obj;
     is_remembered_ = obj->untag()->IsRemembered();
@@ -999,6 +1013,8 @@
   bool is_remembered_;
   bool is_card_remembered_;
   const char* msg_;
+
+  DISALLOW_COPY_AND_ASSIGN(CheckStoreBufferScavengeVisitor);
 };
 
 void Scavenger::VerifyStoreBuffers(const char* msg) {
@@ -1011,12 +1027,12 @@
   heap_->AddRegionsToObjectSet(in_store_buffer);
 
   {
-    CollectStoreBufferVisitor visitor(in_store_buffer, msg);
+    CollectStoreBufferScavengeVisitor visitor(in_store_buffer, msg);
     heap_->isolate_group()->store_buffer()->VisitObjectPointers(&visitor);
   }
 
   {
-    CheckStoreBufferVisitor visitor(in_store_buffer, to_, msg);
+    CheckStoreBufferScavengeVisitor visitor(in_store_buffer, to_, msg);
     heap_->old_space()->VisitObjects(&visitor);
   }
 }
@@ -1612,10 +1628,20 @@
     ObjectPtr parent,
     CompressedObjectPtr* slot) {
   ObjectPtr target = slot->Decompress(parent->heap_base());
-  if (target->IsImmediateOrOldObject()) {
+  if (target->IsImmediateObject()) {
     // Object already null (which is old) or not touched during this GC.
     return false;
   }
+  if (target->IsOldObject()) {
+    if (parent->IsOldObject() && target->untag()->IsEvacuationCandidate()) {
+      if (!parent->untag()->IsCardRemembered()) {
+        if (parent->untag()->TryAcquireRememberedBit()) {
+          Thread::Current()->StoreBufferAddObjectGC(parent);
+        }
+      }
+    }
+    return false;
+  }
   uword header = ReadHeaderRelaxed(target);
   if (IsForwarding(header)) {
     // Get the new location of the object.
@@ -1644,7 +1670,8 @@
 
 void Scavenger::VisitObjects(ObjectVisitor* visitor) const {
   ASSERT(Thread::Current()->OwnsGCSafepoint() ||
-         (Thread::Current()->task_kind() == Thread::kMarkerTask));
+         (Thread::Current()->task_kind() == Thread::kMarkerTask) ||
+         (Thread::Current()->task_kind() == Thread::kIncrementalCompactorTask));
   for (Page* page = to_->head(); page != nullptr; page = page->next()) {
     page->VisitObjects(visitor);
   }
@@ -1814,7 +1841,6 @@
     }
   }
   ASSERT(promotion_stack_.IsEmpty());
-  heap_->old_space()->ResumeConcurrentMarking();
 
   // Scavenge finished. Run accounting.
   int64_t end = OS::GetCurrentMonotonicMicros();
@@ -1822,6 +1848,7 @@
       start, end, usage_before, GetCurrentUsage(), promo_candidate_words,
       bytes_promoted >> kWordSizeLog2, abandoned_bytes >> kWordSizeLog2));
   Epilogue(from);
+  heap_->old_space()->ResumeConcurrentMarking();
 
   if (FLAG_verify_after_gc) {
     heap_->WaitForSweeperTasksAtSafepoint(thread);
@@ -1908,7 +1935,8 @@
         uword from_header = static_cast<uword>(to_header);
         from_header =
             UntaggedObject::OldAndNotRememberedBit::update(false, from_header);
-        from_header = UntaggedObject::NewBit::update(true, from_header);
+        from_header = UntaggedObject::NewOrEvacuationCandidateBit::update(
+            true, from_header);
 
         WriteHeaderRelaxed(from_obj, from_header);
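
The scavenger's pointer predicates above now return true for old targets that are evacuation candidates, so the parent stays in (or is added back to) the remembered set for forwarding at evacuation time. A sketch of that predicate shape with stand-in types; treating a target that remains in new-space as "keep remembered" is an assumption about code outside this hunk.

```c++
#include <vector>

enum class Target { kImmediate, kOldPlain, kOldEvacCandidate, kNewSurvivor };

// Returns true if this slot requires the parent to stay in the remembered set.
bool VisitSlot(Target target) {
  switch (target) {
    case Target::kImmediate:
      return false;  // Smis never need a barrier.
    case Target::kOldPlain:
      return false;  // Old and not being moved: the entry can be dropped.
    case Target::kOldEvacCandidate:
      return true;   // Slot must be forwarded when the page is evacuated.
    case Target::kNewSurvivor:
      return true;   // Slot still points into new-space.
  }
  return false;
}

bool VisitObject(const std::vector<Target>& slots) {
  bool keep_remembered = false;
  for (Target t : slots) keep_remembered |= VisitSlot(t);
  return keep_remembered;
}
```
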
 
diff --git a/runtime/vm/heap/sweeper.cc b/runtime/vm/heap/sweeper.cc
index ff9a3da..b4d4b61 100644
--- a/runtime/vm/heap/sweeper.cc
+++ b/runtime/vm/heap/sweeper.cc
@@ -102,6 +102,7 @@
       }
       // Only add to the free list if not covering the whole page.
       if ((current == start) && (free_end == end)) {
+        page->set_live_bytes(0);
         return false;  // Not in use.
       }
       obj_size = free_end - current;
@@ -131,6 +132,7 @@
   }
   ASSERT(current == end);
   ASSERT(used_in_bytes != 0);
+  page->set_live_bytes(used_in_bytes);
   return true;  // In use.
 }
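
The sweeper now records per-page live bytes. Below is a hedged sketch, assuming that candidate selection (SelectEvacuationCandidates is not shown in this part of the CL) picks pages whose live fraction falls below some threshold and skips pages flagged never-evacuate; the threshold, field names, and types are illustrative only.

```c++
#include <cstdint>
#include <vector>

struct PageInfo {
  intptr_t size_in_bytes = 0;
  intptr_t live_bytes = 0;
  bool never_evacuate = false;
  bool evacuation_candidate = false;
};

void SelectCandidates(std::vector<PageInfo>& pages, double max_live_fraction) {
  for (PageInfo& page : pages) {
    if (page.never_evacuate || page.size_in_bytes == 0) continue;
    const double live_fraction =
        static_cast<double>(page.live_bytes) / page.size_in_bytes;
    page.evacuation_candidate = live_fraction <= max_live_fraction;
  }
}
```
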
 
diff --git a/runtime/vm/image_snapshot.cc b/runtime/vm/image_snapshot.cc
index f919439..5e8a758 100644
--- a/runtime/vm/image_snapshot.cc
+++ b/runtime/vm/image_snapshot.cc
@@ -601,7 +601,7 @@
     UntaggedObject::AlwaysSetBit::encode(true) |
     UntaggedObject::NotMarkedBit::encode(false) |
     UntaggedObject::OldAndNotRememberedBit::encode(true) |
-    UntaggedObject::NewBit::encode(false);
+    UntaggedObject::NewOrEvacuationCandidateBit::encode(false);
 
 uword ImageWriter::GetMarkedTags(classid_t cid,
                                  intptr_t size,
diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc
index 09c59e7..d7b9321 100644
--- a/runtime/vm/isolate.cc
+++ b/runtime/vm/isolate.cc
@@ -2804,7 +2804,8 @@
            (thread->task_kind() == Thread::kMutatorTask) ||
            (thread->task_kind() == Thread::kMarkerTask) ||
            (thread->task_kind() == Thread::kCompactorTask) ||
-           (thread->task_kind() == Thread::kScavengerTask));
+           (thread->task_kind() == Thread::kScavengerTask) ||
+           (thread->task_kind() == Thread::kIncrementalCompactorTask));
     for (Isolate* isolate : isolates_) {
       function(isolate);
     }
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index f9d2e00..5b2cfa2 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -2735,7 +2735,7 @@
   tags = UntaggedObject::AlwaysSetBit::update(true, tags);
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(is_old, tags);
-  tags = UntaggedObject::NewBit::update(!is_old, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(!is_old, tags);
   tags = UntaggedObject::ImmutableBit::update(
       Object::ShouldHaveImmutabilityBitSet(class_id), tags);
 #if defined(HASH_IN_OBJECT_HEADER)
diff --git a/runtime/vm/object_graph_copy.cc b/runtime/vm/object_graph_copy.cc
index 4958c39..4e57cab 100644
--- a/runtime/vm/object_graph_copy.cc
+++ b/runtime/vm/object_graph_copy.cc
@@ -235,7 +235,7 @@
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(false, tags);
   tags = UntaggedObject::CanonicalBit::update(false, tags);
-  tags = UntaggedObject::NewBit::update(true, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(true, tags);
   tags = UntaggedObject::ImmutableBit::update(
       IsUnmodifiableTypedDataViewClassId(cid), tags);
 #if defined(HASH_IN_OBJECT_HEADER)
diff --git a/runtime/vm/raw_object.cc b/runtime/vm/raw_object.cc
index 92bfe52..f669e81 100644
--- a/runtime/vm/raw_object.cc
+++ b/runtime/vm/raw_object.cc
@@ -48,16 +48,12 @@
   // Validate that the tags_ field is sensible.
   uword tags = tags_;
   if (IsNewObject()) {
-    if (!NewBit::decode(tags)) {
+    if (!NewOrEvacuationCandidateBit::decode(tags)) {
       FATAL("New object missing kNewBit: %" Px "\n", tags);
     }
     if (OldAndNotRememberedBit::decode(tags)) {
       FATAL("New object has kOldAndNotRememberedBit: %" Px "\n", tags);
     }
-  } else {
-    if (NewBit::decode(tags)) {
-      FATAL("Old object has kNewBit: %" Px "\n", tags);
-    }
   }
   const intptr_t class_id = ClassIdTag::decode(tags);
   if (!isolate_group->class_table()->IsValidIndex(class_id)) {
diff --git a/runtime/vm/raw_object.h b/runtime/vm/raw_object.h
index 6531e31..da30a6f 100644
--- a/runtime/vm/raw_object.h
+++ b/runtime/vm/raw_object.h
@@ -10,6 +10,7 @@
 #endif
 
 #include "platform/assert.h"
+#include "platform/thread_sanitizer.h"
 #include "vm/class_id.h"
 #include "vm/compiler/method_recognizer.h"
 #include "vm/compiler/runtime_api.h"
@@ -162,10 +163,10 @@
   enum TagBits {
     kCardRememberedBit = 0,
     kCanonicalBit = 1,
-    kNotMarkedBit = 2,            // Incremental barrier target.
-    kNewBit = 3,                  // Generational barrier target.
-    kAlwaysSetBit = 4,            // Incremental barrier source.
-    kOldAndNotRememberedBit = 5,  // Generational barrier source.
+    kNotMarkedBit = 2,                 // Incremental barrier target.
+    kNewOrEvacuationCandidateBit = 3,  // Generational barrier target.
+    kAlwaysSetBit = 4,                 // Incremental barrier source.
+    kOldAndNotRememberedBit = 5,       // Generational barrier source.
     kImmutableBit = 6,
     kReservedBit = 7,
 
@@ -177,11 +178,13 @@
     kHashTagSize = 32,
   };
 
-  static constexpr intptr_t kGenerationalBarrierMask = 1 << kNewBit;
+  static constexpr intptr_t kGenerationalBarrierMask =
+      1 << kNewOrEvacuationCandidateBit;
   static constexpr intptr_t kIncrementalBarrierMask = 1 << kNotMarkedBit;
   static constexpr intptr_t kBarrierOverlapShift = 2;
   COMPILE_ASSERT(kNotMarkedBit + kBarrierOverlapShift == kAlwaysSetBit);
-  COMPILE_ASSERT(kNewBit + kBarrierOverlapShift == kOldAndNotRememberedBit);
+  COMPILE_ASSERT(kNewOrEvacuationCandidateBit + kBarrierOverlapShift ==
+                 kOldAndNotRememberedBit);
 
   // The bit in the Smi tag position must be something that can be set to 0
   // for a dead filler object of either generation.
@@ -246,7 +249,8 @@
 
   class NotMarkedBit : public BitField<uword, bool, kNotMarkedBit, 1> {};
 
-  class NewBit : public BitField<uword, bool, kNewBit, 1> {};
+  class NewOrEvacuationCandidateBit
+      : public BitField<uword, bool, kNewOrEvacuationCandidateBit, 1> {};
 
   class CanonicalBit : public BitField<uword, bool, kCanonicalBit, 1> {};
 
@@ -292,14 +296,12 @@
   }
 
   uword tags() const { return tags_; }
+  uword tags_ignore_race() const { return tags_.load_ignore_race(); }
 
   // Support for GC marking bit. Marked objects are either grey (not yet
   // visited) or black (already visited).
   static bool IsMarked(uword tags) { return !NotMarkedBit::decode(tags); }
   bool IsMarked() const { return !tags_.Read<NotMarkedBit>(); }
-  bool IsMarkedIgnoreRace() const {
-    return !tags_.ReadIgnoreRace<NotMarkedBit>();
-  }
   void SetMarkBit() {
     ASSERT(!IsMarked());
     tags_.UpdateBool<NotMarkedBit>(false);
@@ -324,6 +326,25 @@
   DART_WARN_UNUSED_RESULT
   bool TryAcquireMarkBit() { return tags_.TryClear<NotMarkedBit>(); }
 
+  static bool IsEvacuationCandidate(uword tags) {
+    return NewOrEvacuationCandidateBit::decode(tags);
+  }
+  bool IsEvacuationCandidate() {
+    return tags_.Read<NewOrEvacuationCandidateBit>();
+  }
+  void SetIsEvacuationCandidate() {
+    ASSERT(IsOldObject());
+    tags_.UpdateBool<NewOrEvacuationCandidateBit>(true);
+  }
+  void SetIsEvacuationCandidateUnsynchronized() {
+    ASSERT(IsOldObject());
+    tags_.UpdateUnsynchronized<NewOrEvacuationCandidateBit>(true);
+  }
+  void ClearIsEvacuationCandidateUnsynchronized() {
+    ASSERT(IsOldObject());
+    tags_.UpdateUnsynchronized<NewOrEvacuationCandidateBit>(false);
+  }
+
   // Canonical objects have the property that two canonical objects are
   // logically equal iff they are the same object (pointer equal).
   bool IsCanonical() const { return tags_.Read<CanonicalBit>(); }
@@ -3223,6 +3244,8 @@
   template <typename Table, bool kAllCanonicalObjectsAreIncludedIntoSet>
   friend class CanonicalSetDeserializationCluster;
   friend class Page;
+  template <bool>
+  friend class MarkingVisitorBase;
   friend class FastObjectCopy;  // For initializing fields.
   friend void UpdateLengthField(intptr_t, ObjectPtr, ObjectPtr);  // length_
 };
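
With tags_ignore_race() above, MarkObject reads the header word once and decodes both the mark state and the evacuation-candidate bit from that single snapshot instead of re-reading the header. A minimal sketch of decoding several bits from one relaxed load; the bit positions and names here are illustrative, not the VM's layout.

```c++
#include <atomic>
#include <cstdint>

constexpr uintptr_t kNotMarkedBit = uintptr_t{1} << 2;
constexpr uintptr_t kEvacuationCandidateBit = uintptr_t{1} << 3;

struct Header {
  std::atomic<uintptr_t> tags{kNotMarkedBit};  // Starts unmarked.
};

struct MarkResult {
  bool already_marked;
  bool is_evacuation_candidate;
};

MarkResult Inspect(const Header& header) {
  // One load: both answers come from the same snapshot, so they stay mutually
  // consistent even while other threads update the word.
  const uintptr_t tags = header.tags.load(std::memory_order_relaxed);
  return {(tags & kNotMarkedBit) == 0, (tags & kEvacuationCandidateBit) != 0};
}
```
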
diff --git a/runtime/vm/thread.cc b/runtime/vm/thread.cc
index 431f209..e511031 100644
--- a/runtime/vm/thread.cc
+++ b/runtime/vm/thread.cc
@@ -670,7 +670,7 @@
 }
 
 void Thread::ReleaseStoreBuffer() {
-  ASSERT(IsAtSafepoint() || OwnsSafepoint());
+  ASSERT(IsAtSafepoint() || OwnsSafepoint() || task_kind_ == kMarkerTask);
   if (store_buffer_block_ == nullptr || store_buffer_block_->IsEmpty()) {
     return;  // Nothing to release.
   }
@@ -813,6 +813,17 @@
   store_buffer_block_ = isolate_group()->store_buffer()->PopNonFullBlock();
 }
 
+void Thread::StoreBufferReleaseGC() {
+  StoreBufferBlock* block = store_buffer_block_;
+  store_buffer_block_ = nullptr;
+  isolate_group()->store_buffer()->PushBlock(block,
+                                             StoreBuffer::kIgnoreThreshold);
+}
+
+void Thread::StoreBufferAcquireGC() {
+  store_buffer_block_ = isolate_group()->store_buffer()->PopNonFullBlock();
+}
+
 void Thread::MarkingStackBlockProcess() {
   MarkingStackRelease();
   MarkingStackAcquire();
@@ -965,7 +976,6 @@
   void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
     for (; first != last + 1; first++) {
       ObjectPtr obj = *first;
-      // Stores into new-space objects don't need a write barrier.
       if (obj->IsImmediateObject()) continue;
 
       // To avoid adding too much work into the remembered set, skip large
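
StoreBufferReleaseGC and StoreBufferAcquireGC let a GC worker hand its thread-local store-buffer block back to the shared buffer before parking at a safepoint (see YieldConcurrentMarking in marker.cc) and take a fresh block afterwards. A minimal sketch of that release/acquire pattern with stand-in types, not the VM's StoreBuffer:

```c++
#include <mutex>
#include <utility>
#include <vector>

struct StoreBufferSketch {
  std::mutex mutex;
  std::vector<std::vector<void*>> full_blocks;

  void PushBlock(std::vector<void*> block) {
    std::lock_guard<std::mutex> lock(mutex);
    full_blocks.push_back(std::move(block));
  }
  std::vector<void*> PopNonFullBlock() { return {}; }  // Fresh, empty block.
};

thread_local std::vector<void*> local_block;

void YieldForSafepoint(StoreBufferSketch& buffer) {
  buffer.PushBlock(std::move(local_block));  // Mirrors StoreBufferReleaseGC().
  // ... block at the safepoint while the pause runs ...
  local_block = buffer.PopNonFullBlock();    // Mirrors StoreBufferAcquireGC().
}
```
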
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index 56e6f4b..9bbe666 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -351,6 +351,7 @@
     kCompactorTask = 0x10,
     kScavengerTask = 0x20,
     kSampleBlockTask = 0x40,
+    kIncrementalCompactorTask = 0x80,
   };
   // Converts a TaskKind to its corresponding C-String name.
   static const char* TaskKindToCString(TaskKind kind);
@@ -662,6 +663,8 @@
   }
 #endif
   void StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy);
+  void StoreBufferReleaseGC();
+  void StoreBufferAcquireGC();
   static intptr_t store_buffer_block_offset() {
     return OFFSET_OF(Thread, store_buffer_block_);
   }