[vm, gc] Don't eagerly evacuate new space on old allocation failure, take 2.

Large allocations and direct old-space allocations are not signals that the current new-space objects will be long-lived.

Still perform a scavenge to handle (dead new -> dead old) references even when new-space usage is low, but make it a non-evacuating scavenge.
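
To illustrate why the scavenge still runs first: because the mark-sweep treats new space as roots, a (dead new -> dead old) reference would otherwise keep the dead old-space object alive. The following standalone model is an illustration only, not VM code; the object graph, names, and Mark helper are invented for the example:

  #include <cstdio>
  #include <map>
  #include <set>
  #include <vector>

  // Transitive marking from a work set over a reference graph.
  static std::set<int> Mark(const std::map<int, std::vector<int>>& refs,
                            std::set<int> work) {
    std::set<int> live;
    while (!work.empty()) {
      int obj = *work.begin();
      work.erase(work.begin());
      if (!live.insert(obj).second) continue;  // Already marked.
      auto it = refs.find(obj);
      if (it != refs.end()) work.insert(it->second.begin(), it->second.end());
    }
    return live;
  }

  int main() {
    enum { kNewObj = 1, kOldObj = 2 };
    // kNewObj (new space) holds the only reference to kOldObj (old space),
    // and neither object is reachable from the roots.
    std::map<int, std::vector<int>> refs = {{kNewObj, {kOldObj}}};
    std::set<int> roots;
    std::set<int> new_space = {kNewObj};

    // Mark-sweep alone: new space is added to the root set, so the dead
    // kNewObj keeps the dead kOldObj alive. Prints 1.
    std::set<int> work = roots;
    work.insert(new_space.begin(), new_space.end());
    std::printf("mark-sweep only, old_obj live: %d\n",
                (int)Mark(refs, work).count(kOldObj));

    // Scavenge first: kNewObj is not reachable from the true roots, so it
    // dies, and the following mark-sweep can reclaim kOldObj. Prints 0.
    std::set<int> live_from_roots = Mark(refs, roots);
    work = roots;
    for (int obj : new_space)
      if (live_from_roots.count(obj)) work.insert(obj);
    std::printf("scavenge + mark-sweep, old_obj live: %d\n",
                (int)Mark(refs, work).count(kOldObj));
    return 0;
  }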

Change-Id: I4008fb163c4536abfc7ba42410ec57917a69786a
Reviewed-on: https://dart-review.googlesource.com/72340
Reviewed-by: Zach Anderson <zra@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
diff --git a/runtime/vm/compiler/jit/compiler.cc b/runtime/vm/compiler/jit/compiler.cc
index 6e19591..371fafa 100644
--- a/runtime/vm/compiler/jit/compiler.cc
+++ b/runtime/vm/compiler/jit/compiler.cc
@@ -1828,7 +1828,7 @@
   // TODO(srdjan): Checking different strategy for collecting garbage
   // accumulated by background compiler.
   if (isolate_->heap()->NeedsGarbageCollection()) {
-    isolate_->heap()->CollectAllGarbage();
+    isolate_->heap()->CollectMostGarbage();
   }
   {
     MonitorLocker ml(queue_monitor_);
diff --git a/runtime/vm/heap/heap.cc b/runtime/vm/heap/heap.cc
index 9a281b0..de9911f 100644
--- a/runtime/vm/heap/heap.cc
+++ b/runtime/vm/heap/heap.cc
@@ -95,8 +95,9 @@
     if (addr != 0) {
       return addr;
     }
-    // All GC tasks finished without allocating successfully. Run a full GC.
-    CollectAllGarbage();
+    // All GC tasks finished without allocating successfully. Collect both
+    // generations.
+    CollectMostGarbage();
     addr = old_space_.TryAllocate(size, type);
     if (addr != 0) {
       return addr;
@@ -144,7 +145,7 @@
     ASSERT(space == kOld);
     old_space_.AllocateExternal(cid, size);
     if (old_space_.NeedsGarbageCollection()) {
-      CollectAllGarbage(kExternal);
+      CollectMostGarbage(kExternal);
     }
   }
 }
@@ -460,6 +461,12 @@
   }
 }
 
+void Heap::CollectMostGarbage(GCReason reason) {
+  Thread* thread = Thread::Current();
+  CollectNewSpaceGarbage(thread, reason);
+  CollectOldSpaceGarbage(thread, kMarkSweep, reason);
+}
+
 void Heap::CollectAllGarbage(GCReason reason) {
   Thread* thread = Thread::Current();
 
diff --git a/runtime/vm/heap/heap.h b/runtime/vm/heap/heap.h
index d4c2cf1..7f2689d 100644
--- a/runtime/vm/heap/heap.h
+++ b/runtime/vm/heap/heap.h
@@ -114,13 +114,24 @@
   void NotifyIdle(int64_t deadline);
   void NotifyLowMemory();
 
+  // Collect a single generation.
   void CollectGarbage(Space space);
   void CollectGarbage(GCType type, GCReason reason);
+
+  // Collect both generations by performing a scavenge followed by a
+  // mark-sweep. This function may not collect all unreachable objects. Because
+  // mark-sweep treats new space as roots, a cycle between unreachable old and
+  // new objects will not be collected until the new objects are promoted.
+  // Verification based on heap iteration should instead use CollectAllGarbage.
+  void CollectMostGarbage(GCReason reason = kFull);
+
+  // Collect both generations by performing an evacuation followed by a
+  // mark-sweep. This function will collect all unreachable objects.
   void CollectAllGarbage(GCReason reason = kFull);
+
   bool NeedsGarbageCollection() const {
     return old_space_.NeedsGarbageCollection();
   }
-
   void WaitForSweeperTasks(Thread* thread);
 
   // Enables growth control on the page space heaps.  This should be
diff --git a/runtime/vm/stub_code.cc b/runtime/vm/stub_code.cc
index 82e4767..8b1a357 100644
--- a/runtime/vm/stub_code.cc
+++ b/runtime/vm/stub_code.cc
@@ -178,7 +178,7 @@
       }
       Isolate* isolate = thread->isolate();
       if (isolate->heap()->NeedsGarbageCollection()) {
-        isolate->heap()->CollectAllGarbage();
+        isolate->heap()->CollectMostGarbage();
       }
     }
 #ifndef PRODUCT
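
As a usage note, here is a minimal sketch of how a caller might choose between the two entry points documented in heap.h above. MaybeCollect and the for_verification flag are hypothetical; only Heap::NeedsGarbageCollection, Heap::CollectMostGarbage, and Heap::CollectAllGarbage come from this patch:

  // Hypothetical call-site sketch; only the Heap methods are from the patch.
  void MaybeCollect(Heap* heap, bool for_verification) {
    if (for_verification) {
      // Evacuation + mark-sweep: reclaims every unreachable object, including
      // cycles spanning old and new space, so heap iteration sees no garbage.
      heap->CollectAllGarbage();
    } else if (heap->NeedsGarbageCollection()) {
      // Scavenge + mark-sweep: cheaper, but an unreachable old<->new cycle
      // may survive until its new-space half is promoted.
      heap->CollectMostGarbage();
    }
  }

This mirrors the call sites changed above: routine collection triggers (the background compiler queue, stubs, and external allocations) switch to CollectMostGarbage, while CollectAllGarbage remains for callers that must observe a fully collected heap.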