[vm/compiler] Use loop framework for register allocator

Rationale:
Rather than relying on a separate loop detector, rely
on the new loop framework, which avoids code duplication
and ensures any improvement in loop detection/handling
will benefit this phase too. Note, most of the time, the
same loops are discovered with a few exceptions (which
is okay, since this is "just" heuristic usage). This CL
also simplifies loop detection a bit.

https://github.com/dart-lang/sdk/issues/34473

Change-Id: I1a1b19b99a698c74822473d2a1fe370287c1ade4
Reviewed-on: https://dart-review.googlesource.com/c/80523
Commit-Queue: Aart Bik <ajcbik@google.com>
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
Reviewed-by: Alexander Markov <alexmarkov@google.com>
diff --git a/runtime/vm/compiler/backend/flow_graph.cc b/runtime/vm/compiler/backend/flow_graph.cc
index d00e961..cd8350c 100644
--- a/runtime/vm/compiler/backend/flow_graph.cc
+++ b/runtime/vm/compiler/backend/flow_graph.cc
@@ -1604,25 +1604,6 @@
   return loop_blocks;
 }
 
-void FlowGraph::LinkToInner(LoopInfo* loop) const {
-  for (; loop != nullptr; loop = loop->next()) {
-    if (FLAG_trace_optimization) {
-      THR_Print("%s {", loop->ToCString());
-      for (BitVector::Iterator it(loop->blocks()); !it.Done(); it.Advance()) {
-        THR_Print(" B%" Pd, preorder_[it.Current()]->block_id());
-      }
-      THR_Print(" }\n");
-    }
-    LinkToInner(loop->inner());
-    for (BitVector::Iterator it(loop->blocks()); !it.Done(); it.Advance()) {
-      BlockEntryInstr* block = preorder_[it.Current()];
-      if (block->loop_info() == nullptr) {
-        block->set_loop_info(loop);
-      }
-    }
-  }
-}
-
 LoopHierarchy* FlowGraph::ComputeLoops() const {
   // Iterate over all entry blocks in the flow graph to attach
   // loop information to each loop header.
@@ -1649,15 +1630,14 @@
           ASSERT(block->loop_info()->header() == block);
           block->loop_info()->AddBlocks(loop_blocks);
         }
+        block->loop_info()->AddBackEdge(pred);
       }
     }
   }
 
   // Build the loop hierarchy and link every entry block to
   // the closest enveloping loop in loop hierarchy.
-  LoopHierarchy* loop_hierarchy = new (zone()) LoopHierarchy(loop_headers);
-  LinkToInner(loop_hierarchy->first());
-  return loop_hierarchy;
+  return new (zone()) LoopHierarchy(loop_headers, preorder_);
 }
 
 intptr_t FlowGraph::InstructionCount() const {
diff --git a/runtime/vm/compiler/backend/flow_graph.h b/runtime/vm/compiler/backend/flow_graph.h
index cace092..330bcf3 100644
--- a/runtime/vm/compiler/backend/flow_graph.h
+++ b/runtime/vm/compiler/backend/flow_graph.h
@@ -292,9 +292,11 @@
     if (loop_hierarchy_ == nullptr) {
       loop_hierarchy_ = ComputeLoops();
     }
-    return *loop_hierarchy_;
+    return loop_hierarchy();
   }
 
+  const LoopHierarchy& loop_hierarchy() const { return *loop_hierarchy_; }
+
   // Resets the loop hierarchy of the flow graph. Use this to
   // force a recomputation of loop detection by the next call
   // to GetLoopHierarchy() (note that this does not immediately
@@ -305,10 +307,6 @@
     loop_invariant_loads_ = nullptr;
   }
 
-  // Helper method to find the natural loops in the flow graph and attach
-  // the loop information to each entry block. Returns the loop hierarchy.
-  LoopHierarchy* ComputeLoops() const;
-
   // Per loop header invariant loads sets. Each set contains load id for
   // those loads that are not affected by anything in the loop and can be
   // hoisted out. Sets are computed by LoadOptimizer.
@@ -407,10 +405,12 @@
   PhiInstr* AddPhi(JoinEntryInstr* join, Definition* d1, Definition* d2);
 
  private:
+  friend class FlowGraphCompiler;  // TODO(ajcbik): restructure
   friend class IfConverter;
   friend class BranchSimplifier;
   friend class ConstantPropagator;
   friend class DeadCodeElimination;
+  friend class Intrinsifier;
 
   // SSA transformation methods and fields.
   void ComputeDominators(GrowableArray<BitVector*>* dominance_frontier);
@@ -454,14 +454,15 @@
   void ReplacePredecessor(BlockEntryInstr* old_block,
                           BlockEntryInstr* new_block);
 
-  // Find the blocks in the natural loop for the back edge m->n. The
+  // Finds the blocks in the natural loop for the back edge m->n. The
   // algorithm is described in "Advanced Compiler Design & Implementation"
   // (Muchnick) p192. Returns a BitVector indexed by block pre-order
   // number where each bit indicates membership in the loop.
   BitVector* FindLoopBlocks(BlockEntryInstr* m, BlockEntryInstr* n) const;
 
-  // Attaches closest enveloping loop in the hierarchy to each block entry.
-  void LinkToInner(LoopInfo* loop) const;
+  // Finds the natural loops in the flow graph and attaches the loop
+  // information to each entry block. Returns the loop hierarchy.
+  LoopHierarchy* ComputeLoops() const;
 
   void InsertConversionsFor(Definition* def);
   void ConvertUse(Value* use, Representation from);
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 60f7962..9688cae 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -13,6 +13,7 @@
 #include "vm/compiler/backend/flow_graph_compiler.h"
 #include "vm/compiler/backend/linearscan.h"
 #include "vm/compiler/backend/locations.h"
+#include "vm/compiler/backend/loops.h"
 #include "vm/compiler/backend/range_analysis.h"
 #include "vm/compiler/frontend/flow_graph_builder.h"
 #include "vm/compiler/jit/compiler.h"
@@ -1595,6 +1596,10 @@
   return NULL;
 }
 
+bool BlockEntryInstr::IsLoopHeader() const {
+  return loop_info_ != nullptr && loop_info_->header() == this;
+}
+
 // Helper to mutate the graph during inlining. This block should be
 // replaced with new_block as a predecessor of all of this block's
 // successors.  For each successor, the predecessors will be reordered
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index 66467b7..33d1434 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -1359,6 +1359,7 @@
   // Loop related methods.
   LoopInfo* loop_info() const { return loop_info_; }
   void set_loop_info(LoopInfo* loop_info) { loop_info_ = loop_info; }
+  bool IsLoopHeader() const;
 
   virtual BlockEntryInstr* GetBlock() { return this; }
 
diff --git a/runtime/vm/compiler/backend/linearscan.cc b/runtime/vm/compiler/backend/linearscan.cc
index 4b2c8a4..34d18ca 100644
--- a/runtime/vm/compiler/backend/linearscan.cc
+++ b/runtime/vm/compiler/backend/linearscan.cc
@@ -11,6 +11,7 @@
 #include "vm/compiler/backend/flow_graph_compiler.h"
 #include "vm/compiler/backend/il.h"
 #include "vm/compiler/backend/il_printer.h"
+#include "vm/compiler/backend/loops.h"
 #include "vm/log.h"
 #include "vm/parser.h"
 #include "vm/stack_frame.h"
@@ -488,12 +489,23 @@
   }
 }
 
-// Returns true if all uses of the given range inside the given loop
-// have Any allocation policy.
-static bool HasOnlyUnconstrainedUsesInLoop(LiveRange* range,
-                                           BlockInfo* loop_header) {
-  const intptr_t boundary = loop_header->last_block()->end_pos();
+// Returns the loop's latest lifetime position (max end of its back edges).
+static intptr_t LoopEnd(LoopInfo* loop_info) {
+  intptr_t max_pos = 0;
+  const GrowableArray<BlockEntryInstr*>& back_edges = loop_info->back_edges();
+  for (intptr_t i = 0, n = back_edges.length(); i < n; i++) {
+    intptr_t end_pos = back_edges[i]->end_pos();
+    if (end_pos > max_pos) {
+      max_pos = end_pos;
+    }
+  }
+  return max_pos;
+}
 
+// Returns true if all uses of the given range before the given
+// loop boundary have the Any allocation policy.
+static bool HasOnlyUnconstrainedUsesInLoop(LiveRange* range,
+                                           intptr_t boundary) {
   UsePosition* use = range->first_use();
   while ((use != NULL) && (use->pos() < boundary)) {
     if (!use->location_slot()->Equals(Location::Any())) {
@@ -501,7 +513,6 @@
     }
     use = use->next();
   }
-
   return true;
 }
 
@@ -524,8 +535,7 @@
   Zone* zone = flow_graph_.zone();
   for (intptr_t i = 0; i < (block_count - 1); i++) {
     BlockEntryInstr* block = postorder_[i];
-
-    BlockInfo* block_info = BlockInfoAt(block->start_pos());
+    ASSERT(BlockEntryAt(block->start_pos()) == block);
 
     // For every SSA value that is live out of this block, create an interval
     // that covers the whole block.  It will be shortened if we encounter a
@@ -536,16 +546,21 @@
       range->AddUseInterval(block->start_pos(), block->end_pos());
     }
 
-    BlockInfo* loop_header = block_info->loop_header();
-    if ((loop_header != NULL) && (loop_header->last_block() == block)) {
-      current_interference_set =
-          new (zone) BitVector(zone, flow_graph_.max_virtual_register_number());
-      ASSERT(loop_header->backedge_interference() == NULL);
-      // All values flowing into the loop header are live at the back-edge and
-      // can interfere with phi moves.
-      current_interference_set->AddAll(
-          liveness_.GetLiveInSet(loop_header->entry()));
-      loop_header->set_backedge_interference(current_interference_set);
+    LoopInfo* loop_info = block->loop_info();
+    if ((loop_info != nullptr) && (loop_info->IsBackEdge(block))) {
+      if (backedge_interference_[loop_info->id()] != nullptr) {
+        // Restore the interference set for a subsequent back edge of the
+        // loop (an inner loop's header may have reset it in the meanwhile).
+        current_interference_set = backedge_interference_[loop_info->id()];
+      } else {
+        // All values flowing into the loop header are live at the
+        // back edge and can interfere with phi moves.
+        current_interference_set = new (zone)
+            BitVector(zone, flow_graph_.max_virtual_register_number());
+        current_interference_set->AddAll(
+            liveness_.GetLiveInSet(loop_info->header()));
+        backedge_interference_[loop_info->id()] = current_interference_set;
+      }
     }
 
     // Connect outgoing phi-moves that were created in NumberInstructions
@@ -563,13 +578,13 @@
     }
 
     // Check if any values live into the loop can be spilled for free.
-    if (block_info->is_loop_header()) {
+    if (block->IsLoopHeader()) {
       current_interference_set = NULL;
       for (BitVector::Iterator it(liveness_.GetLiveInSetAt(i)); !it.Done();
            it.Advance()) {
         LiveRange* range = GetLiveRange(it.Current());
-        if (HasOnlyUnconstrainedUsesInLoop(range, block_info)) {
-          range->MarkHasOnlyUnconstrainedUsesInLoop(block_info->loop_id());
+        if (HasOnlyUnconstrainedUsesInLoop(range, LoopEnd(loop_info))) {
+          range->MarkHasOnlyUnconstrainedUsesInLoop(loop_info->id());
         }
       }
     }
@@ -900,7 +915,8 @@
 
   // All uses are recorded at the start position in the block.
   const intptr_t pos = join->start_pos();
-  const bool is_loop_header = BlockInfoAt(join->start_pos())->is_loop_header();
+  const bool is_loop_header = join->IsLoopHeader();
+
   intptr_t move_idx = 0;
   for (PhiIterator it(join); !it.Done(); it.Advance()) {
     PhiInstr* phi = it.Current();
@@ -1542,10 +1558,9 @@
   const intptr_t block_count = postorder_.length();
   for (intptr_t i = block_count - 1; i >= 0; i--) {
     BlockEntryInstr* block = postorder_[i];
-    BlockInfo* info = new BlockInfo(block);
 
     instructions_.Add(block);
-    block_info_.Add(info);
+    block_entries_.Add(block);
     block->set_start_pos(pos);
     block->set_lifetime_position(pos);
     pos += 2;
@@ -1555,7 +1570,7 @@
       // Do not assign numbers to parallel move instructions.
       if (!current->IsParallelMove()) {
         instructions_.Add(current);
-        block_info_.Add(info);
+        block_entries_.Add(block);
         current->set_lifetime_position(pos);
         pos += 2;
       }
@@ -1592,65 +1607,18 @@
       }
     }
   }
-}
 
-// Discover structural (reducible) loops nesting structure.
-void FlowGraphAllocator::DiscoverLoops() {
-  // This algorithm relies on the assumption that we emit blocks in reverse
-  // postorder, so postorder number can be used to identify loop nesting.
-  //
-  // TODO(vegorov): consider using a generic algorithm to correctly discover
-  // both headers of reducible and irreducible loops.
-  //
-  // TODO(ajcbik): this looks like it can use the new loop hierarchy instead.
-  BlockInfo* current_loop = NULL;
-
-  intptr_t loop_id = 0;  // All loop headers have a unique id.
-
-  const intptr_t block_count = postorder_.length();
-  for (intptr_t i = 0; i < block_count; i++) {
-    BlockEntryInstr* block = postorder_[i];
-    GotoInstr* goto_instr = block->last_instruction()->AsGoto();
-    if (goto_instr != NULL) {
-      JoinEntryInstr* successor = goto_instr->successor();
-      if (successor->postorder_number() > i) {
-        // This is back-edge.
-        BlockInfo* successor_info = BlockInfoAt(successor->lifetime_position());
-        ASSERT(successor_info->entry() == successor);
-        if (!successor_info->is_loop_header() &&
-            ((current_loop == NULL) ||
-             (current_loop->entry()->postorder_number() >
-              successor_info->entry()->postorder_number()))) {
-          ASSERT(successor_info != current_loop);
-
-          successor_info->mark_loop_header();
-          successor_info->set_loop_id(loop_id++);
-          successor_info->set_last_block(block);
-          // For loop header loop information points to the outer loop.
-          successor_info->set_loop(current_loop);
-          current_loop = successor_info;
-        }
-      }
-    }
-
-    if (current_loop != NULL) {
-      BlockInfo* current_info = BlockInfoAt(block->lifetime_position());
-      if (current_info == current_loop) {
-        ASSERT(current_loop->is_loop_header());
-        current_loop = current_info->loop();
-      } else {
-        current_info->set_loop(current_loop);
-      }
-    }
-  }
+  // Storage per loop.
+  const intptr_t num_loops = flow_graph_.loop_hierarchy().num_loops();
+  backedge_interference_.EnsureLength(num_loops, nullptr);
 }
 
 Instruction* FlowGraphAllocator::InstructionAt(intptr_t pos) const {
   return instructions_[pos / 2];
 }
 
-BlockInfo* FlowGraphAllocator::BlockInfoAt(intptr_t pos) const {
-  return block_info_[pos / 2];
+BlockEntryInstr* FlowGraphAllocator::BlockEntryAt(intptr_t pos) const {
+  return block_entries_[pos / 2];
 }
 
 bool FlowGraphAllocator::IsBlockEntry(intptr_t pos) const {
@@ -1860,21 +1828,23 @@
 
   intptr_t split_pos = kIllegalPosition;
 
-  BlockInfo* split_block = BlockInfoAt(to);
-  if (from < split_block->entry()->lifetime_position()) {
+  BlockEntryInstr* split_block_entry = BlockEntryAt(to);
+  ASSERT(split_block_entry == InstructionAt(to)->GetBlock());
+
+  if (from < split_block_entry->lifetime_position()) {
     // Interval [from, to) spans multiple blocks.
 
     // If last block is inside a loop prefer splitting at outermost loop's
     // header.
-    BlockInfo* loop_header = split_block->loop();
-    while ((loop_header != NULL) &&
-           (from < loop_header->entry()->lifetime_position())) {
-      split_block = loop_header;
-      loop_header = loop_header->loop();
+    LoopInfo* loop_info = split_block_entry->loop_info();
+    while ((loop_info != nullptr) &&
+           (from < loop_info->header()->lifetime_position())) {
+      split_block_entry = loop_info->header();
+      loop_info = loop_info->outer();
     }
 
     // Split at block's start.
-    split_pos = split_block->entry()->lifetime_position();
+    split_pos = split_block_entry->lifetime_position();
   } else {
     // Interval [from, to) is contained inside a single block.
 
@@ -1915,15 +1885,12 @@
 
   // When spilling the value inside the loop check if this spill can
   // be moved outside.
-  BlockInfo* block_info = BlockInfoAt(from);
-  if (block_info->is_loop_header() || (block_info->loop() != NULL)) {
-    BlockInfo* loop_header =
-        block_info->is_loop_header() ? block_info : block_info->loop();
-
-    if ((range->Start() <= loop_header->entry()->start_pos()) &&
-        RangeHasOnlyUnconstrainedUsesInLoop(range, loop_header->loop_id())) {
-      ASSERT(loop_header->entry()->start_pos() <= from);
-      from = loop_header->entry()->start_pos();
+  LoopInfo* loop_info = BlockEntryAt(from)->loop_info();
+  if (loop_info != nullptr) {
+    if ((range->Start() <= loop_info->header()->start_pos()) &&
+        RangeHasOnlyUnconstrainedUsesInLoop(range, loop_info->id())) {
+      ASSERT(loop_info->header()->start_pos() <= from);
+      from = loop_info->header()->start_pos();
       TRACE_ALLOC(
           THR_Print("  moved spill position to loop header %" Pd "\n", from));
     }
@@ -2204,16 +2171,16 @@
   // If we are in a loop try to reduce number of moves on the back edge by
   // searching for a candidate that does not interfere with phis on the back
   // edge.
-  BlockInfo* loop_header = BlockInfoAt(unallocated->Start())->loop_header();
-  if ((unallocated->vreg() >= 0) && (loop_header != NULL) &&
-      (free_until >= loop_header->last_block()->end_pos()) &&
-      loop_header->backedge_interference()->Contains(unallocated->vreg())) {
+  LoopInfo* loop_info = BlockEntryAt(unallocated->Start())->loop_info();
+  if ((unallocated->vreg() >= 0) && (loop_info != nullptr) &&
+      (free_until >= LoopEnd(loop_info)) &&
+      backedge_interference_[loop_info->id()]->Contains(unallocated->vreg())) {
     GrowableArray<bool> used_on_backedge(number_of_registers_);
     for (intptr_t i = 0; i < number_of_registers_; i++) {
       used_on_backedge.Add(false);
     }
 
-    for (PhiIterator it(loop_header->entry()->AsJoinEntry()); !it.Done();
+    for (PhiIterator it(loop_info->header()->AsJoinEntry()); !it.Done();
          it.Advance()) {
       PhiInstr* phi = it.Current();
       ASSERT(phi->is_alive());
@@ -2239,13 +2206,12 @@
     }
 
     if (used_on_backedge[candidate]) {
-      TRACE_ALLOC(THR_Print("considering %s for v%" Pd
-                            ": has interference on the back edge"
-                            " {loop [%" Pd ", %" Pd ")}\n",
-                            MakeRegisterLocation(candidate).Name(),
-                            unallocated->vreg(),
-                            loop_header->entry()->start_pos(),
-                            loop_header->last_block()->end_pos()));
+      TRACE_ALLOC(
+          THR_Print("considering %s for v%" Pd
+                    ": has interference on the back edge"
+                    " {loop [%" Pd ", %" Pd ")}\n",
+                    MakeRegisterLocation(candidate).Name(), unallocated->vreg(),
+                    loop_info->header()->start_pos(), LoopEnd(loop_info)));
       for (intptr_t reg = 0; reg < NumberOfRegisters(); ++reg) {
         if (blocked_registers_[reg] || (reg == candidate) ||
             used_on_backedge[reg]) {
@@ -2295,17 +2261,19 @@
   return false;
 }
 
-bool FlowGraphAllocator::IsCheapToEvictRegisterInLoop(BlockInfo* loop,
+bool FlowGraphAllocator::IsCheapToEvictRegisterInLoop(BlockEntryInstr* header,
                                                       intptr_t reg) {
-  const intptr_t loop_start = loop->entry()->start_pos();
-  const intptr_t loop_end = loop->last_block()->end_pos();
+  LoopInfo* loop_info = header->loop_info();
+
+  const intptr_t loop_start = header->start_pos();
+  const intptr_t loop_end = LoopEnd(loop_info);
 
   for (intptr_t i = 0; i < registers_[reg]->length(); i++) {
     LiveRange* allocated = (*registers_[reg])[i];
 
     UseInterval* interval = allocated->finger()->first_pending_use_interval();
     if (interval->Contains(loop_start)) {
-      if (!RangeHasOnlyUnconstrainedUsesInLoop(allocated, loop->loop_id())) {
+      if (!RangeHasOnlyUnconstrainedUsesInLoop(allocated, loop_info->id())) {
         return false;
       }
     } else if (interval->start() < loop_end) {
@@ -2319,13 +2287,14 @@
 bool FlowGraphAllocator::HasCheapEvictionCandidate(LiveRange* phi_range) {
   ASSERT(phi_range->is_loop_phi());
 
-  BlockInfo* loop_header = BlockInfoAt(phi_range->Start());
-  ASSERT(loop_header->is_loop_header());
-  ASSERT(phi_range->Start() == loop_header->entry()->start_pos());
+  BlockEntryInstr* header = BlockEntryAt(phi_range->Start());
+
+  ASSERT(header->IsLoopHeader());
+  ASSERT(phi_range->Start() == header->start_pos());
 
   for (intptr_t reg = 0; reg < NumberOfRegisters(); ++reg) {
     if (blocked_registers_[reg]) continue;
-    if (IsCheapToEvictRegisterInLoop(loop_header, reg)) {
+    if (IsCheapToEvictRegisterInLoop(header, reg)) {
       return true;
     }
   }
@@ -2944,8 +2913,6 @@
 
   NumberInstructions();
 
-  DiscoverLoops();
-
 #if defined(TARGET_ARCH_DBC)
   last_used_register_ = -1;
 #endif
diff --git a/runtime/vm/compiler/backend/linearscan.h b/runtime/vm/compiler/backend/linearscan.h
index 200597a..1dacc5f 100644
--- a/runtime/vm/compiler/backend/linearscan.h
+++ b/runtime/vm/compiler/backend/linearscan.h
@@ -12,7 +12,6 @@
 namespace dart {
 
 class AllocationFinger;
-class BlockInfo;
 class FlowGraph;
 class LiveRange;
 class UseInterval;
@@ -85,14 +84,9 @@
   // that will be used for phi resolution.
   void NumberInstructions();
   Instruction* InstructionAt(intptr_t pos) const;
-  BlockInfo* BlockInfoAt(intptr_t pos) const;
+  BlockEntryInstr* BlockEntryAt(intptr_t pos) const;
   bool IsBlockEntry(intptr_t pos) const;
 
-  // Discover structural (reducible) loops nesting structure.
-  // It will be used later in SplitBetween heuristic that selects an
-  // optimal splitting position.
-  void DiscoverLoops();
-
   LiveRange* MakeLiveRangeForTemporary();
 
   // Visit instructions in the postorder and build live ranges for
@@ -197,7 +191,7 @@
   // performance because it introduces multiple operations with memory
   // inside the loop body and on the back edge.
   bool HasCheapEvictionCandidate(LiveRange* phi_range);
-  bool IsCheapToEvictRegisterInLoop(BlockInfo* loop, intptr_t reg);
+  bool IsCheapToEvictRegisterInLoop(BlockEntryInstr* block, intptr_t reg);
 
   // Assign selected non-free register to an unallocated live range and
   // evict any interference that can be evicted by splitting and spilling
@@ -259,8 +253,11 @@
   // Mapping between lifetime positions and instructions.
   GrowableArray<Instruction*> instructions_;
 
-  // Mapping between lifetime positions and blocks containing them.
-  GrowableArray<BlockInfo*> block_info_;
+  // Mapping between lifetime positions and block entries.
+  GrowableArray<BlockEntryInstr*> block_entries_;
+
+  // Mapping between loops and backedge interferences.
+  GrowableArray<BitVector*> backedge_interference_;
 
   SSALivenessAnalysis liveness_;
 
@@ -332,68 +329,6 @@
   DISALLOW_COPY_AND_ASSIGN(FlowGraphAllocator);
 };
 
-// Additional information about a block that is not contained in a
-// block entry.
-class BlockInfo : public ZoneAllocated {
- public:
-  explicit BlockInfo(BlockEntryInstr* entry)
-      : entry_(entry),
-        loop_(NULL),
-        is_loop_header_(false),
-        backedge_interference_(NULL) {}
-
-  BlockEntryInstr* entry() const { return entry_; }
-
-  // Returns true is this node is a header of a structural loop.
-  bool is_loop_header() const { return is_loop_header_; }
-
-  // Returns header of the innermost loop containing this block.
-  BlockInfo* loop_header() {
-    if (is_loop_header()) {
-      return this;
-    } else if (loop() != NULL) {
-      return loop();
-    } else {
-      return NULL;
-    }
-  }
-
-  // Innermost reducible loop containing this node. Loop headers point to
-  // outer loop not to themselves.
-  BlockInfo* loop() const { return loop_; }
-
-  void mark_loop_header() { is_loop_header_ = true; }
-  void set_loop(BlockInfo* loop) {
-    ASSERT(loop_ == NULL);
-    ASSERT((loop == NULL) || loop->is_loop_header());
-    loop_ = loop;
-  }
-
-  BlockEntryInstr* last_block() const { return last_block_; }
-  void set_last_block(BlockEntryInstr* last_block) { last_block_ = last_block; }
-
-  intptr_t loop_id() const { return loop_id_; }
-  void set_loop_id(intptr_t loop_id) { loop_id_ = loop_id; }
-
-  BitVector* backedge_interference() const { return backedge_interference_; }
-
-  void set_backedge_interference(BitVector* backedge_interference) {
-    backedge_interference_ = backedge_interference;
-  }
-
- private:
-  BlockEntryInstr* entry_;
-  BlockInfo* loop_;
-  bool is_loop_header_;
-
-  BlockEntryInstr* last_block_;
-  intptr_t loop_id_;
-
-  BitVector* backedge_interference_;
-
-  DISALLOW_COPY_AND_ASSIGN(BlockInfo);
-};
-
 // UsePosition represents a single use of an SSA value by some instruction.
 // It points to a location slot which either tells register allocator
 // where instruction expects the value (if slot contains a fixed location) or
diff --git a/runtime/vm/compiler/backend/loops.cc b/runtime/vm/compiler/backend/loops.cc
index 0f9ef9e..42e656e 100644
--- a/runtime/vm/compiler/backend/loops.cc
+++ b/runtime/vm/compiler/backend/loops.cc
@@ -15,17 +15,33 @@
     : id_(id),
       header_(header),
       blocks_(blocks),
+      back_edges_(),
       outer_(nullptr),
       inner_(nullptr),
-      next_(nullptr),
-      prev_(nullptr) {}
+      next_(nullptr) {}
 
 void LoopInfo::AddBlocks(BitVector* blocks) {
   blocks_->AddAll(blocks);
 }
 
+void LoopInfo::AddBackEdge(BlockEntryInstr* block) {
+  back_edges_.Add(block);
+}
+
+bool LoopInfo::IsBackEdge(BlockEntryInstr* block) const {
+  for (intptr_t i = 0, n = back_edges_.length(); i < n; i++) {
+    if (back_edges_[i] == block) {
+      return true;
+    }
+  }
+  return false;
+}
+
 bool LoopInfo::IsIn(LoopInfo* other) const {
-  return other->blocks_->Contains(header_->preorder_number());
+  if (other != nullptr) {
+    return other->blocks_->Contains(header_->preorder_number());
+  }
+  return false;
 }
 
 intptr_t LoopInfo::NestingDepth() const {
@@ -49,42 +65,63 @@
   if (outer_) f.Print(" outer=%" Pd, outer_->id_);
   if (inner_) f.Print(" inner=%" Pd, inner_->id_);
   if (next_) f.Print(" next=%" Pd, next_->id_);
-  if (prev_) f.Print(" prev=%" Pd, prev_->id_);
+  f.Print(" [");
+  for (intptr_t i = 0, n = back_edges_.length(); i < n; i++) {
+    f.Print(" B%" Pd, back_edges_[i]->block_id());
+  }
+  f.Print(" ]");
   return Thread::Current()->zone()->MakeCopyOfString(buffer);
 }
 
-LoopHierarchy::LoopHierarchy(ZoneGrowableArray<BlockEntryInstr*>* headers)
-    : headers_(headers), first_(nullptr), last_(nullptr) {
+LoopHierarchy::LoopHierarchy(ZoneGrowableArray<BlockEntryInstr*>* headers,
+                             const GrowableArray<BlockEntryInstr*>& preorder)
+    : headers_(headers), preorder_(preorder), top_(nullptr) {
   Build();
 }
 
-void LoopHierarchy::AddLoop(LoopInfo* loop) {
-  if (first_ == nullptr) {
-    // First loop.
-    ASSERT(last_ == nullptr);
-    first_ = last_ = loop;
-  } else if (loop->IsIn(last_)) {
-    // First inner loop.
-    loop->outer_ = last_;
-    ASSERT(last_->inner_ == nullptr);
-    last_ = last_->inner_ = loop;
-  } else {
-    // Subsequent loop.
-    while (last_->outer_ != nullptr && !loop->IsIn(last_->outer_)) {
-      last_ = last_->outer_;
+void LoopHierarchy::Build() {
+  // Link every entry block to the closest enveloping loop.
+  for (intptr_t i = 0, n = headers_->length(); i < n; ++i) {
+    LoopInfo* loop = (*headers_)[i]->loop_info();
+    for (BitVector::Iterator it(loop->blocks()); !it.Done(); it.Advance()) {
+      BlockEntryInstr* block = preorder_[it.Current()];
+      if (block->loop_info() == nullptr) {
+        block->set_loop_info(loop);
+      } else {
+        ASSERT(block->loop_info()->IsIn(loop));
+      }
     }
-    loop->outer_ = last_->outer_;
-    loop->prev_ = last_;
-    ASSERT(last_->next_ == nullptr);
-    last_ = last_->next_ = loop;
+  }
+  // Build hierarchy from headers.
+  for (intptr_t i = 0, n = headers_->length(); i < n; ++i) {
+    BlockEntryInstr* header = (*headers_)[i];
+    LoopInfo* loop = header->loop_info();
+    LoopInfo* dom_loop = header->dominator()->loop_info();
+    ASSERT(loop->outer_ == nullptr);
+    ASSERT(loop->next_ == nullptr);
+    if (loop->IsIn(dom_loop)) {
+      loop->outer_ = dom_loop;
+      loop->next_ = dom_loop->inner_;
+      dom_loop->inner_ = loop;
+    } else {
+      loop->next_ = top_;
+      top_ = loop;
+    }
+  }
+  // If tracing is requested, print the loop hierarchy.
+  if (FLAG_trace_optimization) {
+    Print(top());
   }
 }
 
-void LoopHierarchy::Build() {
-  for (intptr_t i = 0, n = headers_->length(); i < n; ++i) {
-    LoopInfo* loop = (*headers_)[n - 1 - i]->loop_info();
-    ASSERT(loop->id() == (n - 1 - i));
-    AddLoop(loop);
+void LoopHierarchy::Print(LoopInfo* loop) {
+  for (; loop != nullptr; loop = loop->next_) {
+    THR_Print("%s {", loop->ToCString());
+    for (BitVector::Iterator it(loop->blocks()); !it.Done(); it.Advance()) {
+      THR_Print(" B%" Pd, preorder_[it.Current()]->block_id());
+    }
+    THR_Print(" }\n");
+    Print(loop->inner_);
   }
 }
 
diff --git a/runtime/vm/compiler/backend/loops.h b/runtime/vm/compiler/backend/loops.h
index c297e95..555f69f 100644
--- a/runtime/vm/compiler/backend/loops.h
+++ b/runtime/vm/compiler/backend/loops.h
@@ -18,6 +18,12 @@
   // Merges given blocks to this loop.
   void AddBlocks(BitVector* blocks);
 
+  // Adds back edge to this loop.
+  void AddBackEdge(BlockEntryInstr* block);
+
+  // Returns true if the given block is a back edge of this loop.
+  bool IsBackEdge(BlockEntryInstr* block) const;
+
   // Returns true if this loop is nested inside other loop.
   bool IsIn(LoopInfo* other) const;
 
@@ -27,11 +33,11 @@
   // Getters.
   intptr_t id() const { return id_; }
   BlockEntryInstr* header() const { return header_; }
+  const GrowableArray<BlockEntryInstr*>& back_edges() { return back_edges_; }
   BitVector* blocks() const { return blocks_; }
   LoopInfo* outer() const { return outer_; }
   LoopInfo* inner() const { return inner_; }
   LoopInfo* next() const { return next_; }
-  LoopInfo* prev() const { return prev_; }
 
   // For debugging.
   const char* ToCString() const;
@@ -50,11 +56,13 @@
   // indexed by its "preorder_number".
   BitVector* blocks_;
 
+  // Back edges of loop (usually one).
+  GrowableArray<BlockEntryInstr*> back_edges_;
+
   // Loop hierarchy.
   LoopInfo* outer_;
   LoopInfo* inner_;
   LoopInfo* next_;
-  LoopInfo* prev_;
 
   DISALLOW_COPY_AND_ASSIGN(LoopInfo);
 };
@@ -62,22 +70,25 @@
 // Information on the loop hierarchy in the flow graph.
 class LoopHierarchy : public ZoneAllocated {
  public:
-  explicit LoopHierarchy(ZoneGrowableArray<BlockEntryInstr*>* headers);
+  LoopHierarchy(ZoneGrowableArray<BlockEntryInstr*>* headers,
+                const GrowableArray<BlockEntryInstr*>& preorder);
 
   // Getters.
   const ZoneGrowableArray<BlockEntryInstr*>& headers() const {
     return *headers_;
   }
-  LoopInfo* first() const { return first_; }
-  LoopInfo* last() const { return last_; }
+  LoopInfo* top() const { return top_; }
+
+  // Returns total number of loops in the hierarchy.
+  intptr_t num_loops() const { return headers_->length(); }
 
  private:
-  void AddLoop(LoopInfo* loop);
   void Build();
+  void Print(LoopInfo* loop);
 
   ZoneGrowableArray<BlockEntryInstr*>* headers_;
-  LoopInfo* first_;
-  LoopInfo* last_;
+  const GrowableArray<BlockEntryInstr*>& preorder_;
+  LoopInfo* top_;
 
   DISALLOW_COPY_AND_ASSIGN(LoopHierarchy);
 };
diff --git a/runtime/vm/compiler/compiler_pass.cc b/runtime/vm/compiler/compiler_pass.cc
index 590cafb..988d0834 100644
--- a/runtime/vm/compiler/compiler_pass.cc
+++ b/runtime/vm/compiler/compiler_pass.cc
@@ -395,6 +395,8 @@
 });
 
 COMPILER_PASS(AllocateRegisters, {
+  // Ensure loop hierarchy has been computed.
+  flow_graph->GetLoopHierarchy();
   // Perform register allocation on the SSA graph.
   FlowGraphAllocator allocator(*flow_graph);
   allocator.AllocateRegisters();
diff --git a/runtime/vm/compiler/intrinsifier.cc b/runtime/vm/compiler/intrinsifier.cc
index 64b1dbd..7537204 100644
--- a/runtime/vm/compiler/intrinsifier.cc
+++ b/runtime/vm/compiler/intrinsifier.cc
@@ -248,6 +248,11 @@
     printer.PrintBlocks();
   }
 
+  // Ensure loop hierarchy has been computed.
+  GrowableArray<BitVector*> dominance_frontier;
+  graph->ComputeDominators(&dominance_frontier);
+  graph->GetLoopHierarchy();
+
   // Perform register allocation on the SSA graph.
   FlowGraphAllocator allocator(*graph, true);  // Intrinsic mode.
   allocator.AllocateRegisters();
diff --git a/tests/language_2/vm/irreducible_loop_test.dart b/tests/language_2/vm/irreducible_loop_test.dart
new file mode 100644
index 0000000..96baa3d
--- /dev/null
+++ b/tests/language_2/vm/irreducible_loop_test.dart
@@ -0,0 +1,32 @@
+// Copyright (c) 2018, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// VMOptions=--no_background_compilation --optimization_counter_threshold=10
+
+import "package:expect/expect.dart";
+
+// This method forms an infinite, irreducible loop. As long as we
+// don't enter any of the branches, the method terminates. The test
+// is included to ensure an irreducible loop does not break anything
+// in the compiler.
+int bar(int x) {
+  switch (x) {
+    case_1:
+    case 1:
+      continue case_2;
+    case_2:
+    case 2:
+      continue case_1;
+  }
+  return x;
+}
+
+main() {
+  for (var i = -50; i <= 0; i++) {
+    Expect.equals(i, bar(i));
+  }
+  for (var i = 3; i <= 50; i++) {
+    Expect.equals(i, bar(i));
+  }
+}