[vm, compiler] Fix Code checked entry offsets.

Function entry points have been subject to block scheduling since 06f9a9e35427d505f52893134a1e42eb166c49d2, so the checked entry could end up at a different offset than the one recorded in Code. Block scheduling in AOT never reorders the entries; pin the graph entry and the checked entry at the front of the codegen block order so this holds everywhere.
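
For illustration only (not part of this CL): a minimal, self-contained C++
sketch of the pinning invariant established in block_scheduler.cc. Block,
BuildCodegenOrder, and the sample blocks are hypothetical stand-ins for the
VM's GraphEntryInstr/FunctionEntryInstr and FlowGraph::CodegenBlockOrder.

  #include <cstdio>
  #include <vector>

  // Hypothetical stand-in for the VM's block entry instructions.
  struct Block { int id; };

  // Sketch of the invariant: the graph entry and the checked (normal) entry
  // are emitted first, and the scheduled chains skip them, so the checked
  // entry stays at a fixed offset.
  std::vector<Block*> BuildCodegenOrder(Block* graph_entry,
                                        Block* checked_entry,
                                        const std::vector<Block*>& chains) {
    std::vector<Block*> order;
    order.push_back(graph_entry);      // Always first.
    if (checked_entry != nullptr) {
      order.push_back(checked_entry);  // Pinned right after the graph entry.
    }
    for (Block* block : chains) {
      if (block != graph_entry && block != checked_entry) {
        order.push_back(block);        // Other blocks keep their schedule.
      }
    }
    return order;
  }

  int main() {
    Block graph_entry{0}, checked_entry{1}, b2{2}, b3{3};
    std::vector<Block*> chains = {&b3, &checked_entry, &b2, &graph_entry};
    for (Block* b : BuildCodegenOrder(&graph_entry, &checked_entry, chains)) {
      std::printf("B%d ", b->id);      // Prints: B0 B1 B3 B2
    }
    std::printf("\n");
    return 0;
  }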

Bug: https://github.com/dart-lang/sdk/issues/36409
Bug: https://github.com/dart-lang/sdk/issues/36731
Change-Id: Id6725fed4d9bbd381631fa88c5397435a35acc9a
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/102463
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Alexander Markov <alexmarkov@google.com>
diff --git a/runtime/vm/compiler/assembler/assembler_dbc.h b/runtime/vm/compiler/assembler/assembler_dbc.h
index 253fdc1..b88795f 100644
--- a/runtime/vm/compiler/assembler/assembler_dbc.h
+++ b/runtime/vm/compiler/assembler/assembler_dbc.h
@@ -39,6 +39,8 @@
   // Misc. functionality
   intptr_t prologue_offset() const { return 0; }
 
+  void MonomorphicCheckedEntry() {}
+
   // Debugging and bringup support.
   void Stop(const char* message) override;
 
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.h b/runtime/vm/compiler/assembler/assembler_ia32.h
index f9e01e5..df07d96 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.h
+++ b/runtime/vm/compiler/assembler/assembler_ia32.h
@@ -646,6 +646,8 @@
   void LeaveFrame();
   void ReserveAlignedFrameSpace(intptr_t frame_space);
 
+  void MonomorphicCheckedEntry() {}
+
   // Require a temporary register 'tmp'.
   // Clobber all non-CPU registers (e.g. XMM registers and the "FPU stack").
   void TransitionGeneratedToNative(Register destination_address,
diff --git a/runtime/vm/compiler/assembler/disassembler.cc b/runtime/vm/compiler/assembler/disassembler.cc
index 8cf7adc..a2fe6a1 100644
--- a/runtime/vm/compiler/assembler/disassembler.cc
+++ b/runtime/vm/compiler/assembler/disassembler.cc
@@ -318,11 +318,24 @@
       ExceptionHandlers::Handle(zone, code.exception_handlers());
   THR_Print("%s}\n", handlers.ToCString());
 
-  if (instructions.unchecked_entrypoint_pc_offset() != 0) {
-    THR_Print("Unchecked entrypoint at offset 0x%" Px "\n",
-              Instructions::UncheckedEntryPoint(instructions.raw()));
-  } else {
-    THR_Print("No unchecked entrypoint.\n");
+  {
+    THR_Print("Entry points for function '%s' {\n", function_fullname);
+    THR_Print("  [code+0x%02" Px "] %" Px " kNormal\n",
+              Code::entry_point_offset(CodeEntryKind::kNormal) - kHeapObjectTag,
+              Instructions::EntryPoint(instructions.raw()));
+    THR_Print(
+        "  [code+0x%02" Px "] %" Px " kUnchecked\n",
+        Code::entry_point_offset(CodeEntryKind::kUnchecked) - kHeapObjectTag,
+        Instructions::UncheckedEntryPoint(instructions.raw()));
+    THR_Print(
+        "  [code+0x%02" Px "] %" Px " kMonomorphic\n",
+        Code::entry_point_offset(CodeEntryKind::kMonomorphic) - kHeapObjectTag,
+        Instructions::MonomorphicEntryPoint(instructions.raw()));
+    THR_Print("  [code+0x%02" Px "] %" Px " kMonomorphicUnchecked\n",
+              Code::entry_point_offset(CodeEntryKind::kMonomorphicUnchecked) -
+                  kHeapObjectTag,
+              Instructions::MonomorphicUncheckedEntryPoint(instructions.raw()));
+    THR_Print("}\n");
   }
 
   {
diff --git a/runtime/vm/compiler/backend/block_scheduler.cc b/runtime/vm/compiler/backend/block_scheduler.cc
index 46b26a2..51eceea 100644
--- a/runtime/vm/compiler/backend/block_scheduler.cc
+++ b/runtime/vm/compiler/backend/block_scheduler.cc
@@ -219,13 +219,23 @@
     Union(&chains, source_chain, target_chain);
   }
 
+  // Ensure the checked entry remains first to avoid needing another offset
+  // on Instructions; cf. Code::EntryPoint.
+  GraphEntryInstr* graph_entry = flow_graph()->graph_entry();
+  flow_graph()->CodegenBlockOrder(true)->Add(graph_entry);
+  FunctionEntryInstr* checked_entry = graph_entry->normal_entry();
+  if (checked_entry != nullptr) {
+    flow_graph()->CodegenBlockOrder(true)->Add(checked_entry);
+  }
   // Build a new block order.  Emit each chain when its first block occurs
   // in the original reverse postorder ordering (which gives a topological
   // sort of the blocks).
   for (intptr_t i = block_count - 1; i >= 0; --i) {
     if (chains[i]->first->block == flow_graph()->postorder()[i]) {
       for (Link* link = chains[i]->first; link != NULL; link = link->next) {
-        flow_graph()->CodegenBlockOrder(true)->Add(link->block);
+        if ((link->block != checked_entry) && (link->block != graph_entry)) {
+          flow_graph()->CodegenBlockOrder(true)->Add(link->block);
+        }
       }
     }
   }
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.cc b/runtime/vm/compiler/backend/flow_graph_compiler.cc
index 00f1bdf..73c21d8 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.cc
@@ -335,12 +335,11 @@
     return target->Position();
   }
 
-// Intrinsification happened.
-#ifdef DART_PRECOMPILER
+  // Intrinsification happened.
   if (parsed_function().function().IsDynamicFunction()) {
     return Instructions::kMonomorphicEntryOffset;
   }
-#endif
+
   return 0;
 }
 
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
index 7f1bfb9..d968031 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
@@ -1061,7 +1061,8 @@
   __ LoadFromOffset(kWord, R0, SP,
                     (ic_data.CountWithoutTypeArgs() - 1) * kWordSize);
   __ LoadUniqueObject(R9, ic_data);
-  GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs);
+  GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs,
+                   Code::EntryKind::kMonomorphic);
   __ Drop(ic_data.CountWithTypeArgs());
 }
 
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
index 0c6be28..3eeac84 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
@@ -1040,7 +1040,8 @@
   ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
   __ LoadFromOffset(R0, SP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize);
   __ LoadUniqueObject(R5, ic_data);
-  GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs);
+  GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs,
+                   Code::EntryKind::kMonomorphic);
   __ Drop(ic_data.CountWithTypeArgs());
 }
 
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
index 5629335..5cf94a8 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
@@ -931,7 +931,8 @@
   // Load receiver into EBX.
   __ movl(EBX, Address(ESP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize));
   __ LoadObject(ECX, ic_data);
-  GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs);
+  GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs,
+                   Code::EntryKind::kMonomorphic);
   __ Drop(ic_data.CountWithTypeArgs());
 }
 
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
index 996f030..5542b19 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
@@ -1060,7 +1060,8 @@
   // Load receiver into RDX.
   __ movq(RDX, Address(RSP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize));
   __ LoadUniqueObject(RBX, ic_data);
-  GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs);
+  GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs,
+                   Code::EntryKind::kMonomorphic);
   __ Drop(ic_data.CountWithTypeArgs(), RCX);
 }
 
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 5c840ec..c2aeb5e 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -3831,16 +3831,12 @@
 // fall-through code in [FlowGraphCompiler::CompileGraph()].
 // (As opposed to here where we don't check for the return value of
 // [Intrinsify]).
-#if defined(DART_SUPPORT_PRECOMPILATION)
-  if (FLAG_precompiled_mode) {
-    const Function& function = compiler->parsed_function().function();
-    if (function.IsDynamicFunction()) {
-      compiler->SpecialStatsBegin(CombinedCodeStatistics::kTagCheckedEntry);
-      __ MonomorphicCheckedEntry();
-      compiler->SpecialStatsEnd(CombinedCodeStatistics::kTagCheckedEntry);
-    }
+  const Function& function = compiler->parsed_function().function();
+  if (function.IsDynamicFunction()) {
+    compiler->SpecialStatsBegin(CombinedCodeStatistics::kTagCheckedEntry);
+    __ MonomorphicCheckedEntry();
+    compiler->SpecialStatsEnd(CombinedCodeStatistics::kTagCheckedEntry);
   }
-#endif
 
   // NOTE: Because of the presence of multiple entry-points, we generate several
   // times the same intrinsification & frame setup. That's why we cannot rely on
diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc
index ebe2a08..cf86087 100644
--- a/runtime/vm/compiler/backend/il_arm.cc
+++ b/runtime/vm/compiler/backend/il_arm.cc
@@ -6797,10 +6797,15 @@
 
 void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   BlockEntryInstr* entry = normal_entry();
-  if (entry == nullptr) entry = osr_entry();
-
-  if (!compiler->CanFallThroughTo(entry)) {
-    __ b(compiler->GetJumpLabel(entry));
+  if (entry != nullptr) {
+    if (!compiler->CanFallThroughTo(entry)) {
+      FATAL("Checked function entry must have no offset");
+    }
+  } else {
+    entry = osr_entry();
+    if (!compiler->CanFallThroughTo(entry)) {
+      __ b(compiler->GetJumpLabel(entry));
+    }
   }
 }
 
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index 198269b..1bf08fc 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -5954,10 +5954,15 @@
 
 void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   BlockEntryInstr* entry = normal_entry();
-  if (entry == nullptr) entry = osr_entry();
-
-  if (!compiler->CanFallThroughTo(entry)) {
-    __ b(compiler->GetJumpLabel(entry));
+  if (entry != nullptr) {
+    if (!compiler->CanFallThroughTo(entry)) {
+      FATAL("Checked function entry must have no offset");
+    }
+  } else {
+    entry = osr_entry();
+    if (!compiler->CanFallThroughTo(entry)) {
+      __ b(compiler->GetJumpLabel(entry));
+    }
   }
 }
 
diff --git a/runtime/vm/compiler/backend/il_dbc.cc b/runtime/vm/compiler/backend/il_dbc.cc
index a577d3c..dad840e 100644
--- a/runtime/vm/compiler/backend/il_dbc.cc
+++ b/runtime/vm/compiler/backend/il_dbc.cc
@@ -1334,10 +1334,15 @@
 
 void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   BlockEntryInstr* entry = normal_entry();
-  if (entry == nullptr) entry = osr_entry();
-
-  if (!compiler->CanFallThroughTo(entry)) {
-    __ Jump(compiler->GetJumpLabel(entry));
+  if (entry != nullptr) {
+    if (!compiler->CanFallThroughTo(entry)) {
+      FATAL("Checked function entry must have no offset");
+    }
+  } else {
+    entry = osr_entry();
+    if (!compiler->CanFallThroughTo(entry)) {
+      __ Jump(compiler->GetJumpLabel(entry));
+    }
   }
 }
 
diff --git a/runtime/vm/compiler/backend/il_ia32.cc b/runtime/vm/compiler/backend/il_ia32.cc
index 349a09c..403ca00 100644
--- a/runtime/vm/compiler/backend/il_ia32.cc
+++ b/runtime/vm/compiler/backend/il_ia32.cc
@@ -6075,10 +6075,15 @@
 
 void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   BlockEntryInstr* entry = normal_entry();
-  if (entry == nullptr) entry = osr_entry();
-
-  if (!compiler->CanFallThroughTo(entry)) {
-    __ jmp(compiler->GetJumpLabel(entry));
+  if (entry != nullptr) {
+    if (!compiler->CanFallThroughTo(entry)) {
+      FATAL("Checked function entry must have no offset");
+    }
+  } else {
+    entry = osr_entry();
+    if (!compiler->CanFallThroughTo(entry)) {
+      __ jmp(compiler->GetJumpLabel(entry));
+    }
   }
 }
 
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index ce0b196..e8bf0f8 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -6230,10 +6230,15 @@
 
 void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   BlockEntryInstr* entry = normal_entry();
-  if (entry == nullptr) entry = osr_entry();
-
-  if (!compiler->CanFallThroughTo(entry)) {
-    __ jmp(compiler->GetJumpLabel(entry));
+  if (entry != nullptr) {
+    if (!compiler->CanFallThroughTo(entry)) {
+      FATAL("Checked function entry must have no offset");
+    }
+  } else {
+    entry = osr_entry();
+    if (!compiler->CanFallThroughTo(entry)) {
+      __ jmp(compiler->GetJumpLabel(entry));
+    }
   }
 }