[vm/compiler] Initial implementation of IL binary serialization

This change adds binary serialization/deserialization of flow graphs.
It supports all IL instructions and certain objects which can be
referenced from IL instructions. IL binary serialization is a useful
mechanism that will allow us to split compilation into multiple parts
in order to parallelize AOT compilation.
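
For context, the round trip looks roughly as follows. This is only a
sketch: FlowGraphSerializer and FlowGraphDeserializer come from this
change, but the stream setup and the exact signatures shown here are
assumptions, not code from this CL.

  // Sketch; constructor/method signatures are assumptions. detached_defs
  // comes from FlowGraph::CompactSSA (added below).
  ZoneWriteStream write_stream(zone, /*initial_size=*/1024);
  FlowGraphSerializer serializer(&write_stream);
  serializer.WriteFlowGraph(*flow_graph, *detached_defs);  // IL -> bytes
  ReadStream read_stream(write_stream.buffer(),
                         write_stream.bytes_written());
  FlowGraphDeserializer deserializer(parsed_function, &read_stream);
  FlowGraph* round_tripped = deserializer.ReadFlowGraph();  // bytes -> IL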

The program structure (libraries/classes/functions/fields) is not
serialized. It is assumed that the reader and the writer use the same
program structure.
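
As a consequence, objects reachable from IL (e.g. functions and fields)
can be encoded as compact references which the reader resolves against
that shared program structure, instead of as full copies. A purely
hypothetical encoding, for illustration only (both helpers below are
made up):

  // Hypothetical: identify a Function by stable coordinates within the
  // program structure shared by the writer and the reader.
  void WriteFunctionRef(BaseWriteStream* stream, const Function& function) {
    stream->Write<classid_t>(OwnerClassId(function));  // assumed helper
    stream->Write<intptr_t>(IndexInOwner(function));   // assumed helper
  }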

Caveats:
* FFI callbacks are not supported yet.
* Closure functions are not re-created when reading a flow graph.
* The flow graph must be in SSA form (unoptimized flow graphs are not
  supported).
* JIT mode is not supported (serializer currently assumes lazy
  linking of native methods and empty ICData).

In order to test IL serialization, the --test_il_serialization VM option
is added; it serializes and deserializes the flow graph before
generating code.
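
Conceptually, the flag splices a serialize/deserialize round trip into
the pipeline just before code generation; a sketch (the pipeline code is
not part of this excerpt, and SerializeAndDeserialize is a made-up
helper wrapping the round trip shown above):

  if (FLAG_test_il_serialization) {
    // CompactSSA (added in this CL) renumbers blocks and SSA values and
    // collects detached MaterializeObject definitions before writing.
    flow_graph->CompactSSA(detached_defs);
    flow_graph = SerializeAndDeserialize(flow_graph);  // assumed helper
  }
  // Code generation proceeds on the round-tripped flow graph.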

TEST=vm/dart/splay_test now runs with --test_il_serialization.

TEST=Manual run of vm-kernel-precomp-linux-debug-x64-try with
--test_il_serialization enabled (only ffi tests failed).

Issue: https://github.com/dart-lang/sdk/issues/43299
Change-Id: I7bbfd9e3a301e00c9cfbffa06b8f1f6c78a78470
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/254941
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Alexander Markov <alexmarkov@google.com>
Reviewed-by: Slava Egorov <vegorov@google.com>
diff --git a/runtime/tests/vm/dart/splay_test.dart b/runtime/tests/vm/dart/splay_test.dart
index ef51ede..25b7d98 100644
--- a/runtime/tests/vm/dart/splay_test.dart
+++ b/runtime/tests/vm/dart/splay_test.dart
@@ -31,6 +31,7 @@
 // VMOptions=--no_load_cse
 // VMOptions=--no_dead_store_elimination
 // VMOptions=--no_load_cse --no_dead_store_elimination
+// VMOptions=--test_il_serialization
 
 import "dart:math";
 import 'package:benchmark_harness/benchmark_harness.dart';
diff --git a/runtime/tests/vm/dart_2/splay_test.dart b/runtime/tests/vm/dart_2/splay_test.dart
index 6cd3050..d6a349f 100644
--- a/runtime/tests/vm/dart_2/splay_test.dart
+++ b/runtime/tests/vm/dart_2/splay_test.dart
@@ -35,6 +35,7 @@
 // VMOptions=--no_load_cse
 // VMOptions=--no_dead_store_elimination
 // VMOptions=--no_load_cse --no_dead_store_elimination
+// VMOptions=--test_il_serialization
 
 import "dart:math";
 import 'package:benchmark_harness/benchmark_harness.dart';
diff --git a/runtime/vm/bitmap.cc b/runtime/vm/bitmap.cc
index 06119e3..a79912af 100644
--- a/runtime/vm/bitmap.cc
+++ b/runtime/vm/bitmap.cc
@@ -149,4 +149,26 @@
   }
 }
 
+void BitmapBuilder::Write(BaseWriteStream* stream) const {
+  const intptr_t payload_size =
+      Utils::Minimum(Utils::RoundUp(Length(), kBitsPerByte) / kBitsPerByte,
+                     data_size_in_bytes_);
+  stream->Write<intptr_t>(Length());
+  stream->Write<intptr_t>(payload_size);
+  stream->WriteBytes(BackingStore(), payload_size);
+}
+
+void BitmapBuilder::Read(ReadStream* stream) {
+  length_ = stream->Read<intptr_t>();
+  const intptr_t payload_size = stream->Read<intptr_t>();
+  if (payload_size > data_size_in_bytes_) {
+    data_size_in_bytes_ = payload_size;
+    data_.ptr_ = AllocBackingStore(data_size_in_bytes_);
+  } else {
+    memset(BackingStore() + payload_size, 0,
+           data_size_in_bytes_ - payload_size);
+  }
+  stream->ReadBytes(BackingStore(), payload_size);
+}
+
 }  // namespace dart
diff --git a/runtime/vm/bitmap.h b/runtime/vm/bitmap.h
index 74c4c5b..2c18ba5 100644
--- a/runtime/vm/bitmap.h
+++ b/runtime/vm/bitmap.h
@@ -54,6 +54,9 @@
   void Print() const;
   void AppendAsBytesTo(BaseWriteStream* stream) const;
 
+  void Write(BaseWriteStream* stream) const;
+  void Read(ReadStream* stream);
+
  private:
   static constexpr intptr_t kIncrementSizeInBytes = 16;
   static constexpr intptr_t kInlineCapacityInBytes = 16;
diff --git a/runtime/vm/code_descriptors.h b/runtime/vm/code_descriptors.h
index 47ced6f..6ca2430 100644
--- a/runtime/vm/code_descriptors.h
+++ b/runtime/vm/code_descriptors.h
@@ -107,7 +107,7 @@
     ASSERT(list_[try_index].pc_offset == ExceptionHandlers::kInvalidPcOffset);
     list_[try_index].pc_offset = pc_offset;
     list_[try_index].is_generated = is_generated;
-    ASSERT(handler_types.IsZoneHandle());
+    ASSERT(handler_types.IsNotTemporaryScopedHandle());
     list_[try_index].handler_types = &handler_types;
     list_[try_index].needs_stacktrace |= needs_stacktrace;
   }
diff --git a/runtime/vm/compiler/aot/dispatch_table_generator.cc b/runtime/vm/compiler/aot/dispatch_table_generator.cc
index bb5faac..1245c38 100644
--- a/runtime/vm/compiler/aot/dispatch_table_generator.cc
+++ b/runtime/vm/compiler/aot/dispatch_table_generator.cc
@@ -375,6 +375,10 @@
 const TableSelector* SelectorMap::GetSelector(
     const Function& interface_target) const {
   const int32_t sid = SelectorId(interface_target);
+  return GetSelector(sid);
+}
+
+const TableSelector* SelectorMap::GetSelector(int32_t sid) const {
   if (sid == kInvalidSelectorId) return nullptr;
   const TableSelector* selector = &selectors_[sid];
   if (!selector->IsUsed()) return nullptr;
diff --git a/runtime/vm/compiler/aot/dispatch_table_generator.h b/runtime/vm/compiler/aot/dispatch_table_generator.h
index e916a3c..770d880 100644
--- a/runtime/vm/compiler/aot/dispatch_table_generator.h
+++ b/runtime/vm/compiler/aot/dispatch_table_generator.h
@@ -61,6 +61,8 @@
   // not have a selector assigned.
   const TableSelector* GetSelector(const Function& interface_target) const;
 
+  const TableSelector* GetSelector(int32_t sid) const;
+
  private:
   static const int32_t kInvalidSelectorId =
       kernel::ProcedureAttributesMetadata::kInvalidSelectorId;
diff --git a/runtime/vm/compiler/backend/compile_type.h b/runtime/vm/compiler/backend/compile_type.h
index 1eeb89c..bc9c6e5 100644
--- a/runtime/vm/compiler/backend/compile_type.h
+++ b/runtime/vm/compiler/backend/compile_type.h
@@ -18,6 +18,8 @@
 class AbstractType;
 class BaseTextBuffer;
 class Definition;
+class FlowGraphDeserializer;
+class FlowGraphSerializer;
 
 template <typename T>
 class GrowableArray;
@@ -281,6 +283,9 @@
   void set_owner(Definition* owner) { owner_ = owner; }
   Definition* owner() const { return owner_; }
 
+  void Write(FlowGraphSerializer* s) const;
+  explicit CompileType(FlowGraphDeserializer* d);
+
  private:
   bool can_be_null_;
   bool can_be_sentinel_;
diff --git a/runtime/vm/compiler/backend/flow_graph.cc b/runtime/vm/compiler/backend/flow_graph.cc
index fbefe32..2459014 100644
--- a/runtime/vm/compiler/backend/flow_graph.cc
+++ b/runtime/vm/compiler/backend/flow_graph.cc
@@ -2896,4 +2896,120 @@
   FlowGraphPrinter::PrintGraph(phase, this);
 }
 
+class SSACompactor : public ValueObject {
+ public:
+  SSACompactor(intptr_t num_blocks,
+               intptr_t num_ssa_vars,
+               ZoneGrowableArray<Definition*>* detached_defs)
+      : block_num_(num_blocks),
+        ssa_num_(num_ssa_vars),
+        detached_defs_(detached_defs) {
+    block_num_.EnsureLength(num_blocks, -1);
+    ssa_num_.EnsureLength(num_ssa_vars, -1);
+  }
+
+  void RenumberGraph(FlowGraph* graph) {
+    for (auto block : graph->reverse_postorder()) {
+      block_num_[block->block_id()] = 1;
+      CollectDetachedMaterializations(block->env());
+
+      if (auto* block_with_idefs = block->AsBlockEntryWithInitialDefs()) {
+        for (Definition* def : *block_with_idefs->initial_definitions()) {
+          RenumberDefinition(def);
+          CollectDetachedMaterializations(def->env());
+        }
+      }
+      if (auto* join = block->AsJoinEntry()) {
+        for (PhiIterator it(join); !it.Done(); it.Advance()) {
+          RenumberDefinition(it.Current());
+        }
+      }
+      for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
+        Instruction* instr = it.Current();
+        if (Definition* def = instr->AsDefinition()) {
+          RenumberDefinition(def);
+        }
+        CollectDetachedMaterializations(instr->env());
+      }
+    }
+    for (auto* def : (*detached_defs_)) {
+      RenumberDefinition(def);
+    }
+    graph->set_current_ssa_temp_index(current_ssa_index_);
+
+    // Preserve order between block ids, as predecessors are sorted
+    // by block ids.
+    intptr_t current_block_index = 0;
+    for (intptr_t i = 0, n = block_num_.length(); i < n; ++i) {
+      if (block_num_[i] >= 0) {
+        block_num_[i] = current_block_index++;
+      }
+    }
+    for (auto block : graph->reverse_postorder()) {
+      block->set_block_id(block_num_[block->block_id()]);
+    }
+    graph->set_max_block_id(current_block_index - 1);
+  }
+
+ private:
+  void RenumberDefinition(Definition* def) {
+    if (def->HasSSATemp()) {
+      const intptr_t old_index = def->ssa_temp_index();
+      intptr_t new_index = ssa_num_[old_index];
+      if (new_index < 0) {
+        ssa_num_[old_index] = new_index = current_ssa_index_++;
+      }
+      def->set_ssa_temp_index(new_index);
+    }
+  }
+
+  bool IsDetachedDefinition(Definition* def) {
+    return def->IsMaterializeObject() && (def->next() == nullptr);
+  }
+
+  void AddDetachedDefinition(Definition* def) {
+    for (intptr_t i = 0, n = detached_defs_->length(); i < n; ++i) {
+      if ((*detached_defs_)[i] == def) {
+        return;
+      }
+    }
+    detached_defs_->Add(def);
+    // Follow inputs as detached definitions can reference other
+    // detached definitions.
+    for (intptr_t i = 0, n = def->InputCount(); i < n; ++i) {
+      Definition* input = def->InputAt(i)->definition();
+      if (IsDetachedDefinition(input)) {
+        AddDetachedDefinition(input);
+      }
+    }
+    ASSERT(def->env() == nullptr);
+  }
+
+  void CollectDetachedMaterializations(Environment* env) {
+    if (env == nullptr) {
+      return;
+    }
+    for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) {
+      Definition* def = it.CurrentValue()->definition();
+      if (IsDetachedDefinition(def)) {
+        AddDetachedDefinition(def);
+      }
+    }
+  }
+
+  GrowableArray<intptr_t> block_num_;
+  GrowableArray<intptr_t> ssa_num_;
+  intptr_t current_ssa_index_ = 0;
+  ZoneGrowableArray<Definition*>* detached_defs_;
+};
+
+void FlowGraph::CompactSSA(ZoneGrowableArray<Definition*>* detached_defs) {
+  if (detached_defs == nullptr) {
+    detached_defs = new (Z) ZoneGrowableArray<Definition*>(Z, 0);
+  }
+  SSACompactor compactor(max_block_id() + 1, current_ssa_temp_index(),
+                         detached_defs);
+  compactor.RenumberGraph(this);
+}
+
 }  // namespace dart
diff --git a/runtime/vm/compiler/backend/flow_graph.h b/runtime/vm/compiler/backend/flow_graph.h
index 354e15f..354e2fa 100644
--- a/runtime/vm/compiler/backend/flow_graph.h
+++ b/runtime/vm/compiler/backend/flow_graph.h
@@ -208,6 +208,9 @@
   const GrowableArray<BlockEntryInstr*>& reverse_postorder() const {
     return reverse_postorder_;
   }
+  const GrowableArray<BlockEntryInstr*>& optimized_block_order() const {
+    return optimized_block_order_;
+  }
   static bool ShouldReorderBlocks(const Function& function, bool is_optimized);
   GrowableArray<BlockEntryInstr*>* CodegenBlockOrder(bool is_optimized);
 
@@ -533,6 +536,14 @@
   const Array& coverage_array() const { return *coverage_array_; }
   void set_coverage_array(const Array& array) { coverage_array_ = &array; }
 
+  // Renumbers SSA values and basic blocks to make numbering dense.
+  // Preserves order among block ids.
+  //
+  // Also collects definitions which are detached from the flow graph
+  // but still referenced (currently only MaterializeObject instructions
+  // can be detached).
+  void CompactSSA(ZoneGrowableArray<Definition*>* detached_defs = nullptr);
+
  private:
   friend class FlowGraphCompiler;  // TODO(ajcbik): restructure
   friend class FlowGraphChecker;
diff --git a/runtime/vm/compiler/backend/flow_graph_checker.cc b/runtime/vm/compiler/backend/flow_graph_checker.cc
index e3f8fdd..6fad4a1 100644
--- a/runtime/vm/compiler/backend/flow_graph_checker.cc
+++ b/runtime/vm/compiler/backend/flow_graph_checker.cc
@@ -403,6 +403,13 @@
     ASSERT1(def->previous() != nullptr, def);
     // Skip checks below for common constants as checking them could be slow.
     if (IsCommonConstant(def)) return;
+  } else if (def->IsMaterializeObject()) {
+    // Materializations can be both linked into graph and detached.
+    if (def->next() != nullptr) {
+      ASSERT1(def->previous() != nullptr, def);
+    } else {
+      ASSERT1(def->previous() == nullptr, def);
+    }
   } else {
     // Others are fully linked into graph.
     ASSERT1(def->next() != nullptr, def);
@@ -453,6 +460,14 @@
     ASSERT1(instruction->IsGraphEntry() || instruction->next() != nullptr,
             instruction);
     ASSERT2(DefDominatesUse(def, instruction), def, instruction);
+  } else if (instruction->IsMaterializeObject()) {
+    // Materializations can be both linked into graph and detached.
+    if (instruction->next() != nullptr) {
+      ASSERT1(instruction->previous() != nullptr, instruction);
+      ASSERT2(DefDominatesUse(def, instruction), def, instruction);
+    } else {
+      ASSERT1(instruction->previous() == nullptr, instruction);
+    }
   } else {
     // Others are fully linked into graph.
     ASSERT1(IsControlFlow(instruction) || instruction->next() != nullptr,
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 0dcade7..7182992 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -1057,6 +1057,11 @@
          (needs_number_check() == other_op->needs_number_check());
 }
 
+const RuntimeEntry& CaseInsensitiveCompareInstr::TargetFunction() const {
+  return handle_surrogates_ ? kCaseInsensitiveCompareUTF16RuntimeEntry
+                            : kCaseInsensitiveCompareUCS2RuntimeEntry;
+}
+
 bool MathMinMaxInstr::AttributesEqual(const Instruction& other) const {
   auto const other_op = other.AsMathMinMax();
   ASSERT(other_op != NULL);
@@ -3143,10 +3148,10 @@
     }
   }
 
-  if ((speculative_mode_ == kGuardInputs) && !ComputeCanDeoptimize()) {
+  if ((SpeculativeModeOfInput(0) == kGuardInputs) && !ComputeCanDeoptimize()) {
     // Remember if we ever learn our input doesn't require checking, as
     // the input Value might later be changed, which would make us forget.
-    speculative_mode_ = kNotSpeculative;
+    set_speculative_mode(kNotSpeculative);
   }
 
   return this;
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index 878c309..e8023e6 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -11,11 +11,13 @@
 
 #include <memory>
 #include <tuple>
+#include <type_traits>
 #include <utility>
 
 #include "vm/allocation.h"
 #include "vm/code_descriptors.h"
 #include "vm/compiler/backend/compile_type.h"
+#include "vm/compiler/backend/il_serializer.h"
 #include "vm/compiler/backend/locations.h"
 #include "vm/compiler/backend/slot.h"
 #include "vm/compiler/compiler_pass.h"
@@ -588,6 +590,63 @@
   DECLARE_INSTRUCTION_NO_BACKEND(type)                                         \
   DECLARE_COMPARISON_METHODS
 
+template <typename T, bool is_enum>
+struct unwrap_enum {};
+
+template <typename T>
+struct unwrap_enum<T, true> {
+  using type = std::underlying_type_t<T>;
+};
+
+template <typename T>
+struct unwrap_enum<T, false> {
+  using type = T;
+};
+
+template <typename T>
+using serializable_type_t =
+    typename unwrap_enum<std::remove_cv_t<T>, std::is_enum<T>::value>::type;
+
+#define WRITE_INSTRUCTION_FIELD(type, name)                                    \
+  s->Write<serializable_type_t<type>>(                                         \
+      static_cast<serializable_type_t<type>>(name));
+#define READ_INSTRUCTION_FIELD(type, name)                                     \
+  , name(static_cast<std::remove_cv_t<type>>(                                  \
+        d->Read<serializable_type_t<type>>()))
+#define DECLARE_INSTRUCTION_FIELD(type, name) type name;
+
+// Every instruction class should declare its serialization via
+// DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS, DECLARE_EMPTY_SERIALIZATION
+// or DECLARE_CUSTOM_SERIALIZATION.
+// If an instruction class has fields which reference other instructions,
+// then it should also use DECLARE_EXTRA_SERIALIZATION and serialize
+// those references in WriteExtra/ReadExtra methods.
+#define DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(Instr, BaseClass, FieldList)   \
+ public:                                                                       \
+  virtual void WriteTo(FlowGraphSerializer* s) {                               \
+    BaseClass::WriteTo(s);                                                     \
+    FieldList(WRITE_INSTRUCTION_FIELD)                                         \
+  }                                                                            \
+  explicit Instr(FlowGraphDeserializer* d)                                     \
+      : BaseClass(d) FieldList(READ_INSTRUCTION_FIELD) {}                      \
+                                                                               \
+ private:                                                                      \
+  FieldList(DECLARE_INSTRUCTION_FIELD)
+
+#define DECLARE_CUSTOM_SERIALIZATION(Instr)                                    \
+ public:                                                                       \
+  virtual void WriteTo(FlowGraphSerializer* s);                                \
+  explicit Instr(FlowGraphDeserializer* d);
+
+#define DECLARE_EMPTY_SERIALIZATION(Instr, BaseClass)                          \
+ public:                                                                       \
+  explicit Instr(FlowGraphDeserializer* d) : BaseClass(d) {}
+
+#define DECLARE_EXTRA_SERIALIZATION                                            \
+ public:                                                                       \
+  virtual void WriteExtra(FlowGraphSerializer* s);                             \
+  virtual void ReadExtra(FlowGraphDeserializer* d);
+
 #if defined(INCLUDE_IL_PRINTER)
 #define PRINT_TO_SUPPORT virtual void PrintTo(BaseTextBuffer* f) const;
 #define PRINT_OPERANDS_TO_SUPPORT                                              \
@@ -716,6 +775,9 @@
     return true;
   }
 
+  void Write(FlowGraphSerializer* s) const;
+  explicit CallTargets(FlowGraphDeserializer* d);
+
  private:
   void CreateHelper(Zone* zone, const ICData& ic_data);
   void MergeIntoRanges();
@@ -1270,6 +1332,9 @@
     return Location::kRegister;
   }
 
+  DECLARE_CUSTOM_SERIALIZATION(Instruction)
+  DECLARE_EXTRA_SERIALIZATION
+
  protected:
   // GetDeoptId and/or CopyDeoptIdFrom.
   friend class CallSiteInliner;
@@ -1288,6 +1353,12 @@
     deopt_id_ = instr.deopt_id_;
   }
 
+  // Write/read locs and environment, but not inputs.
+  // Used when one instruction embeds another and reuses its inputs
+  // (e.g. Branch/IfThenElse/CheckCondition wrap Comparison).
+  void WriteExtraWithoutInputs(FlowGraphSerializer* s);
+  void ReadExtraWithoutInputs(FlowGraphDeserializer* d);
+
  private:
   friend class BranchInstr;          // For RawSetInputAt.
   friend class IfThenElseInstr;      // For RawSetInputAt.
@@ -1340,6 +1411,8 @@
 
   virtual bool AllowsCSE() const { return true; }
   virtual bool HasUnknownSideEffects() const { return false; }
+
+  DECLARE_EMPTY_SERIALIZATION(PureInstruction, Instruction)
 };
 
 // Types to be used as ThrowsTrait for TemplateInstruction/TemplateDefinition.
@@ -1384,6 +1457,8 @@
 
   virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }
 
+  DECLARE_EMPTY_SERIALIZATION(TemplateInstruction, BaseClass)
+
  protected:
   EmbeddedArray<Value*, N> inputs_;
 
@@ -1484,6 +1559,8 @@
   }
 
   PRINT_TO_SUPPORT
+  DECLARE_EMPTY_SERIALIZATION(ParallelMoveInstr, TemplateInstruction)
+  DECLARE_EXTRA_SERIALIZATION
 
  private:
   GrowableArray<MoveOperands*> moves_;  // Elements cannot be null.
@@ -1624,6 +1701,9 @@
 
   DECLARE_ABSTRACT_INSTRUCTION(BlockEntry)
 
+  DECLARE_CUSTOM_SERIALIZATION(BlockEntryInstr)
+  DECLARE_EXTRA_SERIALIZATION
+
  protected:
   BlockEntryInstr(intptr_t block_id,
                   intptr_t try_index,
@@ -1774,6 +1854,9 @@
     return this;
   }
 
+  DECLARE_CUSTOM_SERIALIZATION(BlockEntryWithInitialDefs)
+  DECLARE_EXTRA_SERIALIZATION
+
  protected:
   void PrintInitialDefinitionsTo(BaseTextBuffer* f) const;
 
@@ -1856,6 +1939,8 @@
   }
 
   PRINT_TO_SUPPORT
+  DECLARE_CUSTOM_SERIALIZATION(GraphEntryInstr)
+  DECLARE_EXTRA_SERIALIZATION
 
  private:
   GraphEntryInstr(const ParsedFunction& parsed_function,
@@ -1888,8 +1973,9 @@
                  intptr_t deopt_id,
                  intptr_t stack_depth = 0)
       : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth),
-        predecessors_(2),  // Two is the assumed to be the common case.
-        phis_(NULL) {}
+        phis_(nullptr),
+        predecessors_(2)  // Two is assumed to be the common case.
+  {}
 
   DECLARE_INSTRUCTION(JoinEntry)
 
@@ -1913,6 +1999,14 @@
 
   PRINT_TO_SUPPORT
 
+#define FIELD_LIST(F) F(ZoneGrowableArray<PhiInstr*>*, phis_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(JoinEntryInstr,
+                                          BlockEntryInstr,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+  DECLARE_EXTRA_SERIALIZATION
+
  private:
   // Classes that have access to predecessors_ when inlining.
   friend class BlockEntryInstr;
@@ -1928,7 +2022,6 @@
   virtual void AddPredecessor(BlockEntryInstr* predecessor);
 
   GrowableArray<BlockEntryInstr*> predecessors_;
-  ZoneGrowableArray<PhiInstr*>* phis_;
 
   DISALLOW_COPY_AND_ASSIGN(JoinEntryInstr);
 };
@@ -1961,7 +2054,6 @@
                    intptr_t deopt_id,
                    intptr_t stack_depth = 0)
       : BlockEntryInstr(block_id, try_index, deopt_id, stack_depth),
-        predecessor_(NULL),
         edge_weight_(0.0) {}
 
   DECLARE_INSTRUCTION(TargetEntry)
@@ -1980,6 +2072,12 @@
 
   PRINT_TO_SUPPORT
 
+#define FIELD_LIST(F) F(double, edge_weight_)
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(TargetEntryInstr,
+                                          BlockEntryInstr,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   friend class BlockEntryInstr;  // Access to predecessor_ when inlining.
 
@@ -1989,8 +2087,8 @@
     predecessor_ = predecessor;
   }
 
-  BlockEntryInstr* predecessor_;
-  double edge_weight_;
+  // Not serialized, set in DiscoverBlocks.
+  BlockEntryInstr* predecessor_ = nullptr;
 
   DISALLOW_COPY_AND_ASSIGN(TargetEntryInstr);
 };
@@ -2029,6 +2127,7 @@
   GraphEntryInstr* graph_entry() const { return graph_entry_; }
 
   PRINT_TO_SUPPORT
+  DECLARE_CUSTOM_SERIALIZATION(FunctionEntryInstr)
 
  private:
   virtual void ClearPredecessors() { graph_entry_ = nullptr; }
@@ -2062,13 +2161,19 @@
 
   PRINT_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(const intptr_t, callback_id_)                                              \
+  F(const compiler::ffi::CallbackMarshaller&, marshaller_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(NativeEntryInstr,
+                                          FunctionEntryInstr,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   void SaveArguments(FlowGraphCompiler* compiler) const;
   void SaveArgument(FlowGraphCompiler* compiler,
                     const compiler::ffi::NativeLocation& loc) const;
-
-  const intptr_t callback_id_;
-  const compiler::ffi::CallbackMarshaller& marshaller_;
 };
 
 // Represents an OSR entrypoint to a function.
@@ -2097,6 +2202,7 @@
   GraphEntryInstr* graph_entry() const { return graph_entry_; }
 
   PRINT_TO_SUPPORT
+  DECLARE_CUSTOM_SERIALIZATION(OsrEntryInstr)
 
  private:
   virtual void ClearPredecessors() { graph_entry_ = nullptr; }
@@ -2125,8 +2231,12 @@
 
   PRINT_TO_SUPPORT
 
- private:
-  const intptr_t indirect_id_;
+#define FIELD_LIST(F) F(const intptr_t, indirect_id_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(IndirectEntryInstr,
+                                          JoinEntryInstr,
+                                          FIELD_LIST)
+#undef FIELD_LIST
 };
 
 class CatchBlockEntryInstr : public BlockEntryWithInitialDefs {
@@ -2187,6 +2297,7 @@
   intptr_t catch_try_index() const { return catch_try_index_; }
 
   PRINT_TO_SUPPORT
+  DECLARE_CUSTOM_SERIALIZATION(CatchBlockEntryInstr)
 
  private:
   friend class BlockEntryInstr;  // Access to predecessor_ when inlining.
@@ -2201,7 +2312,6 @@
   BlockEntryInstr* predecessor_;
   const Array& catch_handler_types_;
   const intptr_t catch_try_index_;
-  GrowableArray<Definition*> initial_definitions_;
   const LocalVariable* exception_var_;
   const LocalVariable* stacktrace_var_;
   const LocalVariable* raw_exception_var_;
@@ -2266,6 +2376,9 @@
     return *this;
   }
 
+  void Write(FlowGraphSerializer* s) const;
+  explicit AliasIdentity(FlowGraphDeserializer* d);
+
  private:
   explicit AliasIdentity(intptr_t value) : value_(value) {}
 
@@ -2460,6 +2573,8 @@
   virtual Definition* AsDefinition() { return this; }
   virtual const Definition* AsDefinition() const { return this; }
 
+  DECLARE_CUSTOM_SERIALIZATION(Definition)
+
  protected:
   friend class RangeAnalysis;
   friend class Value;
@@ -2510,6 +2625,8 @@
 
   virtual bool AllowsCSE() const { return true; }
   virtual bool HasUnknownSideEffects() const { return false; }
+
+  DECLARE_EMPTY_SERIALIZATION(PureDefinition, Definition)
 };
 
 template <intptr_t N,
@@ -2530,6 +2647,7 @@
 
   virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }
 
+  DECLARE_EMPTY_SERIALIZATION(TemplateDefinition, BaseClass)
  protected:
   EmbeddedArray<Value*, N> inputs_;
 
@@ -2572,6 +2690,8 @@
   intptr_t InputCount() const { return inputs_.length(); }
   Value* InputAt(intptr_t i) const { return inputs_[i]; }
 
+  DECLARE_CUSTOM_SERIALIZATION(VariadicDefinition)
+
  protected:
   InputsArray inputs_;
 
@@ -2648,6 +2768,7 @@
   virtual Definition* Canonicalize(FlowGraph* flow_graph);
 
   PRINT_TO_SUPPORT
+  DECLARE_CUSTOM_SERIALIZATION(PhiInstr)
 
   enum ReceiverType { kUnknownReceiver = -1, kNotReceiver = 0, kReceiver = 1 };
 
@@ -2722,17 +2843,23 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const intptr_t index_;
+#define FIELD_LIST(F)                                                          \
+  F(const intptr_t, index_)                                                    \
+  /* The offset (in words) of the last slot of the parameter, relative */      \
+  /* to the first parameter. */                                                \
+  /* It is used in the FlowGraphAllocator when it sets the assigned */         \
+  /* location and spill slot for the parameter definition. */                  \
+  F(const intptr_t, param_offset_)                                             \
+  F(const Register, base_reg_)                                                 \
+  F(const Representation, representation_)
 
-  // The offset (in words) of the last slot of the parameter, relative
-  // to the first parameter.
-  // It is used in the FlowGraphAllocator when it sets the assigned location
-  // and spill slot for the parameter definition.
-  const intptr_t param_offset_;
-  const Register base_reg_;
-  const Representation representation_;
-  BlockEntryInstr* block_;
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ParameterInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
+  BlockEntryInstr* block_ = nullptr;
 
   DISALLOW_COPY_AND_ASSIGN(ParameterInstr);
 };
@@ -2765,10 +2892,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const compiler::ffi::CallbackMarshaller& marshaller_;
-  const intptr_t def_index_;
+#define FIELD_LIST(F)                                                          \
+  F(const compiler::ffi::CallbackMarshaller&, marshaller_)                     \
+  F(const intptr_t, def_index_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(NativeParameterInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(NativeParameterInstr);
 };
 
@@ -2815,9 +2948,14 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const intptr_t offset_;
+#define FIELD_LIST(F) F(const intptr_t, offset_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreIndexedUnsafeInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(StoreIndexedUnsafeInstr);
 };
 
@@ -2865,10 +3003,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const intptr_t offset_;
-  const Representation representation_;
+#define FIELD_LIST(F)                                                          \
+  F(const intptr_t, offset_)                                                   \
+  F(const Representation, representation_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadIndexedUnsafeInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(LoadIndexedUnsafeInstr);
 };
 
@@ -2921,6 +3065,16 @@
   Value* dest_start() const { return inputs_[kDestStartPos]; }
   Value* length() const { return inputs_[kLengthPos]; }
 
+#define FIELD_LIST(F)                                                          \
+  F(classid_t, src_cid_)                                                       \
+  F(classid_t, dest_cid_)                                                      \
+  F(intptr_t, element_size_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MemoryCopyInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   // Set array_reg to point to the index indicated by start (contained in
   // start_reg) of the typed data or string in array (contained in array_reg).
@@ -2945,10 +3099,6 @@
     }
   }
 
-  classid_t src_cid_;
-  classid_t dest_cid_;
-  intptr_t element_size_;
-
   DISALLOW_COPY_AND_ASSIGN(MemoryCopyInstr);
 };
 
@@ -2983,9 +3133,14 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const Code& code_;
+#define FIELD_LIST(F) F(const Code&, code_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(TailCallInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(TailCallInstr);
 };
 
@@ -3019,9 +3174,14 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const Representation representation_;
+#define FIELD_LIST(F) F(const Representation, representation_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(PushArgumentInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(PushArgumentInstr);
 };
 
@@ -3089,11 +3249,17 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const TokenPosition token_pos_;
-  const intptr_t yield_index_;
-  const Representation representation_;
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const intptr_t, yield_index_)                                              \
+  F(const Representation, representation_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ReturnInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   const Code& GetReturnStub(FlowGraphCompiler* compiler) const;
 
   DISALLOW_COPY_AND_ASSIGN(ReturnInstr);
@@ -3123,9 +3289,14 @@
     return false;
   }
 
- private:
-  const compiler::ffi::CallbackMarshaller& marshaller_;
+#define FIELD_LIST(F) F(const compiler::ffi::CallbackMarshaller&, marshaller_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(NativeReturnInstr,
+                                          ReturnInstr,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   void EmitReturnMoves(FlowGraphCompiler* compiler);
 
   DISALLOW_COPY_AND_ASSIGN(NativeReturnInstr);
@@ -3151,9 +3322,14 @@
 
   virtual bool HasUnknownSideEffects() const { return false; }
 
- private:
-  const TokenPosition token_pos_;
+#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ThrowInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(ThrowInstr);
 };
 
@@ -3186,10 +3362,16 @@
 
   virtual bool HasUnknownSideEffects() const { return false; }
 
- private:
-  const TokenPosition token_pos_;
-  const intptr_t catch_try_index_;
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const intptr_t, catch_try_index_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ReThrowInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(ReThrowInstr);
 };
 
@@ -3207,9 +3389,14 @@
 
   virtual bool HasUnknownSideEffects() const { return false; }
 
- private:
-  const char* message_;
+#define FIELD_LIST(F) F(const char*, message_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StopInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(StopInstr);
 };
 
@@ -3217,10 +3404,9 @@
  public:
   explicit GotoInstr(JoinEntryInstr* entry, intptr_t deopt_id)
       : TemplateInstruction(deopt_id),
-        block_(NULL),
-        successor_(entry),
         edge_weight_(0.0),
-        parallel_move_(NULL) {}
+        parallel_move_(nullptr),
+        successor_(entry) {}
 
   DECLARE_INSTRUCTION(Goto)
 
@@ -3270,14 +3456,23 @@
 
   PRINT_TO_SUPPORT
 
- private:
-  BlockEntryInstr* block_;
-  JoinEntryInstr* successor_;
-  double edge_weight_;
+#define FIELD_LIST(F)                                                          \
+  F(double, edge_weight_)                                                      \
+  /* Parallel move that will be used by linear scan register allocator to */   \
+  /* connect live ranges at the end of the block and resolve phis. */          \
+  F(ParallelMoveInstr*, parallel_move_)
 
-  // Parallel move that will be used by linear scan register allocator to
-  // connect live ranges at the end of the block and resolve phis.
-  ParallelMoveInstr* parallel_move_;
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(GotoInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+  DECLARE_EXTRA_SERIALIZATION
+
+ private:
+  BlockEntryInstr* block_ = nullptr;
+  JoinEntryInstr* successor_ = nullptr;
+
+  DISALLOW_COPY_AND_ASSIGN(GotoInstr);
 };
 
 // IndirectGotoInstr represents a dynamically computed jump. Only
@@ -3339,9 +3534,14 @@
 
   PRINT_TO_SUPPORT
 
+  DECLARE_CUSTOM_SERIALIZATION(IndirectGotoInstr)
+  DECLARE_EXTRA_SERIALIZATION
+
  private:
   GrowableArray<TargetEntryInstr*> successors_;
   const TypedData& offsets_;
+
+  DISALLOW_COPY_AND_ASSIGN(IndirectGotoInstr);
 };
 
 class ComparisonInstr : public Definition {
@@ -3393,6 +3593,17 @@
 
   DECLARE_ABSTRACT_INSTRUCTION(Comparison)
 
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(Token::Kind, kind_)                                                        \
+  /* Set by optimizer. */                                                      \
+  F(intptr_t, operation_cid_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ComparisonInstr,
+                                          Definition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  protected:
   ComparisonInstr(const InstructionSource& source,
                   Token::Kind kind,
@@ -3403,10 +3614,6 @@
         operation_cid_(kIllegalCid) {}
 
  private:
-  const TokenPosition token_pos_;
-  Token::Kind kind_;
-  intptr_t operation_cid_;  // Set by optimizer.
-
   DISALLOW_COPY_AND_ASSIGN(ComparisonInstr);
 };
 
@@ -3415,6 +3622,7 @@
   virtual bool AllowsCSE() const { return true; }
   virtual bool HasUnknownSideEffects() const { return false; }
 
+  DECLARE_EMPTY_SERIALIZATION(PureComparison, ComparisonInstr)
  protected:
   PureComparison(const InstructionSource& source,
                  Token::Kind kind,
@@ -3428,17 +3636,20 @@
 class TemplateComparison
     : public CSETrait<ComparisonInstr, PureComparison>::Base {
  public:
+  using BaseClass = typename CSETrait<ComparisonInstr, PureComparison>::Base;
+
   TemplateComparison(const InstructionSource& source,
                      Token::Kind kind,
                      intptr_t deopt_id = DeoptId::kNone)
-      : CSETrait<ComparisonInstr, PureComparison>::Base(source, kind, deopt_id),
-        inputs_() {}
+      : BaseClass(source, kind, deopt_id), inputs_() {}
 
   virtual intptr_t InputCount() const { return N; }
   virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }
 
   virtual bool MayThrow() const { return ThrowsTrait::kCanThrow; }
 
+  DECLARE_EMPTY_SERIALIZATION(TemplateComparison, BaseClass)
+
  protected:
   EmbeddedArray<Value*, N> inputs_;
 
@@ -3449,7 +3660,7 @@
 class BranchInstr : public Instruction {
  public:
   explicit BranchInstr(ComparisonInstr* comparison, intptr_t deopt_id)
-      : Instruction(deopt_id), comparison_(comparison), constant_target_(NULL) {
+      : Instruction(deopt_id), comparison_(comparison) {
     ASSERT(comparison->env() == NULL);
     for (intptr_t i = comparison->InputCount() - 1; i >= 0; --i) {
       comparison->InputAt(i)->set_instruction(this);
@@ -3529,15 +3740,20 @@
 
   PRINT_TO_SUPPORT
 
+#define FIELD_LIST(F) F(ComparisonInstr*, comparison_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BranchInstr, Instruction, FIELD_LIST)
+#undef FIELD_LIST
+  DECLARE_EXTRA_SERIALIZATION
+
  private:
   virtual void RawSetInputAt(intptr_t i, Value* value) {
     comparison()->RawSetInputAt(i, value);
   }
 
-  TargetEntryInstr* true_successor_;
-  TargetEntryInstr* false_successor_;
-  ComparisonInstr* comparison_;
-  TargetEntryInstr* constant_target_;
+  TargetEntryInstr* true_successor_ = nullptr;
+  TargetEntryInstr* false_successor_ = nullptr;
+  TargetEntryInstr* constant_target_ = nullptr;
 
   DISALLOW_COPY_AND_ASSIGN(BranchInstr);
 };
@@ -3553,9 +3769,14 @@
 
   DECLARE_INSTRUCTION(Deoptimize)
 
- private:
-  const ICData::DeoptReasonId deopt_reason_;
+#define FIELD_LIST(F) F(const ICData::DeoptReasonId, deopt_reason_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DeoptimizeInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(DeoptimizeInstr);
 };
 
@@ -3584,8 +3805,14 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F) F(CompileType*, constrained_type_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(RedefinitionInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
-  CompileType* constrained_type_;
   DISALLOW_COPY_AND_ASSIGN(RedefinitionInstr);
 };
 
@@ -3609,14 +3836,15 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+  DECLARE_EMPTY_SERIALIZATION(ReachabilityFenceInstr, TemplateInstruction)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(ReachabilityFenceInstr);
 };
 
 class ConstraintInstr : public TemplateDefinition<1, NoThrow> {
  public:
-  ConstraintInstr(Value* value, Range* constraint)
-      : constraint_(constraint), target_(NULL) {
+  ConstraintInstr(Value* value, Range* constraint) : constraint_(constraint) {
     SetInputAt(0, value);
   }
 
@@ -3646,9 +3874,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F) F(Range*, constraint_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ConstraintInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+  DECLARE_EXTRA_SERIALIZATION
+
  private:
-  Range* constraint_;
-  TargetEntryInstr* target_;
+  TargetEntryInstr* target_ = nullptr;
 
   DISALLOW_COPY_AND_ASSIGN(ConstraintInstr);
 };
@@ -3700,10 +3935,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const Object& value_;
-  const TokenPosition token_pos_;
+#define FIELD_LIST(F)                                                          \
+  F(const Object&, value_)                                                     \
+  F(const TokenPosition, token_pos_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ConstantInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(ConstantInstr);
 };
 
@@ -3721,6 +3962,7 @@
   uword constant_address() const { return constant_address_; }
 
   DECLARE_INSTRUCTION(UnboxedConstant)
+  DECLARE_CUSTOM_SERIALIZATION(UnboxedConstantInstr)
 
  private:
   const Representation representation_;
@@ -3785,9 +4027,14 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const TokenPosition token_pos_;
+#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AssertSubtypeInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(AssertSubtypeInstr);
 };
 
@@ -3881,11 +4128,17 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const TokenPosition token_pos_;
-  const String& dst_name_;
-  const Kind kind_;
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const String&, dst_name_)                                                  \
+  F(const Kind, kind_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AssertAssignableInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(AssertAssignableInstr);
 };
 
@@ -3920,9 +4173,14 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const TokenPosition token_pos_;
+#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AssertBooleanInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(AssertBooleanInstr);
 };
 
@@ -3974,9 +4232,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F) F(const SpecialParameterKind, kind_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(SpecialParameterInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+  DECLARE_EXTRA_SERIALIZATION
+
  private:
-  const SpecialParameterKind kind_;
-  BlockEntryInstr* block_;
+  BlockEntryInstr* block_ = nullptr;
   DISALLOW_COPY_AND_ASSIGN(SpecialParameterInstr);
 };
 
@@ -4083,11 +4348,19 @@
         ArgumentsSizeWithoutTypeArgs(), argument_names());
   }
 
+#define FIELD_LIST(F)                                                          \
+  F(const intptr_t, type_args_len_)                                            \
+  F(const Array&, argument_names_)                                             \
+  F(const TokenPosition, token_pos_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(TemplateDartCall,
+                                          VariadicDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+  DECLARE_EXTRA_SERIALIZATION
+
  private:
-  intptr_t type_args_len_;
-  const Array& argument_names_;
   PushArgumentsArray* push_arguments_ = nullptr;
-  TokenPosition token_pos_;
 
   DISALLOW_COPY_AND_ASSIGN(TemplateDartCall);
 };
@@ -4113,6 +4386,7 @@
   virtual bool HasUnknownSideEffects() const { return true; }
 
   PRINT_OPERANDS_TO_SUPPORT
+  DECLARE_EMPTY_SERIALIZATION(ClosureCallInstr, TemplateDartCall)
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ClosureCallInstr);
@@ -4143,7 +4417,10 @@
         interface_target_(interface_target),
         tearoff_interface_target_(tearoff_interface_target),
         result_type_(nullptr),
-        has_unique_selector_(false) {
+        has_unique_selector_(false),
+        entry_kind_(Code::EntryKind::kNormal),
+        receiver_is_not_smi_(false),
+        is_call_on_this_(false) {
     ASSERT(function_name.IsNotTemporaryScopedHandle());
     ASSERT(interface_target.IsNotTemporaryScopedHandle());
     ASSERT(tearoff_interface_target.IsNotTemporaryScopedHandle());
@@ -4238,23 +4515,31 @@
 
   virtual Representation representation() const;
 
+#define FIELD_LIST(F)                                                          \
+  F(const ICData*, ic_data_)                                                   \
+  F(const String&, function_name_)                                             \
+  /* Binary op, unary op, kGET or kILLEGAL. */                                 \
+  F(const Token::Kind, token_kind_)                                            \
+  F(const Function&, interface_target_)                                        \
+  F(const Function&, tearoff_interface_target_)                                \
+  /* Inferred result type. */                                                  \
+  F(CompileType*, result_type_)                                                \
+  F(bool, has_unique_selector_)                                                \
+  F(Code::EntryKind, entry_kind_)                                              \
+  F(bool, receiver_is_not_smi_)                                                \
+  F(bool, is_call_on_this_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstanceCallBaseInstr,
+                                          TemplateDartCall,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  protected:
   friend class CallSpecializer;
   void set_ic_data(ICData* value) { ic_data_ = value; }
   void set_result_type(CompileType* result_type) { result_type_ = result_type; }
 
  private:
-  const ICData* ic_data_;
-  const String& function_name_;
-  const Token::Kind token_kind_;  // Binary op, unary op, kGET or kILLEGAL.
-  const Function& interface_target_;
-  const Function& tearoff_interface_target_;
-  CompileType* result_type_;  // Inferred result type.
-  bool has_unique_selector_;
-  Code::EntryKind entry_kind_ = Code::EntryKind::kNormal;
-  bool receiver_is_not_smi_ = false;
-  bool is_call_on_this_ = false;
-
   DISALLOW_COPY_AND_ASSIGN(InstanceCallBaseInstr);
 };
 
@@ -4283,7 +4568,8 @@
             deopt_id,
             interface_target,
             tearoff_interface_target),
-        checked_argument_count_(checked_argument_count) {}
+        checked_argument_count_(checked_argument_count),
+        receivers_static_type_(nullptr) {}
 
   InstanceCallInstr(
       const InstructionSource& source,
@@ -4306,7 +4592,8 @@
                               deopt_id,
                               interface_target,
                               tearoff_interface_target),
-        checked_argument_count_(checked_argument_count) {}
+        checked_argument_count_(checked_argument_count),
+        receivers_static_type_(nullptr) {}
 
   DECLARE_INSTRUCTION(InstanceCall)
 
@@ -4335,11 +4622,18 @@
   const CallTargets& Targets();
   void SetTargets(const CallTargets* targets) { targets_ = targets; }
 
+#define FIELD_LIST(F)                                                          \
+  F(const intptr_t, checked_argument_count_)                                   \
+  F(const AbstractType*, receivers_static_type_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstanceCallInstr,
+                                          InstanceCallBaseInstr,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   const CallTargets* targets_ = nullptr;
   const class BinaryFeedback* binary_ = nullptr;
-  const intptr_t checked_argument_count_;
-  const AbstractType* receivers_static_type_ = nullptr;
 
   DISALLOW_COPY_AND_ASSIGN(InstanceCallInstr);
 };
@@ -4402,6 +4696,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(const CallTargets&, targets_)                                              \
+  F(const bool, complete_)                                                     \
+  F(intptr_t, total_call_count_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(PolymorphicInstanceCallInstr,
+                                          InstanceCallBaseInstr,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   PolymorphicInstanceCallInstr(const InstructionSource& source,
                                const String& function_name,
@@ -4431,10 +4735,6 @@
     total_call_count_ = CallCount();
   }
 
-  const CallTargets& targets_;
-  const bool complete_;
-  intptr_t total_call_count_;
-
   friend class PolymorphicInliner;
 
   DISALLOW_COPY_AND_ASSIGN(PolymorphicInstanceCallInstr);
@@ -4510,10 +4810,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const Function& interface_target_;
-  const compiler::TableSelector* selector_;
+#define FIELD_LIST(F)                                                          \
+  F(const Function&, interface_target_)                                        \
+  F(const compiler::TableSelector*, selector_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DispatchTableCallInstr,
+                                          TemplateDartCall,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(DispatchTableCallInstr);
 };
 
@@ -4543,6 +4849,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT;
 
+#define FIELD_LIST(F)                                                          \
+  /* True if the comparison must check for double or Mint and */               \
+  /* use value comparison instead. */                                          \
+  F(bool, needs_number_check_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StrictCompareInstr,
+                                          TemplateComparison,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   Condition EmitComparisonCodeRegConstant(FlowGraphCompiler* compiler,
                                           BranchLabels labels,
@@ -4554,10 +4870,6 @@
                        const Object& obj,
                        Condition* condition_out);
 
-  // True if the comparison must check for double or Mint and
-  // use value comparison instead.
-  bool needs_number_check_;
-
   DISALLOW_COPY_AND_ASSIGN(StrictCompareInstr);
 };
 
@@ -4587,6 +4899,8 @@
     return kTagged;
   }
 
+  DECLARE_EMPTY_SERIALIZATION(TestSmiInstr, TemplateComparison)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(TestSmiInstr);
 };
@@ -4632,9 +4946,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(const ZoneGrowableArray<intptr_t>&, cid_results_)                          \
+  F(bool, licm_hoisted_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(TestCidsInstr,
+                                          TemplateComparison,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
-  const ZoneGrowableArray<intptr_t>& cid_results_;
-  bool licm_hoisted_;
   DISALLOW_COPY_AND_ASSIGN(TestCidsInstr);
 };
 
@@ -4690,9 +5011,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(bool, null_aware_)                                                         \
+  F(const SpeculativeMode, speculative_mode_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(EqualityCompareInstr,
+                                          TemplateComparison,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
-  bool null_aware_;
-  const SpeculativeMode speculative_mode_;
   DISALLOW_COPY_AND_ASSIGN(EqualityCompareInstr);
 };
 
@@ -4739,8 +5067,14 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(RelationalOpInstr,
+                                          TemplateComparison,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
-  const SpeculativeMode speculative_mode_;
   DISALLOW_COPY_AND_ASSIGN(RelationalOpInstr);
 };
 
@@ -4815,15 +5149,22 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(ComparisonInstr*, comparison_)                                             \
+  F(const intptr_t, if_true_)                                                  \
+  F(const intptr_t, if_false_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(IfThenElseInstr,
+                                          Definition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+  DECLARE_EXTRA_SERIALIZATION
+
  private:
   virtual void RawSetInputAt(intptr_t i, Value* value) {
     comparison()->RawSetInputAt(i, value);
   }
 
-  ComparisonInstr* comparison_;
-  const intptr_t if_true_;
-  const intptr_t if_false_;
-
   DISALLOW_COPY_AND_ASSIGN(IfThenElseInstr);
 };
 
@@ -4848,6 +5189,7 @@
         rebind_rule_(rebind_rule),
         result_type_(NULL),
         is_known_list_constructor_(false),
+        entry_kind_(Code::EntryKind::kNormal),
         identity_(AliasIdentity::Unknown()) {
     ASSERT(function.IsZoneHandle());
     ASSERT(!function.IsNull());
@@ -4872,6 +5214,7 @@
         rebind_rule_(rebind_rule),
         result_type_(NULL),
         is_known_list_constructor_(false),
+        entry_kind_(Code::EntryKind::kNormal),
         identity_(AliasIdentity::Unknown()) {
     ASSERT(function.IsZoneHandle());
     ASSERT(!function.IsNull());
@@ -4990,21 +5333,26 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(const ICData*, ic_data_)                                                   \
+  F(const intptr_t, call_count_)                                               \
+  F(const Function&, function_)                                                \
+  F(const ICData::RebindRule, rebind_rule_)                                    \
+  /* Known or inferred result type. */                                         \
+  F(CompileType*, result_type_)                                                \
+  /* 'True' for recognized list constructors. */                               \
+  F(bool, is_known_list_constructor_)                                          \
+  F(Code::EntryKind, entry_kind_)                                              \
+  F(AliasIdentity, identity_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StaticCallInstr,
+                                          TemplateDartCall,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
-  const ICData* ic_data_;
   const CallTargets* targets_ = nullptr;
   const class BinaryFeedback* binary_ = nullptr;
-  const intptr_t call_count_;
-  const Function& function_;
-  const ICData::RebindRule rebind_rule_;
-  CompileType* result_type_;  // Known or inferred result type.
-
-  // 'True' for recognized list constructors.
-  bool is_known_list_constructor_;
-
-  Code::EntryKind entry_kind_ = Code::EntryKind::kNormal;
-
-  AliasIdentity identity_;
 
   DISALLOW_COPY_AND_ASSIGN(StaticCallInstr);
 };
@@ -5036,11 +5384,17 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const LocalVariable& local_;
-  bool is_last_;
-  const TokenPosition token_pos_;
+#define FIELD_LIST(F)                                                          \
+  F(const LocalVariable&, local_)                                              \
+  F(bool, is_last_)                                                            \
+  F(const TokenPosition, token_pos_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadLocalInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(LoadLocalInstr);
 };
 
@@ -5080,14 +5434,21 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(const intptr_t, num_temps_)                                                \
+  F(const bool, has_input_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DropTempsInstr,
+                                          Definition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   virtual void RawSetInputAt(intptr_t i, Value* value) {
     ASSERT(has_input_);
     value_ = value;
   }
 
-  const intptr_t num_temps_;
-  const bool has_input_;
   Value* value_ = nullptr;
 
   DISALLOW_COPY_AND_ASSIGN(DropTempsInstr);
@@ -5129,9 +5490,15 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  ConstantInstr* null_;
+#define FIELD_LIST(F) F(ConstantInstr*, null_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MakeTempInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+  DECLARE_EXTRA_SERIALIZATION
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(MakeTempInstr);
 };
 
@@ -5171,12 +5538,18 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const LocalVariable& local_;
-  bool is_dead_;
-  bool is_last_;
-  const TokenPosition token_pos_;
+#define FIELD_LIST(F)                                                          \
+  F(const LocalVariable&, local_)                                              \
+  F(bool, is_dead_)                                                            \
+  F(bool, is_last_)                                                            \
+  F(const TokenPosition, token_pos_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreLocalInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(StoreLocalInstr);
 };
 
@@ -5194,11 +5567,8 @@
                          source),
         native_name_(name),
         function_(function),
-        native_c_function_(NULL),
-        is_bootstrap_native_(false),
-        is_auto_scope_(true),
-        link_lazily_(link_lazily),
-        token_pos_(source.token_pos) {
+        token_pos_(source.token_pos),
+        link_lazily_(link_lazily) {
     ASSERT(name.IsZoneHandle());
     ASSERT(function.IsZoneHandle());
   }
@@ -5224,6 +5594,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(const String&, native_name_)                                               \
+  F(const Function&, function_)                                                \
+  F(const TokenPosition, token_pos_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(NativeCallInstr,
+                                          TemplateDartCall,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   void set_native_c_function(NativeFunction value) {
     native_c_function_ = value;
@@ -5232,13 +5612,12 @@
   void set_is_bootstrap_native(bool value) { is_bootstrap_native_ = value; }
   void set_is_auto_scope(bool value) { is_auto_scope_ = value; }
 
-  const String& native_name_;
-  const Function& function_;
-  NativeFunction native_c_function_;
-  bool is_bootstrap_native_;
-  bool is_auto_scope_;
-  bool link_lazily_;
-  const TokenPosition token_pos_;
+  // These fields are not serialized.
+  // IL serialization only supports lazy linking of native functions.
+  NativeFunction native_c_function_ = nullptr;
+  bool is_bootstrap_native_ = false;
+  bool is_auto_scope_ = true;
+  bool link_lazily_ = true;
 
   DISALLOW_COPY_AND_ASSIGN(NativeCallInstr);
 };
@@ -5303,6 +5682,15 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(const compiler::ffi::CallMarshaller&, marshaller_)                         \
+  F(bool, is_leaf_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(FfiCallInstr,
+                                          VariadicDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   LocationSummary* MakeLocationSummaryInternal(Zone* zone,
                                                bool is_optimizing,
@@ -5320,9 +5708,6 @@
                        const Register temp0,
                        const Register temp1);
 
-  const compiler::ffi::CallMarshaller& marshaller_;
-  bool is_leaf_;
-
   DISALLOW_COPY_AND_ASSIGN(FfiCallInstr);
 };
 
@@ -5360,9 +5745,15 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const compiler::ffi::NativeCallingConvention& native_calling_convention_;
+#define FIELD_LIST(F)                                                          \
+  F(const compiler::ffi::NativeCallingConvention&, native_calling_convention_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CCallInstr,
+                                          VariadicDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(CCallInstr);
 };
 
@@ -5390,9 +5781,14 @@
   virtual bool ComputeCanDeoptimize() const { return false; }
   virtual bool HasUnknownSideEffects() const { return false; }
 
- private:
-  const int32_t offset_;
+#define FIELD_LIST(F) F(const int32_t, offset_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(RawStoreFieldInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(RawStoreFieldInstr);
 };
 
@@ -5412,10 +5808,16 @@
   virtual bool HasUnknownSideEffects() const { return true; }
   virtual Instruction* Canonicalize(FlowGraph* flow_graph);
 
- private:
-  const TokenPosition token_pos_;
-  const UntaggedPcDescriptors::Kind stub_kind_;
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const UntaggedPcDescriptors::Kind, stub_kind_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DebugStepCheckInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(DebugStepCheckInstr);
 };
 
@@ -5566,6 +5968,19 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(const Slot&, slot_)                                                        \
+  F(StoreBarrierType, emit_store_barrier_)                                     \
+  F(compiler::Assembler::MemoryOrder, memory_order_)                           \
+  F(const TokenPosition, token_pos_)                                           \
+  /* Marks initializing stores, e.g. in the constructor. */                    \
+  F(const bool, is_initialization_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreFieldInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   friend class JitCallSpecializer;  // For ASSERT(initialization_).
 
@@ -5578,13 +5993,6 @@
                                        : compiler::Assembler::kValueIsNotSmi;
   }
 
-  const Slot& slot_;
-  StoreBarrierType emit_store_barrier_;
-  compiler::Assembler::MemoryOrder memory_order_;
-  const TokenPosition token_pos_;
-  // Marks initializing stores. E.g. in the constructor.
-  const bool is_initialization_;
-
   DISALLOW_COPY_AND_ASSIGN(StoreFieldInstr);
 };
 
@@ -5608,9 +6016,14 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const Field& field_;
+#define FIELD_LIST(F) F(const Field&, field_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(GuardFieldInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(GuardFieldInstr);
 };
 
@@ -5627,6 +6040,8 @@
 
   virtual bool AttributesEqual(const Instruction& other) const;
 
+  DECLARE_EMPTY_SERIALIZATION(GuardFieldClassInstr, GuardFieldInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(GuardFieldClassInstr);
 };
@@ -5644,6 +6059,8 @@
 
   virtual bool AttributesEqual(const Instruction& other) const;
 
+  DECLARE_EMPTY_SERIALIZATION(GuardFieldLengthInstr, GuardFieldInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(GuardFieldLengthInstr);
 };
@@ -5667,6 +6084,8 @@
 
   virtual bool AttributesEqual(const Instruction& other) const;
 
+  DECLARE_EMPTY_SERIALIZATION(GuardFieldTypeInstr, GuardFieldInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(GuardFieldTypeInstr);
 };
@@ -5725,11 +6144,15 @@
   virtual bool CanTriggerGC() const { return calls_initializer(); }
   virtual bool MayThrow() const { return calls_initializer(); }
 
- private:
-  const TokenPosition token_pos_;
-  const bool throw_exception_on_initialization_;
-  bool calls_initializer_;
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const bool, throw_exception_on_initialization_)                            \
+  F(bool, calls_initializer_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(TemplateLoadField, Base, FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(TemplateLoadField);
 };
 
@@ -5764,9 +6187,14 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const Field& field_;
+#define FIELD_LIST(F) F(const Field&, field_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadStaticFieldInstr,
+                                          TemplateLoadField,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(LoadStaticFieldInstr);
 };
 
@@ -5801,6 +6229,15 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(const Field&, field_)                                                      \
+  F(const TokenPosition, token_pos_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreStaticFieldInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   compiler::Assembler::CanBeSmi CanValueBeSmi() const {
     ASSERT(value()->Type()->ToNullableCid() != kSmiCid);
@@ -5808,9 +6245,6 @@
                                        : compiler::Assembler::kValueIsNotSmi;
   }
 
-  const Field& field_;
-  const TokenPosition token_pos_;
-
   DISALLOW_COPY_AND_ASSIGN(StoreStaticFieldInstr);
 };
 
@@ -5881,14 +6315,21 @@
 
   virtual Definition* Canonicalize(FlowGraph* flow_graph);
 
- private:
-  const bool index_unboxed_;
-  const intptr_t index_scale_;
-  const intptr_t class_id_;
-  const AlignmentType alignment_;
-  const TokenPosition token_pos_;
-  CompileType* result_type_;  // derived from call
+#define FIELD_LIST(F)                                                          \
+  F(const bool, index_unboxed_)                                                \
+  F(const intptr_t, index_scale_)                                              \
+  F(const intptr_t, class_id_)                                                 \
+  F(const AlignmentType, alignment_)                                           \
+  F(const TokenPosition, token_pos_)                                           \
+  /* derived from call */                                                      \
+  F(CompileType*, result_type_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadIndexedInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(LoadIndexedInstr);
 };
 
@@ -5962,12 +6403,18 @@
     return !can_pack_into_smi() && (representation() == kTagged);
   }
 
- private:
-  const intptr_t class_id_;
-  const TokenPosition token_pos_;
-  const intptr_t element_count_;
-  Representation representation_;
+#define FIELD_LIST(F)                                                          \
+  F(const intptr_t, class_id_)                                                 \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const intptr_t, element_count_)                                            \
+  F(Representation, representation_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadCodeUnitsInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(LoadCodeUnitsInstr);
 };
 
@@ -5987,6 +6434,9 @@
 
   virtual bool AttributesEqual(const Instruction& other) const { return true; }
 
+  DECLARE_EMPTY_SERIALIZATION(OneByteStringFromCharCodeInstr,
+                              TemplateDefinition)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(OneByteStringFromCharCodeInstr);
 };
@@ -6009,9 +6459,14 @@
     return other.AsStringToCharCode()->cid_ == cid_;
   }
 
- private:
-  const intptr_t cid_;
+#define FIELD_LIST(F) F(const intptr_t, cid_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StringToCharCodeInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(StringToCharCodeInstr);
 };
 
@@ -6083,9 +6538,14 @@
 
   PRINT_TO_SUPPORT
 
- private:
-  const Slot& scan_flags_field_;
+#define FIELD_LIST(F) F(const Slot&, scan_flags_field_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(Utf8ScanInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(Utf8ScanInstr);
 };
 
@@ -6159,19 +6619,25 @@
 
   virtual Instruction* Canonicalize(FlowGraph* flow_graph);
 
+#define FIELD_LIST(F)                                                          \
+  F(StoreBarrierType, emit_store_barrier_)                                     \
+  F(const bool, index_unboxed_)                                                \
+  F(const intptr_t, index_scale_)                                              \
+  F(const intptr_t, class_id_)                                                 \
+  F(const AlignmentType, alignment_)                                           \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const SpeculativeMode, speculative_mode_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreIndexedInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   compiler::Assembler::CanBeSmi CanValueBeSmi() const {
     return compiler::Assembler::kValueCanBeSmi;
   }
 
-  StoreBarrierType emit_store_barrier_;
-  const bool index_unboxed_;
-  const intptr_t index_scale_;
-  const intptr_t class_id_;
-  const AlignmentType alignment_;
-  const TokenPosition token_pos_;
-  const SpeculativeMode speculative_mode_;
-
   DISALLOW_COPY_AND_ASSIGN(StoreIndexedInstr);
 };
 
@@ -6192,11 +6658,17 @@
   virtual bool HasUnknownSideEffects() const { return false; }
   virtual Instruction* Canonicalize(FlowGraph* flow_graph);
 
- private:
-  const Array& coverage_array_;
-  const intptr_t coverage_index_;
-  const TokenPosition token_pos_;
+#define FIELD_LIST(F)                                                          \
+  F(const Array&, coverage_array_)                                             \
+  F(const intptr_t, coverage_index_)                                           \
+  F(const TokenPosition, token_pos_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(RecordCoverageInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(RecordCoverageInstr);
 };
 
@@ -6216,6 +6688,8 @@
 
   virtual Definition* Canonicalize(FlowGraph* flow_graph);
 
+  DECLARE_EMPTY_SERIALIZATION(BooleanNegateInstr, TemplateDefinition)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(BooleanNegateInstr);
 };
@@ -6255,12 +6729,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const TokenPosition token_pos_;
-  Value* value_;
-  Value* type_arguments_;
-  const AbstractType& type_;
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const AbstractType&, type_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstanceOfInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(InstanceOfInstr);
 };
 
@@ -6324,10 +6802,16 @@
 
   DECLARE_ABSTRACT_INSTRUCTION(Allocation);
 
- private:
-  const TokenPosition token_pos_;
-  AliasIdentity identity_;
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(AliasIdentity, identity_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocationInstr,
+                                          Definition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(AllocationInstr);
 };
 
@@ -6341,6 +6825,8 @@
   virtual intptr_t InputCount() const { return N; }
   virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }
 
+  DECLARE_EMPTY_SERIALIZATION(TemplateAllocation, AllocationInstr)
+
  protected:
   EmbeddedArray<Value*, N> inputs_;
 
@@ -6362,6 +6848,7 @@
       : AllocationInstr(source, deopt_id),
         cls_(cls),
         has_type_arguments_(type_arguments != nullptr),
+        type_arguments_slot_(nullptr),
         type_arguments_(type_arguments) {
     ASSERT(cls.IsZoneHandle());
     ASSERT(!cls.IsNull());
@@ -6401,16 +6888,23 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(const Class&, cls_)                                                        \
+  F(const bool, has_type_arguments_)                                           \
+  F(const Slot*, type_arguments_slot_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateObjectInstr,
+                                          AllocationInstr,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   virtual void RawSetInputAt(intptr_t i, Value* value) {
     ASSERT(has_type_arguments_ && (i == kTypeArgumentsPos));
     type_arguments_ = value;
   }
 
-  const Class& cls_;
-  const bool has_type_arguments_;
-  Value* type_arguments_;
-  const Slot* type_arguments_slot_ = nullptr;
+  Value* type_arguments_ = nullptr;
 
   DISALLOW_COPY_AND_ASSIGN(AllocateObjectInstr);
 };
@@ -6462,6 +6956,8 @@
         compiler::target::Closure::InstanceSize());
   }
 
+  DECLARE_EMPTY_SERIALIZATION(AllocateClosureInstr, TemplateAllocation)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(AllocateClosureInstr);
 };
@@ -6488,9 +6984,14 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const intptr_t num_context_variables_;
+#define FIELD_LIST(F) F(const intptr_t, num_context_variables_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateUninitializedContextInstr,
+                                          TemplateAllocation,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(AllocateUninitializedContextInstr);
 };
 
@@ -6505,13 +7006,11 @@
                          const ZoneGrowableArray<const Slot*>& slots,
                          InputsArray&& values)
       : VariadicDefinition(std::move(values)),
-        allocation_(allocation),
         cls_(cls),
         num_elements_(num_elements),
         slots_(slots),
-        locations_(nullptr),
-        visited_for_liveness_(false),
-        registers_remapped_(false) {
+        registers_remapped_(false),
+        allocation_(allocation) {
     ASSERT(slots_.length() == InputCount());
   }
 
@@ -6524,7 +7023,10 @@
     return slots_[i]->offset_in_bytes();
   }
 
-  const Location& LocationAt(intptr_t i) { return locations_[i]; }
+  const Location& LocationAt(intptr_t i) {
+    ASSERT(0 <= i && i < InputCount());
+    return locations_[i];
+  }
 
   DECLARE_INSTRUCTION(MaterializeObject)
 
@@ -6553,15 +7055,24 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  AllocationInstr* allocation_;
-  const Class& cls_;
-  intptr_t num_elements_;
-  const ZoneGrowableArray<const Slot*>& slots_;
-  Location* locations_;
+#define FIELD_LIST(F)                                                          \
+  F(const Class&, cls_)                                                        \
+  F(intptr_t, num_elements_)                                                   \
+  F(const ZoneGrowableArray<const Slot*>&, slots_)                             \
+  F(bool, registers_remapped_)
 
-  bool visited_for_liveness_;
-  bool registers_remapped_;
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MaterializeObjectInstr,
+                                          VariadicDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+  DECLARE_EXTRA_SERIALIZATION
+
+ private:
+  Location* locations_ = nullptr;
+
+  // Not serialized.
+  AllocationInstr* allocation_ = nullptr;
+  bool visited_for_liveness_ = false;
 
   DISALLOW_COPY_AND_ASSIGN(MaterializeObjectInstr);
 };
@@ -6583,6 +7094,8 @@
 
   DECLARE_ABSTRACT_INSTRUCTION(ArrayAllocation);
 
+  DECLARE_EMPTY_SERIALIZATION(ArrayAllocationInstr, AllocationInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(ArrayAllocationInstr);
 };
@@ -6597,11 +7110,15 @@
   virtual intptr_t InputCount() const { return N; }
   virtual Value* InputAt(intptr_t i) const { return inputs_[i]; }
 
+  DECLARE_EMPTY_SERIALIZATION(TemplateArrayAllocation, ArrayAllocationInstr)
+
  protected:
   EmbeddedArray<Value*, N> inputs_;
 
  private:
   virtual void RawSetInputAt(intptr_t i, Value* value) { inputs_[i] = value; }
+
+  DISALLOW_COPY_AND_ASSIGN(TemplateArrayAllocation);
 };
 
 class CreateArrayInstr : public TemplateArrayAllocation<2> {
@@ -6643,6 +7160,8 @@
     }
   }
 
+  DECLARE_EMPTY_SERIALIZATION(CreateArrayInstr, TemplateArrayAllocation)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(CreateArrayInstr);
 };
@@ -6681,9 +7200,14 @@
     }
   }
 
- private:
-  const classid_t class_id_;
+#define FIELD_LIST(F) F(const classid_t, class_id_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateTypedDataInstr,
+                                          TemplateArrayAllocation,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(AllocateTypedDataInstr);
 };
 
@@ -6720,9 +7244,14 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  intptr_t offset_;
+#define FIELD_LIST(F) F(const intptr_t, offset_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadUntaggedInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(LoadUntaggedInstr);
 };
 
@@ -6754,10 +7283,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const Representation representation_;
-  const bool input_can_be_smi_;
+#define FIELD_LIST(F)                                                          \
+  F(const Representation, representation_)                                     \
+  F(const bool, input_can_be_smi_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadClassIdInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(LoadClassIdInstr);
 };
 
@@ -6837,6 +7372,13 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F) F(const Slot&, slot_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadFieldInstr,
+                                          TemplateLoadField,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   intptr_t OffsetInBytes() const { return slot().offset_in_bytes(); }
 
@@ -6844,8 +7386,6 @@
   // calls initializer if it is not. Field value is already loaded.
   void EmitNativeCodeForInitializerCall(FlowGraphCompiler* compiler);
 
-  const Slot& slot_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadFieldInstr);
 };
 
@@ -6883,10 +7423,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const TokenPosition token_pos_;
-  const AbstractType& type_;
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const AbstractType&, type_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstantiateTypeInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(InstantiateTypeInstr);
 };
 
@@ -6969,11 +7515,17 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const TokenPosition token_pos_;
-  const Class& instantiator_class_;
-  const Function& function_;
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const Class&, instantiator_class_)                                         \
+  F(const Function&, function_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstantiateTypeArgumentsInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(InstantiateTypeArgumentsInstr);
 };
 
@@ -7006,9 +7558,14 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const ZoneGrowableArray<const Slot*>& context_slots_;
+#define FIELD_LIST(F) F(const ZoneGrowableArray<const Slot*>&, context_slots_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateContextInstr,
+                                          TemplateAllocation,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(AllocateContextInstr);
 };
 
@@ -7049,10 +7606,16 @@
 
   virtual bool HasUnknownSideEffects() const { return false; }
 
- private:
-  const TokenPosition token_pos_;
-  const ZoneGrowableArray<const Slot*>& context_slots_;
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const ZoneGrowableArray<const Slot*>&, context_slots_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CloneContextInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(CloneContextInstr);
 };
 
@@ -7077,9 +7640,14 @@
 
   void set_licm_hoisted(bool value) { licm_hoisted_ = value; }
 
- private:
-  bool licm_hoisted_;
+#define FIELD_LIST(F) F(bool, licm_hoisted_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckEitherNonSmiInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(CheckEitherNonSmiInstr);
 };
 
@@ -7130,6 +7698,13 @@
     return kNotSpeculative;
   }
 
+#define FIELD_LIST(F) F(const Representation, from_representation_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BoxInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  protected:
   BoxInstr(Representation from_representation, Value* value)
       : from_representation_(from_representation) {
@@ -7141,8 +7716,6 @@
     return Boxing::ValueOffset(from_representation());
   }
 
-  const Representation from_representation_;
-
   DISALLOW_COPY_AND_ASSIGN(BoxInstr);
 };
 
@@ -7164,6 +7737,8 @@
 
   DECLARE_ABSTRACT_INSTRUCTION(BoxInteger)
 
+  DECLARE_EMPTY_SERIALIZATION(BoxIntegerInstr, BoxInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(BoxIntegerInstr);
 };
@@ -7180,6 +7755,8 @@
 
   DECLARE_INSTRUCTION(BoxSmallInt)
 
+  DECLARE_EMPTY_SERIALIZATION(BoxSmallIntInstr, BoxIntegerInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(BoxSmallIntInstr);
 };
@@ -7191,6 +7768,8 @@
 
   DECLARE_INSTRUCTION_BACKEND()
 
+  DECLARE_EMPTY_SERIALIZATION(BoxInteger32Instr, BoxIntegerInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(BoxInteger32Instr);
 };
@@ -7202,6 +7781,8 @@
 
   DECLARE_INSTRUCTION_NO_BACKEND(BoxInt32)
 
+  DECLARE_EMPTY_SERIALIZATION(BoxInt32Instr, BoxInteger32Instr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(BoxInt32Instr);
 };
@@ -7213,6 +7794,8 @@
 
   DECLARE_INSTRUCTION_NO_BACKEND(BoxUint32)
 
+  DECLARE_EMPTY_SERIALIZATION(BoxUint32Instr, BoxInteger32Instr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(BoxUint32Instr);
 };
@@ -7226,6 +7809,8 @@
 
   DECLARE_INSTRUCTION(BoxInt64)
 
+  DECLARE_EMPTY_SERIALIZATION(BoxInt64Instr, BoxIntegerInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(BoxInt64Instr);
 };
@@ -7279,6 +7864,15 @@
 
   virtual TokenPosition token_pos() const { return TokenPosition::kBox; }
 
+#define FIELD_LIST(F)                                                          \
+  F(const Representation, representation_)                                     \
+  F(SpeculativeMode, speculative_mode_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(UnboxInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  protected:
   UnboxInstr(Representation representation,
              Value* value,
@@ -7290,6 +7884,10 @@
     SetInputAt(0, value);
   }
 
+  void set_speculative_mode(SpeculativeMode value) {
+    speculative_mode_ = value;
+  }
+
  private:
   bool CanConvertSmi() const;
   void EmitLoadFromBox(FlowGraphCompiler* compiler);
@@ -7302,10 +7900,6 @@
 
   intptr_t ValueOffset() const { return Boxing::ValueOffset(representation_); }
 
- protected:
-  const Representation representation_;
-  SpeculativeMode speculative_mode_;
-
   DISALLOW_COPY_AND_ASSIGN(UnboxInstr);
 };
 
@@ -7339,9 +7933,14 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  bool is_truncating_;
+#define FIELD_LIST(F) F(bool, is_truncating_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(UnboxIntegerInstr,
+                                          UnboxInstr,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(UnboxIntegerInstr);
 };
 
@@ -7360,6 +7959,8 @@
 
   DECLARE_INSTRUCTION_BACKEND()
 
+  DECLARE_EMPTY_SERIALIZATION(UnboxInteger32Instr, UnboxIntegerInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(UnboxInteger32Instr);
 };
@@ -7383,6 +7984,8 @@
 
   DECLARE_INSTRUCTION_NO_BACKEND(UnboxUint32)
 
+  DECLARE_EMPTY_SERIALIZATION(UnboxUint32Instr, UnboxInteger32Instr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(UnboxUint32Instr);
 };
@@ -7407,6 +8010,8 @@
 
   DECLARE_INSTRUCTION_NO_BACKEND(UnboxInt32)
 
+  DECLARE_EMPTY_SERIALIZATION(UnboxInt32Instr, UnboxInteger32Instr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(UnboxInt32Instr);
 };
@@ -7436,6 +8041,8 @@
 
   DECLARE_INSTRUCTION_NO_BACKEND(UnboxInt64)
 
+  DECLARE_EMPTY_SERIALIZATION(UnboxInt64Instr, UnboxIntegerInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(UnboxInt64Instr);
 };
@@ -7494,16 +8101,21 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const MathUnaryKind kind_;
+#define FIELD_LIST(F) F(const MathUnaryKind, kind_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MathUnaryInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(MathUnaryInstr);
 };
 
 // Calls into the runtime and performs a case-insensitive comparison of the
 // UTF16 strings (i.e. TwoByteString or ExternalTwoByteString) located at
 // str[lhs_index:lhs_index + length] and str[rhs_index:rhs_index + length].
-// Depending on the runtime entry passed, we will treat the strings as either
+// Depending on [handle_surrogates], we will treat the strings as either
 // UCS2 (no surrogate handling) or UTF16 (surrogates handled appropriately).
 class CaseInsensitiveCompareInstr
     : public TemplateDefinition<4, NoThrow, Pure> {
@@ -7512,9 +8124,9 @@
                               Value* lhs_index,
                               Value* rhs_index,
                               Value* length,
-                              const RuntimeEntry& entry,
+                              bool handle_surrogates,
                               intptr_t cid)
-      : entry_(entry), cid_(cid) {
+      : handle_surrogates_(handle_surrogates), cid_(cid) {
     ASSERT(cid == kTwoByteStringCid || cid == kExternalTwoByteStringCid);
     ASSERT(index_scale() == 2);
     SetInputAt(0, str);
@@ -7528,7 +8140,7 @@
   Value* rhs_index() const { return inputs_[2]; }
   Value* length() const { return inputs_[3]; }
 
-  const RuntimeEntry& TargetFunction() const { return entry_; }
+  const RuntimeEntry& TargetFunction() const;
   bool IsExternal() const { return cid_ == kExternalTwoByteStringCid; }
   intptr_t class_id() const { return cid_; }
 
@@ -7544,13 +8156,21 @@
   virtual CompileType ComputeType() const;
 
   virtual bool AttributesEqual(const Instruction& other) const {
-    return other.AsCaseInsensitiveCompare()->cid_ == cid_;
+    const auto* other_compare = other.AsCaseInsensitiveCompare();
+    return (other_compare->handle_surrogates_ == handle_surrogates_) &&
+           (other_compare->cid_ == cid_);
   }
 
- private:
-  const RuntimeEntry& entry_;
-  const intptr_t cid_;
+#define FIELD_LIST(F)                                                          \
+  F(const bool, handle_surrogates_)                                            \
+  F(const intptr_t, cid_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CaseInsensitiveCompareInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(CaseInsensitiveCompareInstr);
 };
 
@@ -7605,10 +8225,16 @@
   virtual CompileType ComputeType() const;
   virtual bool AttributesEqual(const Instruction& other) const;
 
- private:
-  const MethodRecognizer::Kind op_kind_;
-  const intptr_t result_cid_;
+#define FIELD_LIST(F)                                                          \
+  F(const MethodRecognizer::Kind, op_kind_)                                    \
+  F(const intptr_t, result_cid_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MathMinMaxInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(MathMinMaxInstr);
 };
 
@@ -7667,11 +8293,17 @@
            (speculative_mode_ == other_bin_op->speculative_mode_);
   }
 
- private:
-  const Token::Kind op_kind_;
-  const TokenPosition token_pos_;
-  const SpeculativeMode speculative_mode_;
+#define FIELD_LIST(F)                                                          \
+  F(const Token::Kind, op_kind_)                                               \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const SpeculativeMode, speculative_mode_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BinaryDoubleOpInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(BinaryDoubleOpInstr);
 };
 
@@ -7711,9 +8343,14 @@
 
   virtual ComparisonInstr* CopyWithNewOperands(Value* left, Value* right);
 
- private:
-  const MethodRecognizer::Kind op_kind_;
+#define FIELD_LIST(F) F(const MethodRecognizer::Kind, op_kind_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DoubleTestOpInstr,
+                                          TemplateComparison,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(DoubleTestOpInstr);
 };
 
@@ -7748,8 +8385,15 @@
 
   DECLARE_ABSTRACT_INSTRUCTION(UnaryIntegerOp)
 
+#define FIELD_LIST(F) F(const Token::Kind, op_kind_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(UnaryIntegerOpInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
-  const Token::Kind op_kind_;
+  DISALLOW_COPY_AND_ASSIGN(UnaryIntegerOpInstr);
 };
 
 // Handles both Smi operations: BIT_OR and NEGATE.
@@ -7766,6 +8410,8 @@
 
   DECLARE_INSTRUCTION(UnarySmiOp)
 
+  DECLARE_EMPTY_SERIALIZATION(UnarySmiOpInstr, UnaryIntegerOpInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(UnarySmiOpInstr);
 };
@@ -7794,6 +8440,8 @@
 
   DECLARE_INSTRUCTION(UnaryUint32Op)
 
+  DECLARE_EMPTY_SERIALIZATION(UnaryUint32OpInstr, UnaryIntegerOpInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(UnaryUint32OpInstr);
 };
@@ -7832,8 +8480,14 @@
 
   DECLARE_INSTRUCTION(UnaryInt64Op)
 
+#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(UnaryInt64OpInstr,
+                                          UnaryIntegerOpInstr,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
-  const SpeculativeMode speculative_mode_;
   DISALLOW_COPY_AND_ASSIGN(UnaryInt64OpInstr);
 };
 
@@ -7902,6 +8556,16 @@
 
   DECLARE_ABSTRACT_INSTRUCTION(BinaryIntegerOp)
 
+#define FIELD_LIST(F)                                                          \
+  F(const Token::Kind, op_kind_)                                               \
+  F(bool, can_overflow_)                                                       \
+  F(bool, is_truncating_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BinaryIntegerOpInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  protected:
   void InferRangeHelper(const Range* left_range,
                         const Range* right_range,
@@ -7910,11 +8574,6 @@
  private:
   Definition* CreateConstantResult(FlowGraph* graph, const Integer& result);
 
-  const Token::Kind op_kind_;
-
-  bool can_overflow_;
-  bool is_truncating_;
-
   DISALLOW_COPY_AND_ASSIGN(BinaryIntegerOpInstr);
 };
 
@@ -7938,9 +8597,14 @@
 
   Range* right_range() const { return right_range_; }
 
- private:
-  Range* right_range_;
+#define FIELD_LIST(F) F(Range*, right_range_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BinarySmiOpInstr,
+                                          BinaryIntegerOpInstr,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(BinarySmiOpInstr);
 };
 
@@ -7996,6 +8660,8 @@
 
   DECLARE_INSTRUCTION(BinaryInt32Op)
 
+  DECLARE_EMPTY_SERIALIZATION(BinaryInt32OpInstr, BinaryIntegerOpInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(BinaryInt32OpInstr);
 };
@@ -8038,6 +8704,8 @@
 
   DECLARE_INSTRUCTION(BinaryUint32Op)
 
+  DECLARE_EMPTY_SERIALIZATION(BinaryUint32OpInstr, BinaryIntegerOpInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(BinaryUint32OpInstr);
 };
@@ -8083,8 +8751,14 @@
 
   DECLARE_INSTRUCTION(BinaryInt64Op)
 
+#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BinaryInt64OpInstr,
+                                          BinaryIntegerOpInstr,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
-  const SpeculativeMode speculative_mode_;
   DISALLOW_COPY_AND_ASSIGN(BinaryInt64OpInstr);
 };
 
@@ -8113,6 +8787,13 @@
 
   DECLARE_ABSTRACT_INSTRUCTION(ShiftIntegerOp)
 
+#define FIELD_LIST(F) F(Range*, shift_range_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ShiftIntegerOpInstr,
+                                          BinaryIntegerOpInstr,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  protected:
   static const intptr_t kShiftCountLimit = 63;
 
@@ -8121,8 +8802,6 @@
   bool IsShiftCountInRange(int64_t max = kShiftCountLimit) const;
 
  private:
-  Range* shift_range_;
-
   DISALLOW_COPY_AND_ASSIGN(ShiftIntegerOpInstr);
 };
 
@@ -8154,6 +8833,8 @@
 
   DECLARE_INSTRUCTION(ShiftInt64Op)
 
+  DECLARE_EMPTY_SERIALIZATION(ShiftInt64OpInstr, ShiftIntegerOpInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(ShiftInt64OpInstr);
 };
@@ -8185,6 +8866,8 @@
 
   DECLARE_INSTRUCTION(SpeculativeShiftInt64Op)
 
+  DECLARE_EMPTY_SERIALIZATION(SpeculativeShiftInt64OpInstr, ShiftIntegerOpInstr)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(SpeculativeShiftInt64OpInstr);
 };
@@ -8217,6 +8900,8 @@
 
   DECLARE_INSTRUCTION(ShiftUint32Op)
 
+  DECLARE_EMPTY_SERIALIZATION(ShiftUint32OpInstr, ShiftIntegerOpInstr)
+
  private:
   static const intptr_t kUint32ShiftCountLimit = 31;
 
@@ -8247,6 +8932,9 @@
 
   virtual CompileType ComputeType() const;
 
+  DECLARE_EMPTY_SERIALIZATION(SpeculativeShiftUint32OpInstr,
+                              ShiftIntegerOpInstr)
+
  private:
   static const intptr_t kUint32ShiftCountLimit = 31;
 
@@ -8298,10 +8986,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const Token::Kind op_kind_;
-  const SpeculativeMode speculative_mode_;
+#define FIELD_LIST(F)                                                          \
+  F(const Token::Kind, op_kind_)                                               \
+  F(const SpeculativeMode, speculative_mode_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(UnaryDoubleOpInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(UnaryDoubleOpInstr);
 };
 
@@ -8352,12 +9046,18 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const TokenPosition token_pos_;
-  const intptr_t stack_depth_;
-  const intptr_t loop_depth_;
-  const Kind kind_;
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const intptr_t, stack_depth_)                                              \
+  F(const intptr_t, loop_depth_)                                               \
+  F(const Kind, kind_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckStackOverflowInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(CheckStackOverflowInstr);
 };
 
@@ -8381,9 +9081,14 @@
 
   virtual bool AttributesEqual(const Instruction& other) const { return true; }
 
- private:
-  const TokenPosition token_pos_;
+#define FIELD_LIST(F) F(const TokenPosition, token_pos_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(SmiToDoubleInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(SmiToDoubleInstr);
 };
 
@@ -8407,6 +9112,8 @@
 
   virtual bool AttributesEqual(const Instruction& other) const { return true; }
 
+  DECLARE_EMPTY_SERIALIZATION(Int32ToDoubleInstr, TemplateDefinition)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(Int32ToDoubleInstr);
 };
@@ -8448,9 +9155,14 @@
     return speculative_mode_ == other.AsInt64ToDouble()->speculative_mode_;
   }
 
- private:
-  const SpeculativeMode speculative_mode_;
+#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(Int64ToDoubleInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(Int64ToDoubleInstr);
 };
 
@@ -8495,9 +9207,14 @@
     return other.AsDoubleToInteger()->recognized_kind() == recognized_kind();
   }
 
- private:
-  const MethodRecognizer::Kind recognized_kind_;
+#define FIELD_LIST(F) F(const MethodRecognizer::Kind, recognized_kind_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DoubleToIntegerInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(DoubleToIntegerInstr);
 };
 
@@ -8526,6 +9243,8 @@
 
   virtual bool AttributesEqual(const Instruction& other) const { return true; }
 
+  DECLARE_EMPTY_SERIALIZATION(DoubleToSmiInstr, TemplateDefinition)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(DoubleToSmiInstr);
 };
@@ -8569,9 +9288,14 @@
     return other.AsDoubleToDouble()->recognized_kind() == recognized_kind();
   }
 
- private:
-  const MethodRecognizer::Kind recognized_kind_;
+#define FIELD_LIST(F) F(const MethodRecognizer::Kind, recognized_kind_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DoubleToDoubleInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(DoubleToDoubleInstr);
 };
 
@@ -8609,9 +9333,14 @@
 
   virtual Definition* Canonicalize(FlowGraph* flow_graph);
 
- private:
-  const SpeculativeMode speculative_mode_;
+#define FIELD_LIST(F) F(const SpeculativeMode, speculative_mode_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(DoubleToFloatInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(DoubleToFloatInstr);
 };
 
@@ -8643,6 +9372,8 @@
 
   virtual Definition* Canonicalize(FlowGraph* flow_graph);
 
+  DECLARE_EMPTY_SERIALIZATION(FloatToDoubleInstr, TemplateDefinition)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(FloatToDoubleInstr);
 };
@@ -8698,10 +9429,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const MethodRecognizer::Kind recognized_kind_;
-  const TokenPosition token_pos_;
+#define FIELD_LIST(F)                                                          \
+  F(const MethodRecognizer::Kind, recognized_kind_)                            \
+  F(const TokenPosition, token_pos_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InvokeMathCFunctionInstr,
+                                          VariadicDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(InvokeMathCFunctionInstr);
 };
 
@@ -8746,10 +9483,17 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(const intptr_t, index_)                                                    \
+  F(const Representation, definition_rep_)                                     \
+  F(const intptr_t, definition_cid_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ExtractNthOutputInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
-  const intptr_t index_;
-  const Representation definition_rep_;
-  const intptr_t definition_cid_;
   DISALLOW_COPY_AND_ASSIGN(ExtractNthOutputInstr);
 };
 
@@ -8778,6 +9522,8 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+  DECLARE_EMPTY_SERIALIZATION(TruncDivModInstr, TemplateDefinition)
+
  private:
   Range* divisor_range() const {
     // Note: this range is only used to remove check for zero divisor from
@@ -8832,12 +9578,18 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const Cids& cids_;
-  bool licm_hoisted_;
-  bool is_bit_test_;
-  const TokenPosition token_pos_;
+#define FIELD_LIST(F)                                                          \
+  F(const Cids&, cids_)                                                        \
+  F(bool, licm_hoisted_)                                                       \
+  F(bool, is_bit_test_)                                                        \
+  F(const TokenPosition, token_pos_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckClassInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   int EmitCheckCid(FlowGraphCompiler* compiler,
                    int bias,
                    intptr_t cid_start,
@@ -8881,10 +9633,16 @@
   bool licm_hoisted() const { return licm_hoisted_; }
   void set_licm_hoisted(bool value) { licm_hoisted_ = value; }
 
- private:
-  const TokenPosition token_pos_;
-  bool licm_hoisted_;
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(bool, licm_hoisted_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckSmiInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(CheckSmiInstr);
 };
 
@@ -8944,11 +9702,17 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const TokenPosition token_pos_;
-  const String& function_name_;
-  const ExceptionType exception_type_;
+#define FIELD_LIST(F)                                                          \
+  F(const TokenPosition, token_pos_)                                           \
+  F(const String&, function_name_)                                             \
+  F(const ExceptionType, exception_type_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckNullInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(CheckNullInstr);
 };
 
@@ -8977,11 +9741,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F) F(CidRangeValue, cids_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckClassIdInstr,
+                                          TemplateInstruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   bool Contains(intptr_t cid) const;
 
-  CidRangeValue cids_;
-
   DISALLOW_COPY_AND_ASSIGN(CheckClassIdInstr);
 };
 
@@ -9011,6 +9780,8 @@
   // Give a name to the location/input indices.
   enum { kLengthPos = 0, kIndexPos = 1 };
 
+  DECLARE_EMPTY_SERIALIZATION(CheckBoundBase, TemplateDefinition)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(CheckBoundBase);
 };
@@ -9045,10 +9816,16 @@
 
   void set_licm_hoisted(bool value) { licm_hoisted_ = value; }
 
- private:
-  bool generalized_;
-  bool licm_hoisted_;
+#define FIELD_LIST(F)                                                          \
+  F(bool, generalized_)                                                        \
+  F(bool, licm_hoisted_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckArrayBoundInstr,
+                                          CheckBoundBase,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(CheckArrayBoundInstr);
 };
 
@@ -9102,6 +9879,8 @@
     return SlowPathSharingSupported(is_optimizing);
   }
 
+  DECLARE_EMPTY_SERIALIZATION(GenericCheckBoundInstr, CheckBoundBase)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(GenericCheckBoundInstr);
 };
@@ -9127,6 +9906,8 @@
 
   virtual bool ComputeCanDeoptimize() const { return false; }
 
+  DECLARE_EMPTY_SERIALIZATION(CheckWritableInstr, TemplateDefinition)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(CheckWritableInstr);
 };
@@ -9167,13 +9948,19 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F) F(ComparisonInstr*, comparison_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckConditionInstr,
+                                          Instruction,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+  DECLARE_EXTRA_SERIALIZATION
+
  private:
   virtual void RawSetInputAt(intptr_t i, Value* value) {
     comparison()->RawSetInputAt(i, value);
   }
 
-  ComparisonInstr* comparison_;
-
   DISALLOW_COPY_AND_ASSIGN(CheckConditionInstr);
 };
 
@@ -9238,11 +10025,17 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const Representation from_representation_;
-  const Representation to_representation_;
-  bool is_truncating_;
+#define FIELD_LIST(F)                                                          \
+  F(const Representation, from_representation_)                                \
+  F(const Representation, to_representation_)                                  \
+  F(bool, is_truncating_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(IntConverterInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(IntConverterInstr);
 };
 
@@ -9289,10 +10082,16 @@
 
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const Representation from_representation_;
-  const Representation to_representation_;
+#define FIELD_LIST(F)                                                          \
+  F(const Representation, from_representation_)                                \
+  F(const Representation, to_representation_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(BitCastInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(BitCastInstr);
 };
 
@@ -9318,6 +10117,8 @@
 
   DECLARE_INSTRUCTION(LoadThread);
 
+  DECLARE_EMPTY_SERIALIZATION(LoadThreadInstr, TemplateDefinition)
+
  private:
   DISALLOW_COPY_AND_ASSIGN(LoadThreadInstr);
 };
@@ -9526,6 +10327,13 @@
   DECLARE_INSTRUCTION(SimdOp)
   PRINT_OPERANDS_TO_SUPPORT
 
+#define FIELD_LIST(F)                                                          \
+  F(const Kind, kind_)                                                         \
+  F(intptr_t, mask_)
+
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(SimdOpInstr, Definition, FIELD_LIST)
+#undef FIELD_LIST
+
  private:
   SimdOpInstr(Kind kind, intptr_t deopt_id)
       : Definition(deopt_id), kind_(kind) {}
@@ -9549,9 +10357,7 @@
   // We consider SimdOpInstr to be very uncommon so we don't optimize them for
   // size. Any instance of SimdOpInstr has enough space to fit any variation.
   // TODO(dartbug.com/30949) optimize this for size.
-  const Kind kind_;
   Value* inputs_[4];
-  intptr_t mask_;
 
   DISALLOW_COPY_AND_ASSIGN(SimdOpInstr);
 };
@@ -9590,10 +10396,16 @@
   DECLARE_INSTRUCTION(Call1ArgStub);
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const StubId stub_id_;
-  const TokenPosition token_pos_;
+#define FIELD_LIST(F)                                                          \
+  F(const StubId, stub_id_)                                                    \
+  F(const TokenPosition, token_pos_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(Call1ArgStubInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(Call1ArgStubInstr);
 };
 
@@ -9634,11 +10446,17 @@
   DECLARE_INSTRUCTION(Suspend);
   PRINT_OPERANDS_TO_SUPPORT
 
- private:
-  const StubId stub_id_;
-  const intptr_t resume_deopt_id_;
-  const TokenPosition token_pos_;
+#define FIELD_LIST(F)                                                          \
+  F(const StubId, stub_id_)                                                    \
+  F(const intptr_t, resume_deopt_id_)                                          \
+  F(const TokenPosition, token_pos_)
 
+  DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(SuspendInstr,
+                                          TemplateDefinition,
+                                          FIELD_LIST)
+#undef FIELD_LIST
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(SuspendInstr);
 };
 
@@ -9851,6 +10669,9 @@
   // from the copy.
   Environment* DeepCopy(Zone* zone, intptr_t length) const;
 
+  void Write(FlowGraphSerializer* s) const;
+  explicit Environment(FlowGraphDeserializer* d);
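+  // Environment is not an Instruction, so it serializes through this
+  // explicit Write/deserializing-constructor pair rather than the
+  // DECLARE_*_SERIALIZATION macros used by instructions.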
+
  private:
   friend class ShallowIterator;
   friend class compiler::BlockBuilder;  // For Environment constructor.
@@ -9969,6 +10790,9 @@
   ConstantInstr* constant = definition()->AsConstant();
   return (constant == nullptr) || constant->value().ptr() == value.ptr();
 }
+#undef DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS
+#undef DECLARE_CUSTOM_SERIALIZATION
+#undef DECLARE_EMPTY_SERIALIZATION
 
 }  // namespace dart
 
diff --git a/runtime/vm/compiler/backend/il_serializer.cc b/runtime/vm/compiler/backend/il_serializer.cc
new file mode 100644
index 0000000..6fb76fc
--- /dev/null
+++ b/runtime/vm/compiler/backend/il_serializer.cc
@@ -0,0 +1,2305 @@
+// Copyright (c) 2022, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/compiler/backend/il_serializer.h"
+
+#include "vm/closure_functions_cache.h"
+#if defined(DART_PRECOMPILER)
+#include "vm/compiler/aot/precompiler.h"
+#endif
+#include "vm/compiler/backend/flow_graph.h"
+#include "vm/compiler/backend/il.h"
+#include "vm/compiler/backend/range_analysis.h"
+#include "vm/compiler/ffi/call.h"
+#include "vm/compiler/frontend/flow_graph_builder.h"
+#include "vm/object_store.h"
+#include "vm/parser.h"
+
+#define Z zone_
+
+// This file defines the write/read methods for each serializable type,
+// sorted alphabetically by type/class name (case-insensitive).
+// Each "write" method is immediately followed by the corresponding "read"
+// method or deserializing constructor.
+
+namespace dart {
+
+FlowGraphSerializer::FlowGraphSerializer(NonStreamingWriteStream* stream)
+    : stream_(stream),
+      zone_(Thread::Current()->zone()),
+      isolate_group_(IsolateGroup::Current()),
+      heap_(IsolateGroup::Current()->heap()) {}
+
+FlowGraphSerializer::~FlowGraphSerializer() {
+  heap_->ResetObjectIdTable();
+}
+
+FlowGraphDeserializer::FlowGraphDeserializer(
+    const ParsedFunction& parsed_function,
+    ReadStream* stream)
+    : parsed_function_(parsed_function),
+      stream_(stream),
+      zone_(Thread::Current()->zone()),
+      thread_(Thread::Current()),
+      isolate_group_(IsolateGroup::Current()) {}
+
+ClassPtr FlowGraphDeserializer::GetClassById(classid_t id) const {
+  return isolate_group()->class_table()->At(id);
+}
+
+template <>
+void FlowGraphSerializer::Write<const AbstractType*>(const AbstractType* x) {
+  if (x == nullptr) {
+    Write<bool>(false);
+  } else {
+    Write<bool>(true);
+    Write<const AbstractType&>(*x);
+  }
+}
+
+template <>
+const AbstractType* FlowGraphDeserializer::Read<const AbstractType*>() {
+  if (!Read<bool>()) {
+    return nullptr;
+  }
+  return &Read<const AbstractType&>();
+}
+
+template <>
+void FlowGraphSerializer::Write<AliasIdentity>(AliasIdentity x) {
+  x.Write(this);
+}
+
+template <>
+AliasIdentity FlowGraphDeserializer::Read<AliasIdentity>() {
+  return AliasIdentity(this);
+}
+
+void AliasIdentity::Write(FlowGraphSerializer* s) const {
+  s->Write<intptr_t>(value_);
+}
+
+AliasIdentity::AliasIdentity(FlowGraphDeserializer* d)
+    : value_(d->Read<intptr_t>()) {}
+
+void BlockEntryInstr::WriteTo(FlowGraphSerializer* s) {
+  TemplateInstruction::WriteTo(s);
+  s->Write<intptr_t>(block_id_);
+  s->Write<intptr_t>(try_index_);
+  s->Write<intptr_t>(stack_depth_);
+  s->Write<ParallelMoveInstr*>(parallel_move_);
+}
+
+BlockEntryInstr::BlockEntryInstr(FlowGraphDeserializer* d)
+    : TemplateInstruction(d),
+      block_id_(d->Read<intptr_t>()),
+      try_index_(d->Read<intptr_t>()),
+      stack_depth_(d->Read<intptr_t>()),
+      dominated_blocks_(1),
+      parallel_move_(d->Read<ParallelMoveInstr*>()) {
+  d->set_block(block_id_, this);
+  d->set_current_block(this);
+}
+
+void BlockEntryInstr::WriteExtra(FlowGraphSerializer* s) {
+  TemplateInstruction::WriteExtra(s);
+  s->WriteRef<BlockEntryInstr*>(dominator_);
+  s->WriteGrowableArrayOfRefs<BlockEntryInstr*>(dominated_blocks_);
+  if (parallel_move_ != nullptr) {
+    parallel_move_->WriteExtra(s);
+  }
+}
+
+void BlockEntryInstr::ReadExtra(FlowGraphDeserializer* d) {
+  TemplateInstruction::ReadExtra(d);
+  dominator_ = d->ReadRef<BlockEntryInstr*>();
+  dominated_blocks_ = d->ReadGrowableArrayOfRefs<BlockEntryInstr*>();
+  if (parallel_move_ != nullptr) {
+    parallel_move_->ReadExtra(d);
+  }
+}
+
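+// Block entries are not serialized inline when referenced; a reference is
+// just the block id, with -1 encoding nullptr. The deserializer registers
+// every block entry under its id (see BlockEntryInstr's constructor above),
+// so ReadRef can resolve ids back to the materialized blocks.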
+template <>
+void FlowGraphSerializer::WriteRef<BlockEntryInstr*>(BlockEntryInstr* x) {
+  ASSERT(can_write_refs());
+  if (x == nullptr) {
+    Write<intptr_t>(-1);
+    return;
+  }
+  const intptr_t id = x->block_id();
+  ASSERT(id >= 0);
+  Write<intptr_t>(id);
+}
+
+template <>
+BlockEntryInstr* FlowGraphDeserializer::ReadRef<BlockEntryInstr*>() {
+  const intptr_t id = Read<intptr_t>();
+  if (id < 0) {
+    return nullptr;
+  }
+  return block(id);
+}
+
+#define INSTRUCTION_REFS_SERIALIZABLE_AS_BLOCK_ENTRY(V)                        \
+  V(CatchBlockEntry, CatchBlockEntryInstr)                                     \
+  V(FunctionEntry, FunctionEntryInstr)                                         \
+  V(IndirectEntry, IndirectEntryInstr)                                         \
+  V(JoinEntry, JoinEntryInstr)                                                 \
+  V(OsrEntry, OsrEntryInstr)                                                   \
+  V(TargetEntry, TargetEntryInstr)
+
+#define SERIALIZABLE_AS_BLOCK_ENTRY(name, type)                                \
+  template <>                                                                  \
+  void FlowGraphSerializer::WriteRef<type*>(type * x) {                        \
+    WriteRef<BlockEntryInstr*>(x);                                             \
+  }                                                                            \
+  template <>                                                                  \
+  type* FlowGraphDeserializer::ReadRef<type*>() {                              \
+    BlockEntryInstr* instr = ReadRef<BlockEntryInstr*>();                      \
+    ASSERT((instr == nullptr) || instr->Is##name());                           \
+    return static_cast<type*>(instr);                                          \
+  }
+
+INSTRUCTION_REFS_SERIALIZABLE_AS_BLOCK_ENTRY(SERIALIZABLE_AS_BLOCK_ENTRY)
+#undef SERIALIZABLE_AS_BLOCK_ENTRY
+#undef INSTRUCTION_REFS_SERIALIZABLE_AS_BLOCK_ENTRY
+
+void BlockEntryWithInitialDefs::WriteTo(FlowGraphSerializer* s) {
+  BlockEntryInstr::WriteTo(s);
+  s->Write<GrowableArray<Definition*>>(initial_definitions_);
+}
+
+BlockEntryWithInitialDefs::BlockEntryWithInitialDefs(FlowGraphDeserializer* d)
+    : BlockEntryInstr(d),
+      initial_definitions_(d->Read<GrowableArray<Definition*>>()) {
+  for (Definition* def : initial_definitions_) {
+    def->set_previous(this);
+    if (auto par = def->AsParameter()) {
+      par->set_block(this);
+    }
+  }
+}
+
+void BlockEntryWithInitialDefs::WriteExtra(FlowGraphSerializer* s) {
+  BlockEntryInstr::WriteExtra(s);
+  for (Definition* def : initial_definitions_) {
+    def->WriteExtra(s);
+  }
+}
+
+void BlockEntryWithInitialDefs::ReadExtra(FlowGraphDeserializer* d) {
+  BlockEntryInstr::ReadExtra(d);
+  for (Definition* def : initial_definitions_) {
+    def->ReadExtra(d);
+  }
+}
+
+template <>
+void FlowGraphSerializer::Write<bool>(bool x) {
+  stream_->Write<uint8_t>(x ? 1 : 0);
+}
+
+template <>
+bool FlowGraphDeserializer::Read<bool>() {
+  return (stream_->Read<uint8_t>() != 0);
+}
+
+void BranchInstr::WriteExtra(FlowGraphSerializer* s) {
+  // Branch reuses inputs from its embedded Comparison.
+  // Instruction::WriteExtra is not called to avoid
+  // writing/reading inputs twice.
+  WriteExtraWithoutInputs(s);
+  comparison_->WriteExtra(s);
+  s->WriteRef<TargetEntryInstr*>(true_successor_);
+  s->WriteRef<TargetEntryInstr*>(false_successor_);
+  s->WriteRef<TargetEntryInstr*>(constant_target_);
+}
+
+void BranchInstr::ReadExtra(FlowGraphDeserializer* d) {
+  ReadExtraWithoutInputs(d);
+  comparison_->ReadExtra(d);
+  for (intptr_t i = comparison_->InputCount() - 1; i >= 0; --i) {
+    comparison_->InputAt(i)->set_instruction(this);
+  }
+  true_successor_ = d->ReadRef<TargetEntryInstr*>();
+  false_successor_ = d->ReadRef<TargetEntryInstr*>();
+  constant_target_ = d->ReadRef<TargetEntryInstr*>();
+}
+
+template <>
+void FlowGraphSerializer::Write<const compiler::ffi::CallbackMarshaller&>(
+    const compiler::ffi::CallbackMarshaller& x) {
+  UNIMPLEMENTED();
+}
+
+template <>
+const compiler::ffi::CallbackMarshaller&
+FlowGraphDeserializer::Read<const compiler::ffi::CallbackMarshaller&>() {
+  UNIMPLEMENTED();
+  return *compiler::ffi::CallbackMarshaller::FromFunction(
+      Z, Function::null_function(), nullptr);
+}
+
+template <>
+void FlowGraphSerializer::Write<const compiler::ffi::CallMarshaller&>(
+    const compiler::ffi::CallMarshaller& x) {
+  Write<const Function&>(x.dart_signature());
+}
+
+template <>
+const compiler::ffi::CallMarshaller&
+FlowGraphDeserializer::Read<const compiler::ffi::CallMarshaller&>() {
+  const Function& dart_signature = Read<const Function&>();
+  const char* error = nullptr;
+  return *compiler::ffi::CallMarshaller::FromFunction(Z, dart_signature,
+                                                      &error);
+}
+
+template <>
+void FlowGraphSerializer::Write<const CallTargets&>(const CallTargets& x) {
+  x.Write(this);
+}
+
+template <>
+const CallTargets& FlowGraphDeserializer::Read<const CallTargets&>() {
+  return *(new (Z) CallTargets(this));
+}
+
+void CallTargets::Write(FlowGraphSerializer* s) const {
+  const intptr_t len = cid_ranges_.length();
+  s->Write<intptr_t>(len);
+  for (intptr_t i = 0; i < len; ++i) {
+    TargetInfo* t = TargetAt(i);
+    s->Write<intptr_t>(t->cid_start);
+    s->Write<intptr_t>(t->cid_end);
+    s->Write<const Function&>(*(t->target));
+    s->Write<intptr_t>(t->count);
+    s->Write<int8_t>(t->exactness.Encode());
+  }
+}
+
+CallTargets::CallTargets(FlowGraphDeserializer* d) : Cids(d->zone()) {
+  const intptr_t len = d->Read<intptr_t>();
+  cid_ranges_.EnsureLength(len, nullptr);
+  for (intptr_t i = 0; i < len; ++i) {
+    const intptr_t cid_start = d->Read<intptr_t>();
+    const intptr_t cid_end = d->Read<intptr_t>();
+    const Function& target = d->Read<const Function&>();
+    const intptr_t count = d->Read<intptr_t>();
+    const StaticTypeExactnessState exactness =
+        StaticTypeExactnessState::Decode(d->Read<int8_t>());
+    TargetInfo* t = new (d->zone())
+        TargetInfo(cid_start, cid_end, &target, count, exactness);
+    cid_ranges_[i] = t;
+  }
+}
+
+void CatchBlockEntryInstr::WriteTo(FlowGraphSerializer* s) {
+  BlockEntryWithInitialDefs::WriteTo(s);
+  s->Write<const Array&>(catch_handler_types_);
+  s->Write<intptr_t>(catch_try_index_);
+  s->Write<bool>(needs_stacktrace_);
+  s->Write<bool>(is_generated_);
+}
+
+CatchBlockEntryInstr::CatchBlockEntryInstr(FlowGraphDeserializer* d)
+    : BlockEntryWithInitialDefs(d),
+      graph_entry_(d->graph_entry()),
+      predecessor_(nullptr),
+      catch_handler_types_(d->Read<const Array&>()),
+      catch_try_index_(d->Read<intptr_t>()),
+      exception_var_(nullptr),
+      stacktrace_var_(nullptr),
+      raw_exception_var_(nullptr),
+      raw_stacktrace_var_(nullptr),
+      needs_stacktrace_(d->Read<bool>()),
+      is_generated_(d->Read<bool>()) {}
+
+template <>
+void FlowGraphSerializer::Write<const char*>(const char* x) {
+  ASSERT(x != nullptr);
+  const intptr_t len = strlen(x);
+  Write<intptr_t>(len);
+  stream_->WriteBytes(x, len);
+}
+
+template <>
+const char* FlowGraphDeserializer::Read<const char*>() {
+  const intptr_t len = Read<intptr_t>();
+  char* str = zone()->Alloc<char>(len + 1);
+  stream_->ReadBytes(str, len);
+  str[len] = 0;
+  return str;
+}
+
+void CheckConditionInstr::WriteExtra(FlowGraphSerializer* s) {
+  // CheckCondition reuses inputs from its embedded Comparison.
+  // Instruction::WriteExtra is not called to avoid
+  // writing/reading inputs twice.
+  WriteExtraWithoutInputs(s);
+  comparison_->WriteExtra(s);
+}
+
+void CheckConditionInstr::ReadExtra(FlowGraphDeserializer* d) {
+  ReadExtraWithoutInputs(d);
+  comparison_->ReadExtra(d);
+  for (intptr_t i = comparison_->InputCount() - 1; i >= 0; --i) {
+    comparison_->InputAt(i)->set_instruction(this);
+  }
+}
+
+template <>
+void FlowGraphSerializer::Write<CidRangeValue>(CidRangeValue x) {
+  Write<intptr_t>(x.cid_start);
+  Write<intptr_t>(x.cid_end);
+}
+
+template <>
+CidRangeValue FlowGraphDeserializer::Read<CidRangeValue>() {
+  const intptr_t cid_start = Read<intptr_t>();
+  const intptr_t cid_end = Read<intptr_t>();
+  return CidRangeValue(cid_start, cid_end);
+}
+
+template <>
+void FlowGraphSerializer::Write<const Cids&>(const Cids& x) {
+  const intptr_t len = x.length();
+  Write<intptr_t>(len);
+  for (intptr_t i = 0; i < len; ++i) {
+    const CidRange* r = x.At(i);
+    Write<intptr_t>(r->cid_start);
+    Write<intptr_t>(r->cid_end);
+  }
+}
+
+template <>
+const Cids& FlowGraphDeserializer::Read<const Cids&>() {
+  Cids* cids = new (Z) Cids(Z);
+  const intptr_t len = Read<intptr_t>();
+  for (intptr_t i = 0; i < len; ++i) {
+    const intptr_t cid_start = Read<intptr_t>();
+    const intptr_t cid_end = Read<intptr_t>();
+    CidRange* r = new (Z) CidRange(cid_start, cid_end);
+    cids->Add(r);
+  }
+  return *cids;
+}
+
+template <>
+void FlowGraphSerializer::Write<const Class&>(const Class& x) {
+  if (x.IsNull()) {
+    Write<classid_t>(kIllegalCid);
+    return;
+  }
+  Write<classid_t>(x.id());
+}
+
+template <>
+const Class& FlowGraphDeserializer::Read<const Class&>() {
+  const classid_t cid = Read<classid_t>();
+  if (cid == kIllegalCid) {
+    return Class::ZoneHandle(Z);
+  }
+  return Class::ZoneHandle(Z, GetClassById(cid));
+}
+
+void ConstraintInstr::WriteExtra(FlowGraphSerializer* s) {
+  TemplateDefinition::WriteExtra(s);
+  s->WriteRef<TargetEntryInstr*>(target_);
+}
+
+void ConstraintInstr::ReadExtra(FlowGraphDeserializer* d) {
+  TemplateDefinition::ReadExtra(d);
+  target_ = d->ReadRef<TargetEntryInstr*>();
+}
+
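+// Code objects are restricted to stubs and encoded as a single index:
+// values below StubCode::NumEntries() select a StubCode entry, and larger
+// values select object-store stubs in OBJECT_STORE_STUB_CODE_LIST order.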
+template <>
+void FlowGraphSerializer::Write<const Code&>(const Code& x) {
+  ASSERT(!x.IsNull());
+  ASSERT(x.IsStubCode());
+  for (intptr_t i = 0, n = StubCode::NumEntries(); i < n; ++i) {
+    if (StubCode::EntryAt(i).ptr() == x.ptr()) {
+      Write<intptr_t>(i);
+      return;
+    }
+  }
+  intptr_t index = StubCode::NumEntries();
+  ObjectStore* object_store = isolate_group()->object_store();
+#define MATCH(member, name)                                                    \
+  if (object_store->member() == x.ptr()) {                                     \
+    Write<intptr_t>(index);                                                    \
+    return;                                                                    \
+  }                                                                            \
+  ++index;
+  OBJECT_STORE_STUB_CODE_LIST(MATCH)
+#undef MATCH
+  UNIMPLEMENTED();
+}
+
+template <>
+const Code& FlowGraphDeserializer::Read<const Code&>() {
+  const intptr_t stub_id = Read<intptr_t>();
+  if (stub_id < StubCode::NumEntries()) {
+    return StubCode::EntryAt(stub_id);
+  }
+  intptr_t index = StubCode::NumEntries();
+  ObjectStore* object_store = isolate_group()->object_store();
+#define MATCH(member, name)                                                    \
+  if (index == stub_id) {                                                      \
+    return Code::ZoneHandle(Z, object_store->member());                        \
+  }                                                                            \
+  ++index;
+  OBJECT_STORE_STUB_CODE_LIST(MATCH)
+#undef MATCH
+  UNIMPLEMENTED();
+}
+
+template <>
+void FlowGraphSerializer::Write<CompileType*>(CompileType* x) {
+  if (x == nullptr) {
+    Write<bool>(false);
+  } else {
+    Write<bool>(true);
+    x->Write(this);
+  }
+}
+
+template <>
+CompileType* FlowGraphDeserializer::Read<CompileType*>() {
+  if (!Read<bool>()) {
+    return nullptr;
+  }
+  return new (Z) CompileType(this);
+}
+
+void CompileType::Write(FlowGraphSerializer* s) const {
+  s->Write<bool>(can_be_null_);
+  s->Write<bool>(can_be_sentinel_);
+  s->Write<classid_t>(cid_);
+  if (type_ == nullptr) {
+    s->Write<bool>(false);
+  } else {
+    s->Write<bool>(true);
+    s->Write<const AbstractType&>(*type_);
+  }
+}
+
+CompileType::CompileType(FlowGraphDeserializer* d)
+    : can_be_null_(d->Read<bool>()),
+      can_be_sentinel_(d->Read<bool>()),
+      cid_(d->Read<classid_t>()),
+      type_(nullptr) {
+  if (d->Read<bool>()) {
+    type_ = &d->Read<const AbstractType&>();
+  }
+}
+
+void Definition::WriteTo(FlowGraphSerializer* s) {
+  Instruction::WriteTo(s);
+  s->Write<Range*>(range_);
+  s->Write<intptr_t>(temp_index_);
+  s->Write<intptr_t>(ssa_temp_index_);
+  s->Write<CompileType*>(type_);
+}
+
+Definition::Definition(FlowGraphDeserializer* d)
+    : Instruction(d),
+      range_(d->Read<Range*>()),
+      temp_index_(d->Read<intptr_t>()),
+      ssa_temp_index_(d->Read<intptr_t>()),
+      type_(d->Read<CompileType*>()) {
+  if (HasSSATemp()) {
+    d->set_definition(ssa_temp_index(), this);
+  }
+  if (type_ != nullptr) {
+    type_->set_owner(this);
+  }
+}
+
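+// Definitions are referenced by their SSA temp index (PushArguments being
+// the one exception handled below), so writing references requires the
+// flow graph to be in SSA form.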
+template <>
+void FlowGraphSerializer::WriteRef<Definition*>(Definition* x) {
+  if (!x->HasSSATemp()) {
+    if (auto* push_arg = x->AsPushArgument()) {
+      // Call environments can reference PushArgument instructions, which
+      // don't have SSA temps. Write a reference to the original definition
+      // instead; when reading, the PushArgument reference is restored by
+      // RepairPushArgsInEnvironment.
+      x = push_arg->value()->definition();
+    } else {
+      UNREACHABLE();
+    }
+  }
+  ASSERT(x->HasSSATemp());
+  ASSERT(can_write_refs());
+  Write<intptr_t>(x->ssa_temp_index());
+}
+
+template <>
+Definition* FlowGraphDeserializer::ReadRef<Definition*>() {
+  return definition(Read<intptr_t>());
+}
+
+template <>
+void FlowGraphSerializer::Write<double>(double x) {
+  stream_->Write<int64_t>(bit_cast<int64_t>(x));
+}
+
+template <>
+double FlowGraphDeserializer::Read<double>() {
+  return bit_cast<double>(stream_->Read<int64_t>());
+}
+
+template <>
+void FlowGraphSerializer::Write<Environment*>(Environment* x) {
+  ASSERT(can_write_refs());
+  if (x == nullptr) {
+    Write<bool>(false);
+  } else {
+    Write<bool>(true);
+    x->Write(this);
+  }
+}
+
+template <>
+Environment* FlowGraphDeserializer::Read<Environment*>() {
+  if (!Read<bool>()) {
+    return nullptr;
+  }
+  return new (Z) Environment(this);
+}
+
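+// Environment values are written first; a bool flag then records whether
+// one Location per value follows, since locations_ may be absent.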
+void Environment::Write(FlowGraphSerializer* s) const {
+  s->Write<GrowableArray<Value*>>(values_);
+  s->Write<intptr_t>(fixed_parameter_count_);
+  s->Write<uintptr_t>(bitfield_);
+  s->Write<const Function&>(function_);
+  s->Write<Environment*>(outer_);
+  if (locations_ == nullptr) {
+    s->Write<bool>(false);
+  } else {
+    s->Write<bool>(true);
+    for (intptr_t i = 0, n = values_.length(); i < n; ++i) {
+      locations_[i].Write(s);
+    }
+  }
+}
+
+Environment::Environment(FlowGraphDeserializer* d)
+    : values_(d->Read<GrowableArray<Value*>>()),
+      locations_(nullptr),
+      fixed_parameter_count_(d->Read<intptr_t>()),
+      bitfield_(d->Read<uintptr_t>()),
+      function_(d->Read<const Function&>()),
+      outer_(d->Read<Environment*>()) {
+  for (intptr_t i = 0, n = values_.length(); i < n; ++i) {
+    Value* value = values_[i];
+    value->definition()->AddEnvUse(value);
+  }
+  if (d->Read<bool>()) {
+    locations_ = d->zone()->Alloc<Location>(values_.length());
+    for (intptr_t i = 0, n = values_.length(); i < n; ++i) {
+      locations_[i] = Location::Read(d);
+    }
+  }
+}
+
+void FlowGraphSerializer::WriteFlowGraph(
+    const FlowGraph& flow_graph,
+    const ZoneGrowableArray<Definition*>& detached_defs) {
+  ASSERT(!flow_graph.is_licm_allowed());
+
+  Write<intptr_t>(flow_graph.current_ssa_temp_index());
+  Write<intptr_t>(flow_graph.max_block_id());
+  Write<intptr_t>(flow_graph.inlining_id());
+  Write<const Array&>(flow_graph.coverage_array());
+
+  PrologueInfo prologue_info = flow_graph.prologue_info();
+  Write<intptr_t>(prologue_info.min_block_id);
+  Write<intptr_t>(prologue_info.max_block_id);
+
+  // Write all instructions, visiting blocks in reverse postorder.
+  for (auto block : flow_graph.reverse_postorder()) {
+    Write<Instruction*>(block);
+    for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
+      Instruction* current = it.Current();
+      Write<Instruction*>(current);
+    }
+  }
+  Write<Instruction*>(nullptr);
+  Write<const ZoneGrowableArray<Definition*>&>(detached_defs);
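+  // Beyond this point all instructions have been written, so references to
+  // instructions (block ids, SSA temp indices) can be resolved on reading.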
+  can_write_refs_ = true;
+
+  // Write the instructions' extra info,
+  // which may contain references to other instructions.
+  for (auto block : flow_graph.reverse_postorder()) {
+    block->WriteExtra(this);
+    for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
+      Instruction* current = it.Current();
+      current->WriteExtra(this);
+    }
+  }
+  for (auto* instr : detached_defs) {
+    instr->WriteExtra(this);
+  }
+
+  const auto& optimized_block_order = flow_graph.optimized_block_order();
+  Write<intptr_t>(optimized_block_order.length());
+  for (intptr_t i = 0, n = optimized_block_order.length(); i < n; ++i) {
+    WriteRef<BlockEntryInstr*>(optimized_block_order[i]);
+  }
+
+  const auto* captured_parameters = flow_graph.captured_parameters();
+  if (captured_parameters->IsEmpty()) {
+    Write<bool>(false);
+  } else {
+    Write<bool>(true);
+    // Captured parameters are rare, so write their bit indices
+    // instead of the whole BitVector.
+    GrowableArray<intptr_t> indices(Z, 0);
+    for (intptr_t i = 0, n = captured_parameters->length(); i < n; ++i) {
+      if (captured_parameters->Contains(i)) {
+        indices.Add(i);
+      }
+    }
+    Write<GrowableArray<intptr_t>>(indices);
+  }
+}
+
+FlowGraph* FlowGraphDeserializer::ReadFlowGraph() {
+  const intptr_t current_ssa_temp_index = Read<intptr_t>();
+  const intptr_t max_block_id = Read<intptr_t>();
+  const intptr_t inlining_id = Read<intptr_t>();
+  const Array& coverage_array = Read<const Array&>();
+  const PrologueInfo prologue_info(Read<intptr_t>(), Read<intptr_t>());
+
+  definitions_.EnsureLength(current_ssa_temp_index, nullptr);
+  blocks_.EnsureLength(max_block_id + 1, nullptr);
+
+  // Read/create instructions.
+  ZoneGrowableArray<Instruction*> instructions(16);
+  Instruction* prev = nullptr;
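+  // Instructions arrive in the order they were written: blocks in reverse
+  // postorder, each followed by its body. Non-block-entry instructions are
+  // linked to their predecessor to rebuild each block's instruction list.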
+  while (Instruction* instr = Read<Instruction*>()) {
+    instructions.Add(instr);
+    if (!instr->IsBlockEntry()) {
+      ASSERT(prev != nullptr);
+      prev->LinkTo(instr);
+    }
+    prev = instr;
+  }
+  ASSERT(graph_entry_ != nullptr);
+  const auto& detached_defs = Read<const ZoneGrowableArray<Definition*>&>();
+
+  // Read the instructions' extra info,
+  // which may contain references to other instructions.
+  for (Instruction* instr : instructions) {
+    instr->ReadExtra(this);
+  }
+  for (auto* instr : detached_defs) {
+    instr->ReadExtra(this);
+  }
+
+  FlowGraph* flow_graph = new (Z)
+      FlowGraph(parsed_function(), graph_entry_, max_block_id, prologue_info);
+  flow_graph->set_current_ssa_temp_index(current_ssa_temp_index);
+  flow_graph->CreateCommonConstants();
+  flow_graph->disallow_licm();
+  flow_graph->set_inlining_id(inlining_id);
+  flow_graph->set_coverage_array(coverage_array);
+
+  {
+    const intptr_t num_blocks = Read<intptr_t>();
+    if (num_blocks != 0) {
+      auto* codegen_block_order = flow_graph->CodegenBlockOrder(true);
+      ASSERT(codegen_block_order == &flow_graph->optimized_block_order());
+      for (intptr_t i = 0; i < num_blocks; ++i) {
+        codegen_block_order->Add(ReadRef<BlockEntryInstr*>());
+      }
+    }
+  }
+
+  if (Read<bool>()) {
+    GrowableArray<intptr_t> indices = Read<GrowableArray<intptr_t>>();
+    for (intptr_t i : indices) {
+      flow_graph->captured_parameters()->Add(i);
+    }
+  }
+
+  return flow_graph;
+}
+
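+// A Function is encoded by its kind plus whatever identifies it within the
+// program structure (e.g. owner class id and function index, parent
+// function, or accessor field). This is only stable because reader and
+// writer are assumed to share the same program structure.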
+template <>
+void FlowGraphSerializer::Write<const Function&>(const Function& x) {
+  if (x.IsNull()) {
+    Write<int8_t>(-1);
+    return;
+  }
+  Write<int8_t>(x.kind());
+  switch (x.kind()) {
+    case UntaggedFunction::kRegularFunction:
+    case UntaggedFunction::kGetterFunction:
+    case UntaggedFunction::kSetterFunction:
+    case UntaggedFunction::kImplicitGetter:
+    case UntaggedFunction::kImplicitSetter:
+    case UntaggedFunction::kImplicitStaticGetter:
+    case UntaggedFunction::kConstructor: {
+      const auto& owner = Class::Handle(Z, x.Owner());
+      Write<classid_t>(owner.id());
+      const intptr_t function_index = owner.FindFunctionIndex(x);
+      ASSERT(function_index >= 0);
+      Write<intptr_t>(function_index);
+      return;
+    }
+    case UntaggedFunction::kImplicitClosureFunction: {
+      const auto& parent = Function::Handle(Z, x.parent_function());
+      Write<const Function&>(parent);
+      return;
+    }
+    case UntaggedFunction::kFieldInitializer: {
+      const auto& field = Field::Handle(Z, x.accessor_field());
+      Write<const Field&>(field);
+      return;
+    }
+    case UntaggedFunction::kClosureFunction:
+      // TODO(alexmarkov): we cannot rely on ClosureFunctionsCache
+      // as it is lazily populated while functions are compiled.
+      // We need to serialize the kernel offset and re-create
+      // closure functions on demand when reading.
+      Write<intptr_t>(ClosureFunctionsCache::FindClosureIndex(x));
+      return;
+    case UntaggedFunction::kMethodExtractor: {
+      Function& function = Function::Handle(Z, x.extracted_method_closure());
+      ASSERT(function.IsImplicitClosureFunction());
+      function = function.parent_function();
+      Write<const Function&>(function);
+      Write<const String&>(String::Handle(Z, x.name()));
+      return;
+    }
+    case UntaggedFunction::kInvokeFieldDispatcher: {
+      Write<const Class&>(Class::Handle(Z, x.Owner()));
+      Write<const String&>(String::Handle(Z, x.name()));
+      Write<const Array&>(Array::Handle(Z, x.saved_args_desc()));
+      return;
+    }
+    case UntaggedFunction::kDynamicInvocationForwarder: {
+      const auto& target = Function::Handle(Z, x.ForwardingTarget());
+      Write<const Function&>(target);
+      return;
+    }
+    case UntaggedFunction::kFfiTrampoline: {
+      if (x.FfiCallbackTarget() != Object::null()) {
+        UNIMPLEMENTED();
+      }
+      Write<const String&>(String::Handle(Z, x.name()));
+      Write<const FunctionType&>(FunctionType::Handle(Z, x.signature()));
+      Write<const FunctionType&>(FunctionType::Handle(Z, x.FfiCSignature()));
+      Write<bool>(x.FfiIsLeaf());
+      return;
+    }
+    default:
+      break;
+  }
+  switch (x.kind()) {
+#define UNIMPLEMENTED_FUNCTION_KIND(kind)                                      \
+  case UntaggedFunction::k##kind:                                              \
+    FATAL("Unimplemented Write<const Function&> for " #kind);
+    FOR_EACH_RAW_FUNCTION_KIND(UNIMPLEMENTED_FUNCTION_KIND)
+#undef UNIMPLEMENTED_FUNCTION_KIND
+  }
+  UNREACHABLE();
+}
+
+template <>
+const Function& FlowGraphDeserializer::Read<const Function&>() {
+  const int8_t raw_kind = Read<int8_t>();
+  if (raw_kind < 0) {
+    return Object::null_function();
+  }
+  const auto kind = static_cast<UntaggedFunction::Kind>(raw_kind);
+  switch (kind) {
+    case UntaggedFunction::kRegularFunction:
+    case UntaggedFunction::kGetterFunction:
+    case UntaggedFunction::kSetterFunction:
+    case UntaggedFunction::kImplicitGetter:
+    case UntaggedFunction::kImplicitSetter:
+    case UntaggedFunction::kImplicitStaticGetter:
+    case UntaggedFunction::kConstructor: {
+      const classid_t owner_class_id = Read<classid_t>();
+      const intptr_t function_index = Read<intptr_t>();
+      const auto& owner = Class::Handle(Z, GetClassById(owner_class_id));
+      const auto& result =
+          Function::ZoneHandle(Z, owner.FunctionFromIndex(function_index));
+      ASSERT(!result.IsNull());
+      return result;
+    }
+    case UntaggedFunction::kImplicitClosureFunction: {
+      const auto& parent = Read<const Function&>();
+      return Function::ZoneHandle(Z, parent.ImplicitClosureFunction());
+    }
+    case UntaggedFunction::kFieldInitializer: {
+      const auto& field = Read<const Field&>();
+      return Function::ZoneHandle(Z, field.EnsureInitializerFunction());
+    }
+    case UntaggedFunction::kClosureFunction: {
+      const intptr_t index = Read<intptr_t>();
+      return Function::ZoneHandle(
+          Z, ClosureFunctionsCache::ClosureFunctionFromIndex(index));
+    }
+    case UntaggedFunction::kMethodExtractor: {
+      const Function& function = Read<const Function&>();
+      const String& name = Read<const String&>();
+      return Function::ZoneHandle(Z, function.GetMethodExtractor(name));
+    }
+    case UntaggedFunction::kInvokeFieldDispatcher: {
+      const Class& owner = Read<const Class&>();
+      const String& target_name = Read<const String&>();
+      const Array& args_desc = Read<const Array&>();
+      return Function::ZoneHandle(
+          Z,
+          owner.GetInvocationDispatcher(
+              target_name, args_desc, UntaggedFunction::kInvokeFieldDispatcher,
+              /*create_if_absent=*/true));
+    }
+    case UntaggedFunction::kDynamicInvocationForwarder: {
+      const auto& target = Read<const Function&>();
+      auto& name = String::Handle(Z, target.name());
+      name = Function::CreateDynamicInvocationForwarderName(name);
+      return Function::ZoneHandle(Z,
+                                  target.GetDynamicInvocationForwarder(name));
+    }
+    case UntaggedFunction::kFfiTrampoline: {
+      const String& name = Read<const String&>();
+      const FunctionType& signature = Read<const FunctionType&>();
+      const FunctionType& c_signature = Read<const FunctionType&>();
+      const bool is_leaf = Read<bool>();
+      return Function::ZoneHandle(
+          Z, compiler::ffi::TrampolineFunction(name, signature, c_signature,
+                                               is_leaf));
+    }
+    default:
+      UNIMPLEMENTED();
+      return Object::null_function();
+  }
+}
+
+void FunctionEntryInstr::WriteTo(FlowGraphSerializer* s) {
+  BlockEntryWithInitialDefs::WriteTo(s);
+}
+
+FunctionEntryInstr::FunctionEntryInstr(FlowGraphDeserializer* d)
+    : BlockEntryWithInitialDefs(d), graph_entry_(d->graph_entry()) {}
+
+void GraphEntryInstr::WriteTo(FlowGraphSerializer* s) {
+  BlockEntryWithInitialDefs::WriteTo(s);
+  s->Write<intptr_t>(osr_id_);
+  s->Write<intptr_t>(entry_count_);
+  s->Write<intptr_t>(spill_slot_count_);
+  s->Write<intptr_t>(fixed_slot_count_);
+  s->Write<bool>(needs_frame_);
+}
+
+GraphEntryInstr::GraphEntryInstr(FlowGraphDeserializer* d)
+    : BlockEntryWithInitialDefs(d),
+      parsed_function_(d->parsed_function()),
+      osr_id_(d->Read<intptr_t>()),
+      entry_count_(d->Read<intptr_t>()),
+      spill_slot_count_(d->Read<intptr_t>()),
+      fixed_slot_count_(d->Read<intptr_t>()),
+      needs_frame_(d->Read<bool>()) {
+  d->set_graph_entry(this);
+}
+
+void GraphEntryInstr::WriteExtra(FlowGraphSerializer* s) {
+  BlockEntryWithInitialDefs::WriteExtra(s);
+  s->WriteRef<FunctionEntryInstr*>(normal_entry_);
+  s->WriteRef<FunctionEntryInstr*>(unchecked_entry_);
+  s->WriteRef<OsrEntryInstr*>(osr_entry_);
+  s->WriteGrowableArrayOfRefs<CatchBlockEntryInstr*>(catch_entries_);
+  s->WriteGrowableArrayOfRefs<IndirectEntryInstr*>(indirect_entries_);
+}
+
+void GraphEntryInstr::ReadExtra(FlowGraphDeserializer* d) {
+  BlockEntryWithInitialDefs::ReadExtra(d);
+  normal_entry_ = d->ReadRef<FunctionEntryInstr*>();
+  unchecked_entry_ = d->ReadRef<FunctionEntryInstr*>();
+  osr_entry_ = d->ReadRef<OsrEntryInstr*>();
+  catch_entries_ = d->ReadGrowableArrayOfRefs<CatchBlockEntryInstr*>();
+  indirect_entries_ = d->ReadGrowableArrayOfRefs<IndirectEntryInstr*>();
+}
+
+void GotoInstr::WriteExtra(FlowGraphSerializer* s) {
+  TemplateInstruction::WriteExtra(s);
+  if (parallel_move_ != nullptr) {
+    parallel_move_->WriteExtra(s);
+  }
+  s->WriteRef<JoinEntryInstr*>(successor_);
+}
+
+void GotoInstr::ReadExtra(FlowGraphDeserializer* d) {
+  TemplateInstruction::ReadExtra(d);
+  if (parallel_move_ != nullptr) {
+    parallel_move_->ReadExtra(d);
+  }
+  successor_ = d->ReadRef<JoinEntryInstr*>();
+}
+
+template <>
+void FlowGraphSerializer::Write<const ICData*>(const ICData* x) {
+  if (x == nullptr) {
+    Write<bool>(false);
+  } else {
+    Write<bool>(true);
+    ASSERT(!x->IsNull());
+    Write<const Object&>(*x);
+  }
+}
+
+template <>
+const ICData* FlowGraphDeserializer::Read<const ICData*>() {
+  if (!Read<bool>()) {
+    return nullptr;
+  }
+  return &ICData::Cast(Read<const Object&>());
+}
+
+void IfThenElseInstr::WriteExtra(FlowGraphSerializer* s) {
+  // IfThenElse reuses inputs from its embedded Comparison.
+  // Definition::WriteExtra is not called to avoid
+  // writing/reading inputs twice.
+  WriteExtraWithoutInputs(s);
+  comparison_->WriteExtra(s);
+}
+
+void IfThenElseInstr::ReadExtra(FlowGraphDeserializer* d) {
+  ReadExtraWithoutInputs(d);
+  comparison_->ReadExtra(d);
+  for (intptr_t i = comparison_->InputCount() - 1; i >= 0; --i) {
+    comparison_->InputAt(i)->set_instruction(this);
+  }
+}
+
+void IndirectGotoInstr::WriteTo(FlowGraphSerializer* s) {
+  TemplateInstruction::WriteTo(s);
+  s->Write<intptr_t>(offsets_.Length());
+}
+
+IndirectGotoInstr::IndirectGotoInstr(FlowGraphDeserializer* d)
+    : TemplateInstruction(d),
+      offsets_(TypedData::ZoneHandle(d->zone(),
+                                     TypedData::New(kTypedDataInt32ArrayCid,
+                                                    d->Read<intptr_t>(),
+                                                    Heap::kOld))) {}
+
+void IndirectGotoInstr::WriteExtra(FlowGraphSerializer* s) {
+  TemplateInstruction::WriteExtra(s);
+  s->WriteGrowableArrayOfRefs<TargetEntryInstr*>(successors_);
+}
+
+void IndirectGotoInstr::ReadExtra(FlowGraphDeserializer* d) {
+  TemplateInstruction::ReadExtra(d);
+  successors_ = d->ReadGrowableArrayOfRefs<TargetEntryInstr*>();
+}
+
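+// An instruction is written as a one-byte tag (its Instruction::Tag)
+// followed by its WriteTo payload; the out-of-range tag kNumInstructions
+// encodes nullptr and doubles as the end-of-instructions marker used by
+// WriteFlowGraph.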
+template <>
+void FlowGraphSerializer::Write<Instruction*>(Instruction* x) {
+  if (x == nullptr) {
+    Write<uint8_t>(Instruction::kNumInstructions);
+  } else {
+    Write<uint8_t>(static_cast<uint8_t>(x->tag()));
+    x->WriteTo(this);
+  }
+}
+
+template <>
+Instruction* FlowGraphDeserializer::Read<Instruction*>() {
+  const uint8_t tag = Read<uint8_t>();
+  switch (tag) {
+#define READ_INSTRUCTION(type, attrs)                                          \
+  case Instruction::k##type:                                                   \
+    return new (Z) type##Instr(this);
+    FOR_EACH_INSTRUCTION(READ_INSTRUCTION)
+#undef READ_INSTRUCTION
+    case Instruction::kNumInstructions:
+      return nullptr;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+void Instruction::WriteTo(FlowGraphSerializer* s) {
+  s->Write<intptr_t>(deopt_id_);
+  s->Write<intptr_t>(inlining_id_);
+}
+
+Instruction::Instruction(FlowGraphDeserializer* d)
+    : deopt_id_(d->Read<intptr_t>()), inlining_id_(d->Read<intptr_t>()) {}
+
+void Instruction::WriteExtra(FlowGraphSerializer* s) {
+  for (intptr_t i = 0, n = InputCount(); i < n; ++i) {
+    s->Write<Value*>(InputAt(i));
+  }
+  WriteExtraWithoutInputs(s);
+}
+
+void Instruction::ReadExtra(FlowGraphDeserializer* d) {
+  for (intptr_t i = 0, n = InputCount(); i < n; ++i) {
+    SetInputAt(i, d->Read<Value*>());
+  }
+  for (intptr_t i = InputCount() - 1; i >= 0; --i) {
+    Value* input = InputAt(i);
+    input->definition()->AddInputUse(input);
+  }
+  ReadExtraWithoutInputs(d);
+}
+
+void Instruction::WriteExtraWithoutInputs(FlowGraphSerializer* s) {
+  s->Write<Environment*>(env_);
+  s->Write<LocationSummary*>(locs_);
+}
+
+void Instruction::ReadExtraWithoutInputs(FlowGraphDeserializer* d) {
+  Environment* env = d->Read<Environment*>();
+  SetEnvironment(env);
+  locs_ = d->Read<LocationSummary*>();
+}
+
+#define INSTRUCTIONS_SERIALIZABLE_AS_INSTRUCTION(V)                            \
+  V(Comparison, ComparisonInstr)                                               \
+  V(Constant, ConstantInstr)                                                   \
+  V(Definition, Definition)                                                    \
+  V(ParallelMove, ParallelMoveInstr)                                           \
+  V(Phi, PhiInstr)
+
+#define SERIALIZABLE_AS_INSTRUCTION(name, type)                                \
+  template <>                                                                  \
+  void FlowGraphSerializer::Write<type*>(type * x) {                           \
+    Write<Instruction*>(x);                                                    \
+  }                                                                            \
+  template <>                                                                  \
+  type* FlowGraphDeserializer::Read<type*>() {                                 \
+    Instruction* instr = Read<Instruction*>();                                 \
+    ASSERT((instr == nullptr) || instr->Is##name());                           \
+    return static_cast<type*>(instr);                                          \
+  }
+
+INSTRUCTIONS_SERIALIZABLE_AS_INSTRUCTION(SERIALIZABLE_AS_INSTRUCTION)
+#undef SERIALIZABLE_AS_INSTRUCTION
+#undef INSTRUCTIONS_SERIALIZABLE_AS_INSTRUCTION
+
+template <>
+void FlowGraphSerializer::Write<int8_t>(int8_t x) {
+  stream_->Write<int8_t>(x);
+}
+
+template <>
+int8_t FlowGraphDeserializer::Read<int8_t>() {
+  return stream_->Read<int8_t>();
+}
+
+template <>
+void FlowGraphSerializer::Write<int16_t>(int16_t x) {
+  stream_->Write<int16_t>(x);
+}
+
+template <>
+int16_t FlowGraphDeserializer::Read<int16_t>() {
+  return stream_->Read<int16_t>();
+}
+
+template <>
+void FlowGraphSerializer::Write<int32_t>(int32_t x) {
+  stream_->Write<int32_t>(x);
+}
+
+template <>
+int32_t FlowGraphDeserializer::Read<int32_t>() {
+  return stream_->Read<int32_t>();
+}
+
+template <>
+void FlowGraphSerializer::Write<int64_t>(int64_t x) {
+  stream_->Write<int64_t>(x);
+}
+
+template <>
+int64_t FlowGraphDeserializer::Read<int64_t>() {
+  return stream_->Read<int64_t>();
+}
+
+void JoinEntryInstr::WriteExtra(FlowGraphSerializer* s) {
+  BlockEntryInstr::WriteExtra(s);
+  if (phis_ != nullptr) {
+    for (PhiInstr* phi : *phis_) {
+      phi->WriteExtra(s);
+    }
+  }
+}
+
+void JoinEntryInstr::ReadExtra(FlowGraphDeserializer* d) {
+  BlockEntryInstr::ReadExtra(d);
+  if (phis_ != nullptr) {
+    for (PhiInstr* phi : *phis_) {
+      phi->ReadExtra(d);
+    }
+  }
+}
+
+template <>
+void FlowGraphSerializer::Write<const LocalVariable&>(const LocalVariable& x) {
+  UNIMPLEMENTED();
+}
+
+template <>
+const LocalVariable& FlowGraphDeserializer::Read<const LocalVariable&>() {
+  UNIMPLEMENTED();
+  return *parsed_function().receiver_var();
+}
+
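+// A Location is encoded by its raw word value. Pair locations recursively
+// write both halves, and constant locations write a reference to the
+// defining ConstantInstr instead of the pointer embedded in the word.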
+void Location::Write(FlowGraphSerializer* s) const {
+  if (IsPairLocation()) {
+    s->Write<uword>(value_ & kLocationTagMask);
+    PairLocation* pair = AsPairLocation();
+    pair->At(0).Write(s);
+    pair->At(1).Write(s);
+  } else if (IsConstant()) {
+    s->Write<uword>(value_ & kLocationTagMask);
+    s->WriteRef<Definition*>(constant_instruction());
+  } else {
+    s->Write<uword>(value_);
+  }
+}
+
+Location Location::Read(FlowGraphDeserializer* d) {
+  const uword value = d->Read<uword>();
+  if (value == kPairLocationTag) {
+    return Location::Pair(Location::Read(d), Location::Read(d));
+  } else if ((value & kConstantTag) == kConstantTag) {
+    ConstantInstr* instr = d->ReadRef<Definition*>()->AsConstant();
+    ASSERT(instr != nullptr);
+    const int pair_index = (value & kPairLocationTag) != 0 ? 1 : 0;
+    return Location::Constant(instr, pair_index);
+  } else {
+    return Location(value);
+  }
+}
+
+template <>
+void FlowGraphSerializer::Write<LocationSummary*>(LocationSummary* x) {
+  ASSERT(can_write_refs());
+  if (x == nullptr) {
+    Write<bool>(false);
+  } else {
+    Write<bool>(true);
+    x->Write(this);
+  }
+}
+
+template <>
+LocationSummary* FlowGraphDeserializer::Read<LocationSummary*>() {
+  if (!Read<bool>()) {
+    return nullptr;
+  }
+  return new (Z) LocationSummary(this);
+}
+
+void LocationSummary::Write(FlowGraphSerializer* s) const {
+  s->Write<intptr_t>(input_count());
+  s->Write<intptr_t>(temp_count());
+  s->Write<int8_t>(static_cast<int8_t>(contains_call_));
+  live_registers_.Write(s);
+
+  for (intptr_t i = 0, n = input_count(); i < n; ++i) {
+    in(i).Write(s);
+  }
+  for (intptr_t i = 0, n = temp_count(); i < n; ++i) {
+    temp(i).Write(s);
+  }
+  ASSERT(output_count() == 1);
+  out(0).Write(s);
+
+  if ((stack_bitmap_ != nullptr) && (stack_bitmap_->Length() != 0)) {
+    s->Write<int8_t>(1);
+    stack_bitmap_->Write(s->stream());
+  } else {
+    s->Write<int8_t>(0);
+  }
+
+#if defined(DEBUG)
+  s->Write<intptr_t>(writable_inputs_);
+#endif
+}
+
+LocationSummary::LocationSummary(FlowGraphDeserializer* d)
+    : num_inputs_(d->Read<intptr_t>()),
+      num_temps_(d->Read<intptr_t>()),
+      output_location_(),
+      stack_bitmap_(nullptr),
+      contains_call_(static_cast<ContainsCall>(d->Read<int8_t>())),
+      live_registers_(d) {
+  input_locations_ = d->zone()->Alloc<Location>(num_inputs_);
+  for (intptr_t i = 0; i < num_inputs_; ++i) {
+    input_locations_[i] = Location::Read(d);
+  }
+  temp_locations_ = d->zone()->Alloc<Location>(num_temps_);
+  for (intptr_t i = 0; i < num_temps_; ++i) {
+    temp_locations_[i] = Location::Read(d);
+  }
+  output_location_ = Location::Read(d);
+
+  if (d->Read<int8_t>() != 0) {
+    EnsureStackBitmap().Read(d->stream());
+  }
+
+#if defined(DEBUG)
+  writable_inputs_ = d->Read<intptr_t>();
+#endif
+}
+
+void MakeTempInstr::WriteExtra(FlowGraphSerializer* s) {
+  TemplateDefinition::WriteExtra(s);
+  null_->WriteExtra(s);
+}
+
+void MakeTempInstr::ReadExtra(FlowGraphDeserializer* d) {
+  TemplateDefinition::ReadExtra(d);
+  null_->ReadExtra(d);
+}
+
+void MaterializeObjectInstr::WriteExtra(FlowGraphSerializer* s) {
+  VariadicDefinition::WriteExtra(s);
+  for (intptr_t i = 0, n = InputCount(); i < n; ++i) {
+    locations_[i].Write(s);
+  }
+}
+
+void MaterializeObjectInstr::ReadExtra(FlowGraphDeserializer* d) {
+  VariadicDefinition::ReadExtra(d);
+  locations_ = d->zone()->Alloc<Location>(InputCount());
+  for (intptr_t i = 0, n = InputCount(); i < n; ++i) {
+    locations_[i] = Location::Read(d);
+  }
+}
+
+template <>
+void FlowGraphSerializer::Write<MoveOperands*>(MoveOperands* x) {
+  ASSERT(x != nullptr);
+  x->src().Write(this);
+  x->dest().Write(this);
+}
+
+template <>
+MoveOperands* FlowGraphDeserializer::Read<MoveOperands*>() {
+  Location src = Location::Read(this);
+  Location dest = Location::Read(this);
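+  // Note: the stream order is src then dest, while the MoveOperands
+  // constructor takes (dest, src).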
+  return new (Z) MoveOperands(dest, src);
+}
+
+template <>
+void FlowGraphSerializer::Write<const compiler::ffi::NativeCallingConvention&>(
+    const compiler::ffi::NativeCallingConvention& x) {
+  // A subset of NativeCallingConvention currently used by CCallInstr.
+  const auto& args = x.argument_locations();
+  for (intptr_t i = 0, n = args.length(); i < n; ++i) {
+    if (args.At(i)->payload_type().AsRepresentation() != kUnboxedFfiIntPtr) {
+      UNIMPLEMENTED();
+    }
+  }
+  if (x.return_location().payload_type().AsRepresentation() !=
+      kUnboxedFfiIntPtr) {
+    UNIMPLEMENTED();
+  }
+  Write<intptr_t>(args.length());
+}
+
+template <>
+const compiler::ffi::NativeCallingConvention&
+FlowGraphDeserializer::Read<const compiler::ffi::NativeCallingConvention&>() {
+  const intptr_t num_args = Read<intptr_t>();
+  const auto& native_function_type =
+      *compiler::ffi::NativeFunctionType::FromUnboxedRepresentation(
+          Z, num_args, kUnboxedFfiIntPtr);
+  return compiler::ffi::NativeCallingConvention::FromSignature(
+      Z, native_function_type);
+}
+
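+// Objects are deduplicated using the heap's object id table: the first
+// occurrence is written as its class id followed by the payload, while
+// repeated occurrences are written as kIllegalCid followed by the index
+// assigned on the first write.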
+template <>
+void FlowGraphSerializer::Write<const Object&>(const Object& x) {
+  const intptr_t cid = x.GetClassId();
+  ASSERT(cid != kIllegalCid);
+  // Do not write objects repeatedly.
+  const intptr_t object_id = heap_->GetObjectId(x.ptr());
+  if (object_id != 0) {
+    const intptr_t object_index = object_id - 1;
+    Write<intptr_t>(kIllegalCid);
+    Write<intptr_t>(object_index);
+    return;
+  }
+  const intptr_t object_index = object_counter_++;
+  heap_->SetObjectId(x.ptr(), object_index + 1);
+  Write<intptr_t>(cid);
+  WriteObjectImpl(x, cid, object_index);
+}
+
+template <>
+const Object& FlowGraphDeserializer::Read<const Object&>() {
+  const intptr_t cid = Read<intptr_t>();
+  if (cid == kIllegalCid) {
+    const intptr_t object_index = Read<intptr_t>();
+    return *objects_[object_index];
+  }
+  const intptr_t object_index = object_counter_;
+  object_counter_++;
+  const Object& result = ReadObjectImpl(cid, object_index);
+  SetObjectAt(object_index, result);
+  return result;
+}
+
+void FlowGraphDeserializer::SetObjectAt(intptr_t object_index,
+                                        const Object& object) {
+  objects_.EnsureLength(object_index + 1, &Object::null_object());
+  objects_[object_index] = &object;
+}
+
+void FlowGraphSerializer::WriteObjectImpl(const Object& x,
+                                          intptr_t cid,
+                                          intptr_t object_index) {
+  switch (cid) {
+    case kArrayCid:
+    case kImmutableArrayCid: {
+      const auto& array = Array::Cast(x);
+      const intptr_t len = array.Length();
+      Write<intptr_t>(len);
+      const auto& type_args =
+          TypeArguments::Handle(Z, array.GetTypeArguments());
+      Write<const TypeArguments&>(type_args);
+      if ((len == 0) && type_args.IsNull()) {
+        break;
+      }
+      Write<bool>(array.IsCanonical());
+      auto& elem = Object::Handle(Z);
+      for (intptr_t i = 0; i < len; ++i) {
+        elem = array.At(i);
+        Write<const Object&>(elem);
+      }
+      break;
+    }
+    case kBoolCid:
+      Write<bool>(Bool::Cast(x).value());
+      break;
+    case kClosureCid: {
+      const auto& closure = Closure::Cast(x);
+      if (closure.context() != Object::null()) {
+        UNIMPLEMENTED();
+      }
+      ASSERT(closure.IsCanonical());
+      auto& type_args = TypeArguments::Handle(Z);
+      type_args = closure.instantiator_type_arguments();
+      Write<const TypeArguments&>(type_args);
+      type_args = closure.function_type_arguments();
+      Write<const TypeArguments&>(type_args);
+      type_args = closure.delayed_type_arguments();
+      Write<const TypeArguments&>(type_args);
+      Write<const Function&>(Function::Handle(Z, closure.function()));
+      break;
+    }
+    case kDoubleCid:
+      ASSERT(x.IsCanonical());
+      Write<double>(Double::Cast(x).value());
+      break;
+    case kFieldCid: {
+      const auto& field = Field::Cast(x);
+      const auto& owner = Class::Handle(Z, field.Owner());
+      Write<classid_t>(owner.id());
+      const intptr_t field_index = owner.FindFieldIndex(field);
+      ASSERT(field_index >= 0);
+      Write<intptr_t>(field_index);
+      break;
+    }
+    case kFunctionCid:
+      Write<const Function&>(Function::Cast(x));
+      break;
+    case kFunctionTypeCid: {
+      const auto& type = FunctionType::Cast(x);
+      ASSERT(type.IsFinalized());
+      TypeScope type_scope(this, type.IsRecursive());
+      Write<int8_t>(static_cast<int8_t>(type.nullability()));
+      Write<uint32_t>(type.packed_parameter_counts());
+      Write<uint16_t>(type.packed_type_parameter_counts());
+      Write<const TypeParameters&>(
+          TypeParameters::Handle(Z, type.type_parameters()));
+      AbstractType& t = AbstractType::Handle(Z, type.result_type());
+      Write<const AbstractType&>(t);
+      // Do not write the parameter types as an Array, to avoid eager
+      // canonicalization when reading.
+      const Array& param_types = Array::Handle(Z, type.parameter_types());
+      ASSERT(param_types.Length() == type.NumParameters());
+      for (intptr_t i = 0, n = type.NumParameters(); i < n; ++i) {
+        t ^= param_types.At(i);
+        Write<const AbstractType&>(t);
+      }
+      Write<const Array&>(Array::Handle(Z, type.named_parameter_names()));
+      Write<bool>(type_scope.CanBeCanonicalized());
+      break;
+    }
+    case kICDataCid: {
+      const auto& icdata = ICData::Cast(x);
+      Write<int8_t>(static_cast<int8_t>(icdata.rebind_rule()));
+      Write<const Function&>(Function::Handle(Z, icdata.Owner()));
+      Write<const Array&>(Array::Handle(Z, icdata.arguments_descriptor()));
+      Write<intptr_t>(icdata.deopt_id());
+      Write<intptr_t>(icdata.NumArgsTested());
+      if (icdata.rebind_rule() == ICData::kStatic) {
+        ASSERT(icdata.NumberOfChecks() == 1);
+        Write<const Function&>(Function::Handle(Z, icdata.GetTargetAt(0)));
+      } else if (icdata.rebind_rule() == ICData::kInstance) {
+        if (icdata.NumberOfChecks() != 0) {
+          UNIMPLEMENTED();
+        }
+        Write<const String&>(String::Handle(Z, icdata.target_name()));
+      } else {
+        UNIMPLEMENTED();
+      }
+      break;
+    }
+    case kImmutableLinkedHashMapCid:
+    case kImmutableLinkedHashSetCid: {
+      const auto& map = LinkedHashBase::Cast(x);
+      ASSERT(map.IsCanonical());
+      const intptr_t length = map.Length();
+      Write<intptr_t>(length);
+      Write<const TypeArguments&>(
+          TypeArguments::Handle(Z, map.GetTypeArguments()));
+      const auto& data = Array::Handle(Z, map.data());
+      auto& elem = Object::Handle(Z);
+      intptr_t used_data;
+      if (cid == kImmutableLinkedHashMapCid) {
+        used_data = length << 1;
+      } else {
+        used_data = length;
+      }
+      for (intptr_t i = 0; i < used_data; ++i) {
+        elem = data.At(i);
+        Write<const Object&>(elem);
+      }
+      break;
+    }
+    case kLibraryPrefixCid: {
+      const auto& prefix = LibraryPrefix::Cast(x);
+      const Library& library = Library::Handle(Z, prefix.importer());
+      Write<classid_t>(Class::Handle(Z, library.toplevel_class()).id());
+      Write<const String&>(String::Handle(Z, prefix.name()));
+      break;
+    }
+    case kMintCid:
+      ASSERT(x.IsCanonical());
+      Write<int64_t>(Integer::Cast(x).AsInt64Value());
+      break;
+    case kNullCid:
+      break;
+    case kOneByteStringCid: {
+      ASSERT(x.IsCanonical());
+      const auto& str = String::Cast(x);
+      const intptr_t length = str.Length();
+      Write<intptr_t>(length);
+      NoSafepointScope no_safepoint;
+      uint8_t* latin1 = OneByteString::DataStart(str);
+      stream_->WriteBytes(latin1, length);
+      break;
+    }
+    case kSentinelCid:
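+      // Only the two well-known sentinel values are expected; a bool
+      // distinguishes Object::sentinel() from Object::transition_sentinel().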
+      if (x.ptr() == Object::sentinel().ptr()) {
+        Write<bool>(true);
+      } else if (x.ptr() == Object::transition_sentinel().ptr()) {
+        Write<bool>(false);
+      } else {
+        UNIMPLEMENTED();
+      }
+      break;
+    case kSmiCid:
+      Write<intptr_t>(Smi::Cast(x).Value());
+      break;
+    case kTwoByteStringCid: {
+      ASSERT(x.IsCanonical());
+      const auto& str = String::Cast(x);
+      const intptr_t length = str.Length();
+      Write<intptr_t>(length);
+      NoSafepointScope no_safepoint;
+      uint16_t* utf16 = TwoByteString::DataStart(str);
+      stream_->WriteBytes(reinterpret_cast<const uint8_t*>(utf16),
+                          length * sizeof(uint16_t));
+      break;
+    }
+    case kTypeCid: {
+      const auto& type = Type::Cast(x);
+      ASSERT(type.IsFinalized());
+      const auto& cls = Class::Handle(Z, type.type_class());
+      TypeScope type_scope(this, type.IsRecursive() && cls.IsGeneric());
+      Write<int8_t>(static_cast<int8_t>(type.nullability()));
+      Write<classid_t>(type.type_class_id());
+      if (cls.IsGeneric()) {
+        const auto& type_args = TypeArguments::Handle(Z, type.arguments());
+        Write<const TypeArguments&>(type_args);
+      }
+      Write<bool>(type_scope.CanBeCanonicalized());
+      break;
+    }
+    case kTypeArgumentsCid: {
+      const auto& type_args = TypeArguments::Cast(x);
+      ASSERT(type_args.IsFinalized());
+      TypeScope type_scope(this, type_args.IsRecursive());
+      const intptr_t len = type_args.Length();
+      Write<intptr_t>(len);
+      auto& type = AbstractType::Handle(Z);
+      for (intptr_t i = 0; i < len; ++i) {
+        type = type_args.TypeAt(i);
+        Write<const AbstractType&>(type);
+      }
+      Write<bool>(type_scope.CanBeCanonicalized());
+      break;
+    }
+    case kTypeParameterCid: {
+      const auto& tp = TypeParameter::Cast(x);
+      ASSERT(tp.IsFinalized());
+      TypeScope type_scope(this, tp.IsRecursive());
+      Write<classid_t>(tp.parameterized_class_id());
+      Write<intptr_t>(tp.base());
+      Write<intptr_t>(tp.index());
+      Write<int8_t>(static_cast<int8_t>(tp.nullability()));
+      Write<const AbstractType&>(AbstractType::Handle(Z, tp.bound()));
+      Write<bool>(type_scope.CanBeCanonicalized());
+      break;
+    }
+    case kTypeParametersCid: {
+      const auto& tps = TypeParameters::Cast(x);
+      Write<const Array&>(Array::Handle(Z, tps.names()));
+      Write<const Array&>(Array::Handle(Z, tps.flags()));
+      Write<const TypeArguments&>(TypeArguments::Handle(Z, tps.bounds()));
+      Write<const TypeArguments&>(TypeArguments::Handle(Z, tps.defaults()));
+      break;
+    }
+    case kTypeRefCid: {
+      const auto& tr = TypeRef::Cast(x);
+      ASSERT(tr.IsFinalized());
+      TypeScope type_scope(this, tr.IsRecursive());
+      Write<const AbstractType&>(AbstractType::Handle(Z, tr.type()));
+      Write<bool>(type_scope.CanBeCanonicalized());
+      break;
+    }
+    default: {
+      const classid_t cid = x.GetClassId();
+      if ((cid >= kNumPredefinedCids) || (cid == kInstanceCid)) {
+        const auto& instance = Instance::Cast(x);
+        ASSERT(instance.IsCanonical());
+        const auto& cls =
+            Class::Handle(Z, isolate_group()->class_table()->At(cid));
+        const auto unboxed_fields_bitmap =
+            isolate_group()->shared_class_table()->GetUnboxedFieldsMapAt(cid);
+        const intptr_t next_field_offset = cls.host_next_field_offset();
+        auto& obj = Object::Handle(Z);
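+        // Walk the instance field by field: unboxed fields are copied as
+        // raw 32/64-bit values, boxed fields recurse via
+        // Write<const Object&>.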
+        for (intptr_t offset = Instance::NextFieldOffset();
+             offset < next_field_offset; offset += kCompressedWordSize) {
+          if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
+            if (kCompressedWordSize == 8) {
+              Write<int64_t>(*reinterpret_cast<int64_t*>(
+                  instance.RawFieldAddrAtOffset(offset)));
+            } else {
+              Write<int32_t>(*reinterpret_cast<int32_t*>(
+                  instance.RawFieldAddrAtOffset(offset)));
+            }
+          } else {
+            obj = instance.RawGetFieldAtOffset(offset);
+            Write<const Object&>(obj);
+          }
+        }
+        break;
+      }
+      FATAL("Unimplemented WriteObjectImpl for %s", x.ToCString());
+    }
+  }
+}
+
+const Object& FlowGraphDeserializer::ReadObjectImpl(intptr_t cid,
+                                                    intptr_t object_index) {
+  switch (cid) {
+    case kArrayCid:
+    case kImmutableArrayCid: {
+      const intptr_t len = Read<intptr_t>();
+      const auto& type_args = Read<const TypeArguments&>();
+      if ((len == 0) && type_args.IsNull()) {
+        return Object::empty_array();
+      }
+      const bool canonicalize = Read<bool>();
+      auto& array = Array::ZoneHandle(
+          Z, Array::New(len, canonicalize ? Heap::kNew : Heap::kOld));
+      if (!type_args.IsNull()) {
+        array.SetTypeArguments(type_args);
+      }
+      for (intptr_t i = 0; i < len; ++i) {
+        array.SetAt(i, Read<const Object&>());
+      }
+      if (cid == kImmutableArrayCid) {
+        array.MakeImmutable();
+      }
+      if (canonicalize) {
+        array ^= array.Canonicalize(thread());
+      }
+      return array;
+    }
+    case kBoolCid:
+      return Bool::Get(Read<bool>());
+    case kClosureCid: {
+      const auto& instantiator_type_arguments = Read<const TypeArguments&>();
+      const auto& function_type_arguments = Read<const TypeArguments&>();
+      const auto& delayed_type_arguments = Read<const TypeArguments&>();
+      const auto& function = Read<const Function&>();
+      auto& closure = Closure::ZoneHandle(
+          Z,
+          Closure::New(instantiator_type_arguments, function_type_arguments,
+                       delayed_type_arguments, function, Context::Handle(Z)));
+      closure ^= closure.Canonicalize(thread());
+      return closure;
+    }
+    case kDoubleCid:
+      return Double::ZoneHandle(Z, Double::NewCanonical(Read<double>()));
+    case kFieldCid: {
+      const classid_t owner_class_id = Read<classid_t>();
+      const intptr_t field_index = Read<intptr_t>();
+      const auto& owner = Class::Handle(Z, GetClassById(owner_class_id));
+      auto& result = Field::ZoneHandle(Z, owner.FieldFromIndex(field_index));
+      ASSERT(!result.IsNull());
+      return result;
+    }
+    case kFunctionCid:
+      return Read<const Function&>();
+    case kFunctionTypeCid: {
+      const Nullability nullability = static_cast<Nullability>(Read<int8_t>());
+      auto& result =
+          FunctionType::ZoneHandle(Z, FunctionType::New(0, nullability));
+      SetObjectAt(object_index, result);
+      result.set_packed_parameter_counts(Read<uint32_t>());
+      result.set_packed_type_parameter_counts(Read<uint16_t>());
+      result.SetTypeParameters(Read<const TypeParameters&>());
+      result.set_result_type(Read<const AbstractType&>());
+      const Array& param_types =
+          Array::Handle(Z, Array::New(result.NumParameters(), Heap::kOld));
+      for (intptr_t i = 0, n = result.NumParameters(); i < n; ++i) {
+        param_types.SetAt(i, Read<const AbstractType&>());
+      }
+      result.set_parameter_types(param_types);
+      result.set_named_parameter_names(Read<const Array&>());
+      result.SetIsFinalized();
+      result ^= MaybeCanonicalize(result, object_index, Read<bool>());
+      return result;
+    }
+    case kICDataCid: {
+      const ICData::RebindRule rebind_rule =
+          static_cast<ICData::RebindRule>(Read<int8_t>());
+      const auto& owner = Read<const Function&>();
+      const auto& arguments_descriptor = Read<const Array&>();
+      const intptr_t deopt_id = Read<intptr_t>();
+      const intptr_t num_args_tested = Read<intptr_t>();
+
+      if (rebind_rule == ICData::kStatic) {
+        const auto& target = Read<const Function&>();
+        return ICData::ZoneHandle(
+            Z,
+            ICData::NewForStaticCall(owner, target, arguments_descriptor,
+                                     deopt_id, num_args_tested, rebind_rule));
+      } else if (rebind_rule == ICData::kInstance) {
+        const auto& target_name = Read<const String&>();
+        return ICData::ZoneHandle(
+            Z, ICData::New(owner, target_name, arguments_descriptor, deopt_id,
+                           num_args_tested, rebind_rule));
+      } else {
+        UNIMPLEMENTED();
+      }
+      break;
+    }
+    case kImmutableLinkedHashMapCid:
+    case kImmutableLinkedHashSetCid: {
+      const intptr_t length = Read<intptr_t>();
+      const auto& type_args = Read<const TypeArguments&>();
+      Instance& result = Instance::ZoneHandle(Z);
+      intptr_t used_data;
+      if (cid == kImmutableLinkedHashMapCid) {
+        result = ImmutableLinkedHashMap::NewUninitialized(Heap::kOld);
+        used_data = (length << 1);
+      } else {
+        result = ImmutableLinkedHashSet::NewUninitialized(Heap::kOld);
+        used_data = length;
+      }
+      // LinkedHashBase is not a proper handle type, so we
+      // cannot create a LinkedHashBase handle upfront.
+      const LinkedHashBase& map = LinkedHashBase::Cast(result);
+      map.SetTypeArguments(type_args);
+      map.set_used_data(used_data);
+      const auto& data = Array::Handle(Z, Array::New(used_data));
+      map.set_data(data);
+      map.set_deleted_keys(0);
+      map.ComputeAndSetHashMask();
+      for (intptr_t i = 0; i < used_data; ++i) {
+        data.SetAt(i, Read<const Object&>());
+      }
+      result ^= result.Canonicalize(thread());
+      return result;
+    }
+    case kLibraryPrefixCid: {
+      const Class& toplevel_class =
+          Class::Handle(Z, GetClassById(Read<classid_t>()));
+      const Library& library = Library::Handle(Z, toplevel_class.library());
+      const String& name = Read<const String&>();
+      const auto& prefix =
+          LibraryPrefix::ZoneHandle(Z, library.LookupLocalLibraryPrefix(name));
+      ASSERT(!prefix.IsNull());
+      return prefix;
+    }
+    case kMintCid: {
+      const int64_t value = Read<int64_t>();
+      return Integer::ZoneHandle(Z, Integer::NewCanonical(value));
+    }
+    case kNullCid:
+      return Object::null_object();
+    case kOneByteStringCid: {
+      const intptr_t length = Read<intptr_t>();
+      uint8_t* latin1 = Z->Alloc<uint8_t>(length);
+      stream_->ReadBytes(latin1, length);
+      return String::ZoneHandle(Z,
+                                Symbols::FromLatin1(thread(), latin1, length));
+    }
+    case kSentinelCid:
+      return Read<bool>() ? Object::sentinel() : Object::transition_sentinel();
+    case kSmiCid:
+      return Smi::ZoneHandle(Z, Smi::New(Read<intptr_t>()));
+    case kTwoByteStringCid: {
+      const intptr_t length = Read<intptr_t>();
+      uint16_t* utf16 = Z->Alloc<uint16_t>(length);
+      stream_->ReadBytes(reinterpret_cast<uint8_t*>(utf16),
+                         length * sizeof(uint16_t));
+      return String::ZoneHandle(Z, Symbols::FromUTF16(thread(), utf16, length));
+    }
+    case kTypeCid: {
+      const Nullability nullability = static_cast<Nullability>(Read<int8_t>());
+      const classid_t type_class_id = Read<classid_t>();
+      const auto& cls = Class::Handle(Z, GetClassById(type_class_id));
+      auto& result = Type::ZoneHandle(Z);
+      if (cls.IsGeneric()) {
+        result = Type::New(cls, Object::null_type_arguments(), nullability);
+        SetObjectAt(object_index, result);
+        const auto& type_args = Read<const TypeArguments&>();
+        result.set_arguments(type_args);
+        result.SetIsFinalized();
+      } else {
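+        // A non-generic class has a unique declaration type; reuse it and
+        // adjust only the nullability.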
+        result = cls.DeclarationType();
+        result = result.ToNullability(nullability, Heap::kOld);
+      }
+      result ^= MaybeCanonicalize(result, object_index, Read<bool>());
+      return result;
+    }
+    case kTypeArgumentsCid: {
+      const intptr_t len = Read<intptr_t>();
+      auto& type_args = TypeArguments::ZoneHandle(Z, TypeArguments::New(len));
+      SetObjectAt(object_index, type_args);
+      for (intptr_t i = 0; i < len; ++i) {
+        type_args.SetTypeAt(i, Read<const AbstractType&>());
+      }
+      type_args ^= MaybeCanonicalize(type_args, object_index, Read<bool>());
+      return type_args;
+    }
+    case kTypeParameterCid: {
+      const classid_t parameterized_class_id = Read<classid_t>();
+      const intptr_t base = Read<intptr_t>();
+      const intptr_t index = Read<intptr_t>();
+      const Nullability nullability = static_cast<Nullability>(Read<int8_t>());
+      const auto& parameterized_class =
+          Class::Handle(Z, (parameterized_class_id == kFunctionCid)
+                               ? Class::null()
+                               : GetClassById(parameterized_class_id));
+      auto& tp = TypeParameter::ZoneHandle(
+          Z, TypeParameter::New(parameterized_class, base, index,
+                                /*bound=*/Object::null_abstract_type(),
+                                nullability));
+      SetObjectAt(object_index, tp);
+      const auto& bound = Read<const AbstractType&>();
+      tp.set_bound(bound);
+      tp.SetIsFinalized();
+      tp ^= MaybeCanonicalize(tp, object_index, Read<bool>());
+      return tp;
+    }
+    case kTypeParametersCid: {
+      const auto& tps = TypeParameters::ZoneHandle(Z, TypeParameters::New());
+      tps.set_names(Read<const Array&>());
+      tps.set_flags(Read<const Array&>());
+      tps.set_bounds(Read<const TypeArguments&>());
+      tps.set_defaults(Read<const TypeArguments&>());
+      return tps;
+    }
+    case kTypeRefCid: {
+      auto& tr =
+          TypeRef::ZoneHandle(Z, TypeRef::New(Object::null_abstract_type()));
+      SetObjectAt(object_index, tr);
+      const auto& type = Read<const AbstractType&>();
+      ASSERT(!type.IsNull());
+      tr.set_type(type);
+      tr ^= MaybeCanonicalize(tr, object_index, Read<bool>());
+      return tr;
+    }
+    default:
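+      // Instances of user-defined classes are read field by field,
+      // mirroring the default case in WriteObjectImpl.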
+      if ((cid >= kNumPredefinedCids) || (cid == kInstanceCid)) {
+        const auto& cls = Class::Handle(Z, GetClassById(cid));
+        const auto unboxed_fields_bitmap =
+            isolate_group()->shared_class_table()->GetUnboxedFieldsMapAt(cid);
+        const intptr_t next_field_offset = cls.host_next_field_offset();
+        auto& instance = Instance::ZoneHandle(Z, Instance::New(cls));
+        for (intptr_t offset = Instance::NextFieldOffset();
+             offset < next_field_offset; offset += kCompressedWordSize) {
+          if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
+            if (kCompressedWordSize == 8) {
+              const int64_t v = Read<int64_t>();
+              *reinterpret_cast<int64_t*>(
+                  instance.RawFieldAddrAtOffset(offset)) = v;
+            } else {
+              const int32_t v = Read<int32_t>();
+              *reinterpret_cast<int32_t*>(
+                  instance.RawFieldAddrAtOffset(offset)) = v;
+            }
+          } else {
+            const auto& obj = Read<const Object&>();
+            instance.RawSetFieldAtOffset(offset, obj);
+          }
+        }
+        instance = instance.Canonicalize(thread());
+        return instance;
+      }
+  }
+  UNIMPLEMENTED();
+  return Object::null_object();
+}
+
+InstancePtr FlowGraphDeserializer::MaybeCanonicalize(
+    const Instance& obj,
+    intptr_t object_index,
+    bool can_be_canonicalized) {
+  if (can_be_canonicalized) {
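+    // Canonicalize this object together with any pending inner objects
+    // created while reading it (their indices are greater than
+    // object_index). Objects with smaller indices remain pending.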
+    intptr_t remaining = 0;
+    for (intptr_t idx : pending_canonicalization_) {
+      if (idx < object_index) {
+        pending_canonicalization_[remaining++] = idx;
+      } else {
+        objects_[idx] = &Instance::ZoneHandle(
+            Z, Instance::Cast(*objects_[idx]).Canonicalize(thread()));
+      }
+    }
+    pending_canonicalization_.TruncateTo(remaining);
+    return obj.Canonicalize(thread());
+  } else {
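+    // This object is part of a recursive type which is not fully
+    // deserialized yet, so postpone its canonicalization until the
+    // enclosing object is read.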
+    ASSERT(objects_[object_index]->ptr() == obj.ptr());
+    pending_canonicalization_.Add(object_index);
+    return obj.ptr();
+  }
+}
+
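+// Handle types which are serialized through the generic Write<const Object&>
+// and Read<const Object&> methods. On the read side a null object is
+// converted to the corresponding type-specific null handle.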
+#define HANDLES_SERIALIZABLE_AS_OBJECT(V)                                      \
+  V(AbstractType, Object::null_abstract_type())                                \
+  V(Array, Object::null_array())                                               \
+  V(Field, Field::Handle(Z))                                                   \
+  V(FunctionType, Object::null_function_type())                                \
+  V(String, Object::null_string())                                             \
+  V(TypeArguments, Object::null_type_arguments())                              \
+  V(TypeParameters, TypeParameters::Handle(Z))
+
+#define SERIALIZE_HANDLE_AS_OBJECT(handle, null_handle)                        \
+  template <>                                                                  \
+  void FlowGraphSerializer::Write<const handle&>(const handle& x) {            \
+    Write<const Object&>(x);                                                   \
+  }                                                                            \
+  template <>                                                                  \
+  const handle& FlowGraphDeserializer::Read<const handle&>() {                 \
+    const Object& result = Read<const Object&>();                              \
+    if (result.IsNull()) {                                                     \
+      return null_handle;                                                      \
+    }                                                                          \
+    return handle::Cast(result);                                               \
+  }
+
+HANDLES_SERIALIZABLE_AS_OBJECT(SERIALIZE_HANDLE_AS_OBJECT)
+#undef SERIALIZE_HANDLE_AS_OBJECT
+#undef HANDLES_SERIALIZABLE_AS_OBJECT
+
+void OsrEntryInstr::WriteTo(FlowGraphSerializer* s) {
+  BlockEntryWithInitialDefs::WriteTo(s);
+}
+
+OsrEntryInstr::OsrEntryInstr(FlowGraphDeserializer* d)
+    : BlockEntryWithInitialDefs(d), graph_entry_(d->graph_entry()) {}
+
+void ParallelMoveInstr::WriteExtra(FlowGraphSerializer* s) {
+  Instruction::WriteExtra(s);
+  s->Write<GrowableArray<MoveOperands*>>(moves_);
+}
+
+void ParallelMoveInstr::ReadExtra(FlowGraphDeserializer* d) {
+  Instruction::ReadExtra(d);
+  moves_ = d->Read<GrowableArray<MoveOperands*>>();
+}
+
+void PhiInstr::WriteTo(FlowGraphSerializer* s) {
+  VariadicDefinition::WriteTo(s);
+  s->Write<Representation>(representation_);
+  s->Write<bool>(is_alive_);
+  s->Write<int8_t>(is_receiver_);
+}
+
+PhiInstr::PhiInstr(FlowGraphDeserializer* d)
+    : VariadicDefinition(d),
+      block_(d->current_block()->AsJoinEntry()),
+      representation_(d->Read<Representation>()),
+      is_alive_(d->Read<bool>()),
+      is_receiver_(d->Read<int8_t>()) {}
+
+template <>
+void FlowGraphSerializer::Write<Range*>(Range* x) {
+  if (x == nullptr) {
+    Write<bool>(false);
+  } else {
+    Write<bool>(true);
+    x->Write(this);
+  }
+}
+
+template <>
+Range* FlowGraphDeserializer::Read<Range*>() {
+  if (!Read<bool>()) {
+    return nullptr;
+  }
+  return new (Z) Range(this);
+}
+
+void Range::Write(FlowGraphSerializer* s) const {
+  min_.Write(s);
+  max_.Write(s);
+}
+
+Range::Range(FlowGraphDeserializer* d)
+    : min_(RangeBoundary(d)), max_(RangeBoundary(d)) {}
+
+void RangeBoundary::Write(FlowGraphSerializer* s) const {
+  s->Write<int8_t>(kind_);
+  s->Write<int64_t>(value_);
+  s->Write<int64_t>(offset_);
+}
+
+RangeBoundary::RangeBoundary(FlowGraphDeserializer* d)
+    : kind_(static_cast<Kind>(d->Read<int8_t>())),
+      value_(d->Read<int64_t>()),
+      offset_(d->Read<int64_t>()) {}
+
+void RegisterSet::Write(FlowGraphSerializer* s) const {
+  s->Write<uintptr_t>(cpu_registers_.data());
+  s->Write<uintptr_t>(untagged_cpu_registers_.data());
+  s->Write<uintptr_t>(fpu_registers_.data());
+}
+
+RegisterSet::RegisterSet(FlowGraphDeserializer* d)
+    : cpu_registers_(d->Read<uintptr_t>()),
+      untagged_cpu_registers_(d->Read<uintptr_t>()),
+      fpu_registers_(d->Read<uintptr_t>()) {}
+
+template <>
+void FlowGraphSerializer::Write<Representation>(Representation x) {
+  Write<uint8_t>(x);
+}
+
+template <>
+Representation FlowGraphDeserializer::Read<Representation>() {
+  return static_cast<Representation>(Read<uint8_t>());
+}
+
+template <>
+void FlowGraphSerializer::Write<const Slot&>(const Slot& x) {
+  x.Write(this);
+}
+
+template <>
+const Slot& FlowGraphDeserializer::Read<const Slot&>() {
+  return Slot::Read(this);
+}
+
+template <>
+void FlowGraphSerializer::Write<const Slot*>(const Slot* x) {
+  if (x == nullptr) {
+    Write<bool>(false);
+    return;
+  }
+  Write<bool>(true);
+  x->Write(this);
+}
+
+template <>
+const Slot* FlowGraphDeserializer::Read<const Slot*>() {
+  if (!Read<bool>()) {
+    return nullptr;
+  }
+  return &Slot::Read(this);
+}
+
+void Slot::Write(FlowGraphSerializer* s) const {
+  s->Write<serializable_type_t<Kind>>(
+      static_cast<serializable_type_t<Kind>>(kind_));
+
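+  // Only non-native slots carry an extra payload; a native slot is fully
+  // identified by its kind (see the default cases here and in Slot::Read).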
+  switch (kind_) {
+    case Kind::kTypeArguments:
+      s->Write<int8_t>(flags_);
+      s->Write<intptr_t>(offset_in_bytes_);
+      break;
+    case Kind::kTypeArgumentsIndex:
+      s->Write<intptr_t>(offset_in_bytes_);
+      break;
+    case Kind::kArrayElement:
+      s->Write<intptr_t>(offset_in_bytes_);
+      break;
+    case Kind::kCapturedVariable:
+      s->Write<int8_t>(flags_);
+      s->Write<intptr_t>(offset_in_bytes_);
+      s->Write<const String&>(*DataAs<const String>());
+      s->Write<const AbstractType&>(*static_type_);
+      break;
+    case Kind::kDartField:
+      s->Write<const Field&>(field());
+      break;
+    default:
+      break;
+  }
+}
+
+const Slot& Slot::Read(FlowGraphDeserializer* d) {
+  const Kind kind = static_cast<Kind>(d->Read<serializable_type_t<Kind>>());
+  int8_t flags = 0;
+  ClassIdTagType cid = kDynamicCid;
+  intptr_t offset = -1;
+  const void* data = nullptr;
+  const AbstractType* static_type = nullptr;
+  Representation representation = kTagged;
+
+  switch (kind) {
+    case Kind::kTypeArguments:
+      flags = d->Read<int8_t>();
+      offset = d->Read<intptr_t>();
+      cid = kTypeArgumentsCid;
+      data = ":type_arguments";
+      break;
+    case Kind::kTypeArgumentsIndex:
+      flags =
+          IsImmutableBit::encode(true) |
+          IsCompressedBit::encode(TypeArguments::ContainsCompressedPointers());
+      offset = d->Read<intptr_t>();
+      data = ":argument";
+      break;
+    case Kind::kArrayElement:
+      flags = IsNullableBit::encode(true) |
+              IsCompressedBit::encode(Array::ContainsCompressedPointers());
+      offset = d->Read<intptr_t>();
+      data = ":array_element";
+      break;
+    case Kind::kCapturedVariable:
+      flags = d->Read<int8_t>();
+      offset = d->Read<intptr_t>();
+      data = &d->Read<const String&>();
+      static_type = &d->Read<const AbstractType&>();
+      break;
+    case Kind::kDartField: {
+      const Field& field = d->Read<const Field&>();
+      return Slot::Get(field, &d->parsed_function());
+    }
+    default:
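+      // All remaining kinds are native slots, which are cached statically
+      // and can be looked up by kind alone.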
+      return Slot::GetNativeSlot(kind);
+  }
+
+  return GetCanonicalSlot(d->thread(), kind, flags, cid, offset, data,
+                          static_type, representation);
+}
+
+template <>
+void FlowGraphSerializer::Write<const compiler::TableSelector*>(
+    const compiler::TableSelector* x) {
+#if defined(DART_PRECOMPILER)
+  ASSERT(x != nullptr);
+  Write<int32_t>(x->id);
+#else
+  UNREACHABLE();
+#endif
+}
+
+template <>
+const compiler::TableSelector*
+FlowGraphDeserializer::Read<const compiler::TableSelector*>() {
+#if defined(DART_PRECOMPILER)
+  const int32_t id = Read<int32_t>();
+  const compiler::TableSelector* selector =
+      Precompiler::Instance()->selector_map()->GetSelector(id);
+  ASSERT(selector != nullptr);
+  return selector;
+#else
+  UNREACHABLE();
+#endif
+}
+
+void SpecialParameterInstr::WriteExtra(FlowGraphSerializer* s) {
+  TemplateDefinition::WriteExtra(s);
+  s->WriteRef<BlockEntryInstr*>(block_);
+}
+
+void SpecialParameterInstr::ReadExtra(FlowGraphDeserializer* d) {
+  TemplateDefinition::ReadExtra(d);
+  block_ = d->ReadRef<BlockEntryInstr*>();
+}
+
+template <intptr_t kExtraInputs>
+void TemplateDartCall<kExtraInputs>::WriteExtra(FlowGraphSerializer* s) {
+  VariadicDefinition::WriteExtra(s);
+  if (push_arguments_ == nullptr) {
+    s->Write<intptr_t>(-1);
+  } else {
+    s->Write<intptr_t>(push_arguments_->length());
+#if defined(DEBUG)
+    // Verify that PushArgument instructions are inserted immediately
+    // before this instruction. ReadExtra below relies on this
+    // invariant when restoring push_arguments_.
+    Instruction* instr = this;
+    for (intptr_t i = push_arguments_->length() - 1; i >= 0; --i) {
+      do {
+        instr = instr->previous();
+        ASSERT(instr != nullptr);
+      } while (!instr->IsPushArgument());
+      ASSERT(instr == (*push_arguments_)[i]);
+    }
+#endif
+  }
+}
+
+template <intptr_t kExtraInputs>
+void TemplateDartCall<kExtraInputs>::ReadExtra(FlowGraphDeserializer* d) {
+  VariadicDefinition::ReadExtra(d);
+  const intptr_t num_push_args = d->Read<intptr_t>();
+  if (num_push_args >= 0) {
+    push_arguments_ =
+        new (d->zone()) PushArgumentsArray(d->zone(), num_push_args);
+    push_arguments_->EnsureLength(num_push_args, nullptr);
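+    // PushArgument instructions immediately precede this instruction
+    // (verified in WriteExtra above), so they can be found by walking
+    // backwards from it.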
+    Instruction* instr = this;
+    for (int i = num_push_args - 1; i >= 0; --i) {
+      do {
+        instr = instr->previous();
+        ASSERT(instr != nullptr);
+      } while (!instr->IsPushArgument());
+      (*push_arguments_)[i] = instr->AsPushArgument();
+    }
+    if (env() != nullptr) {
+      RepairPushArgsInEnvironment();
+    }
+  }
+}
+
+// Explicit template instantiations, needed for the methods above.
+template class TemplateDartCall<0>;
+template class TemplateDartCall<1>;
+
+template <>
+void FlowGraphSerializer::Write<TokenPosition>(TokenPosition x) {
+  Write<int32_t>(x.Serialize());
+}
+
+template <>
+TokenPosition FlowGraphDeserializer::Read<TokenPosition>() {
+  return TokenPosition::Deserialize(Read<int32_t>());
+}
+
+template <>
+void FlowGraphSerializer::Write<uint8_t>(uint8_t x) {
+  stream_->Write<uint8_t>(x);
+}
+
+template <>
+uint8_t FlowGraphDeserializer::Read<uint8_t>() {
+  return stream_->Read<uint8_t>();
+}
+
+template <>
+void FlowGraphSerializer::Write<uint16_t>(uint16_t x) {
+  stream_->Write<uint16_t>(x);
+}
+
+template <>
+uint16_t FlowGraphDeserializer::Read<uint16_t>() {
+  return stream_->Read<uint16_t>();
+}
+
+template <>
+void FlowGraphSerializer::Write<uint32_t>(uint32_t x) {
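+  // Written via the signed type of the same width; the static_casts on the
+  // write and read sides preserve the bit pattern.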
+  stream_->Write<int32_t>(static_cast<int32_t>(x));
+}
+
+template <>
+uint32_t FlowGraphDeserializer::Read<uint32_t>() {
+  return static_cast<uint32_t>(stream_->Read<int32_t>());
+}
+
+template <>
+void FlowGraphSerializer::Write<uint64_t>(uint64_t x) {
+  stream_->Write<int64_t>(static_cast<int64_t>(x));
+}
+
+template <>
+uint64_t FlowGraphDeserializer::Read<uint64_t>() {
+  return static_cast<uint64_t>(stream_->Read<int64_t>());
+}
+
+void UnboxedConstantInstr::WriteTo(FlowGraphSerializer* s) {
+  ConstantInstr::WriteTo(s);
+  s->Write<Representation>(representation_);
+  // constant_address_ is not written; it is recomputed when reading.
+}
+
+UnboxedConstantInstr::UnboxedConstantInstr(FlowGraphDeserializer* d)
+    : ConstantInstr(d),
+      representation_(d->Read<Representation>()),
+      constant_address_(0) {
+  if (representation_ == kUnboxedDouble) {
+    ASSERT(value().IsDouble());
+    constant_address_ = FindDoubleConstant(Double::Cast(value()).value());
+  }
+}
+
+template <>
+void FlowGraphSerializer::Write<Value*>(Value* x) {
+  ASSERT(can_write_refs());
+  CompileType* reaching_type = x->reaching_type();
+  Definition* def = x->definition();
+  // Omit the reaching type if it is the same as the definition's type.
+  if ((reaching_type != nullptr) && def->HasType() &&
+      (reaching_type == def->Type())) {
+    reaching_type = nullptr;
+  }
+  Write<CompileType*>(reaching_type);
+  WriteRef<Definition*>(def);
+}
+
+template <>
+Value* FlowGraphDeserializer::Read<Value*>() {
+  CompileType* type = Read<CompileType*>();
+  Definition* def = ReadRef<Definition*>();
+  Value* value = new (Z) Value(def);
+  value->SetReachingType(type);
+  return value;
+}
+
+void VariadicDefinition::WriteTo(FlowGraphSerializer* s) {
+  Definition::WriteTo(s);
+  s->Write<intptr_t>(inputs_.length());
+}
+
+VariadicDefinition::VariadicDefinition(FlowGraphDeserializer* d)
+    : Definition(d), inputs_(d->zone(), 0) {
+  const intptr_t num_inputs = d->Read<intptr_t>();
+  inputs_.EnsureLength(num_inputs, nullptr);
+}
+
+}  // namespace dart
diff --git a/runtime/vm/compiler/backend/il_serializer.h b/runtime/vm/compiler/backend/il_serializer.h
new file mode 100644
index 0000000..07948c2
--- /dev/null
+++ b/runtime/vm/compiler/backend/il_serializer.h
@@ -0,0 +1,473 @@
+// Copyright (c) 2022, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_COMPILER_BACKEND_IL_SERIALIZER_H_
+#define RUNTIME_VM_COMPILER_BACKEND_IL_SERIALIZER_H_
+
+#if defined(DART_PRECOMPILED_RUNTIME)
+#error "AOT runtime should not use compiler sources (including header files)"
+#endif  // defined(DART_PRECOMPILED_RUNTIME)
+
+#include <utility>  // For std::move.
+
+#include "platform/globals.h"
+#include "vm/allocation.h"
+#include "vm/compiler/backend/locations.h"
+
+namespace dart {
+
+class AliasIdentity;
+class BlockEntryInstr;
+class CallTargets;
+class CatchBlockEntryInstr;
+struct CidRangeValue;
+class Cids;
+class Code;
+class ComparisonInstr;
+class CompileType;
+class Definition;
+class Environment;
+class FlowGraph;
+class FunctionEntryInstr;
+class GraphEntryInstr;
+class Heap;
+class IndirectEntryInstr;
+class Instruction;
+class JoinEntryInstr;
+class LocalVariable;
+class LocationSummary;
+class MoveOperands;
+class NonStreamingWriteStream;
+class OsrEntryInstr;
+class ParallelMoveInstr;
+class ParsedFunction;
+class PhiInstr;
+class Range;
+class ReadStream;
+class TargetEntryInstr;
+class TokenPosition;
+
+namespace compiler {
+struct TableSelector;
+
+namespace ffi {
+class CallbackMarshaller;
+class CallMarshaller;
+class NativeCallingConvention;
+}  // namespace ffi
+}  // namespace compiler
+
+// The list of types which are handled by the flow graph serializer and
+// deserializer. For each type there are corresponding Write<T>(T) and
+// Read<T>() methods.
+//
+// This list includes all types of fields of IL instructions
+// which are serialized via DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS macro,
+// except enum types which are unwrapped with serializable_type_t.
+//
+// The list is sorted alphabetically by type name.
+#define IL_SERIALIZABLE_TYPE_LIST(V)                                           \
+  V(AliasIdentity)                                                             \
+  V(const AbstractType&)                                                       \
+  V(const AbstractType*)                                                       \
+  V(const Array&)                                                              \
+  V(bool)                                                                      \
+  V(const compiler::ffi::CallbackMarshaller&)                                  \
+  V(const compiler::ffi::CallMarshaller&)                                      \
+  V(const CallTargets&)                                                        \
+  V(const char*)                                                               \
+  V(CidRangeValue)                                                             \
+  V(const Cids&)                                                               \
+  V(const Class&)                                                              \
+  V(const Code&)                                                               \
+  V(ComparisonInstr*)                                                          \
+  V(CompileType*)                                                              \
+  V(ConstantInstr*)                                                            \
+  V(Definition*)                                                               \
+  V(double)                                                                    \
+  V(Environment*)                                                              \
+  V(const Field&)                                                              \
+  V(const Function&)                                                           \
+  V(const FunctionType&)                                                       \
+  V(const ICData*)                                                             \
+  V(int8_t)                                                                    \
+  V(int16_t)                                                                   \
+  V(int32_t)                                                                   \
+  V(int64_t)                                                                   \
+  V(Instruction*)                                                              \
+  V(const LocalVariable&)                                                      \
+  V(LocationSummary*)                                                          \
+  V(MoveOperands*)                                                             \
+  V(const compiler::ffi::NativeCallingConvention&)                             \
+  V(const Object&)                                                             \
+  V(ParallelMoveInstr*)                                                        \
+  V(PhiInstr*)                                                                 \
+  V(Range*)                                                                    \
+  V(Representation)                                                            \
+  V(const Slot&)                                                               \
+  V(const Slot*)                                                               \
+  V(const String&)                                                             \
+  V(const compiler::TableSelector*)                                            \
+  V(TokenPosition)                                                             \
+  V(const TypeArguments&)                                                      \
+  V(const TypeParameters&)                                                     \
+  V(uint8_t)                                                                   \
+  V(uint16_t)                                                                  \
+  V(uint32_t)                                                                  \
+  V(uint64_t)                                                                  \
+  V(Value*)
+
+// List of types serializable as references.
+#define IL_SERIALIZABLE_REF_TYPE_LIST(V)                                       \
+  V(BlockEntryInstr*)                                                          \
+  V(CatchBlockEntryInstr*)                                                     \
+  V(Definition*)                                                               \
+  V(FunctionEntryInstr*)                                                       \
+  V(IndirectEntryInstr*)                                                       \
+  V(JoinEntryInstr*)                                                           \
+  V(OsrEntryInstr*)                                                            \
+  V(TargetEntryInstr*)
+
+// Serializes a flow graph, including constants and references
+// to objects of the program structure.
+//
+// Each IL instruction is serialized in two steps:
+// - the main step (T::WriteTo / T::T()) serializes
+//   instruction fields, basically everything required to
+//   re-create the instruction object.
+// - the extra step (T::WriteExtra / T::ReadExtra) serializes
+//   references to other instructions, including inputs,
+//   environments, locations (which may reference constants) and successors.
+//
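+// A typical round trip (adapted from the --test_il_serialization pass in
+// compiler_pass.cc in this change):
+//
+//   ZoneWriteStream write_stream(flow_graph->zone(), 1024);
+//   FlowGraphSerializer serializer(&write_stream);
+//   serializer.WriteFlowGraph(*flow_graph, *detached_defs);
+//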
+class FlowGraphSerializer : public ValueObject {
+ public:
+  explicit FlowGraphSerializer(NonStreamingWriteStream* stream);
+  ~FlowGraphSerializer();
+
+  // Writes [flow_graph] into the stream.
+  // The graph should be compacted via CompactSSA().
+  // [detached_defs] should contain all definitions which are
+  // detached from the graph but can still be referenced from
+  // environments.
+  void WriteFlowGraph(const FlowGraph& flow_graph,
+                      const ZoneGrowableArray<Definition*>& detached_defs);
+
+  // Default implementation of the 'Write' method, used when a
+  // specialization for a particular type is not provided.
+  // This struct is used for the partial template specializations below.
+  template <typename T>
+  struct WriteTrait {
+    using ArgType = T;
+  };
+
+  template <typename T>
+  struct WriteTrait<GrowableArray<T>> {
+    using ArgType = const GrowableArray<T>&;
+    static void Write(FlowGraphSerializer* s, ArgType x) {
+      const intptr_t len = x.length();
+      s->Write<intptr_t>(len);
+      for (intptr_t i = 0; i < len; ++i) {
+        s->Write<T>(x[i]);
+      }
+    }
+  };
+
+  template <typename T>
+  struct WriteTrait<const GrowableArray<T>&> {
+    using ArgType = const GrowableArray<T>&;
+    static void Write(FlowGraphSerializer* s, ArgType x) {
+      WriteTrait<GrowableArray<T>>::Write(s, x);
+    }
+  };
+
+  template <typename T>
+  struct WriteTrait<ZoneGrowableArray<T>*> {
+    using ArgType = const ZoneGrowableArray<T>*;
+    static void Write(FlowGraphSerializer* s, ArgType x) {
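+      // A null array is encoded as length -1 to distinguish it from an
+      // empty array.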
+      if (x == nullptr) {
+        s->Write<intptr_t>(-1);
+        return;
+      }
+      const intptr_t len = x->length();
+      s->Write<intptr_t>(len);
+      for (intptr_t i = 0; i < len; ++i) {
+        s->Write<T>((*x)[i]);
+      }
+    }
+  };
+
+  template <typename T>
+  struct WriteTrait<const ZoneGrowableArray<T>&> {
+    using ArgType = const ZoneGrowableArray<T>&;
+    static void Write(FlowGraphSerializer* s, ArgType x) {
+      WriteTrait<ZoneGrowableArray<T>*>::Write(s, &x);
+    }
+  };
+
+  // Specialization in case intptr_t is not mapped to intN_t.
+  template <>
+  struct WriteTrait<intptr_t> {
+    using ArgType = intptr_t;
+    static void Write(FlowGraphSerializer* s, intptr_t x) {
+#ifdef ARCH_IS_64_BIT
+      s->Write<int64_t>(x);
+#else
+      s->Write<int32_t>(x);
+#endif
+    }
+  };
+
+  // Specialization in case uintptr_t is not mapped to uintN_t.
+  template <>
+  struct WriteTrait<uintptr_t> {
+    using ArgType = uintptr_t;
+    static void Write(FlowGraphSerializer* s, uintptr_t x) {
+#ifdef ARCH_IS_64_BIT
+      s->Write<uint64_t>(x);
+#else
+      s->Write<uint32_t>(x);
+#endif
+    }
+  };
+
+  template <typename T>
+  void Write(typename WriteTrait<T>::ArgType x) {
+    WriteTrait<T>::Write(this, x);
+  }
+
+#define DECLARE_WRITE_METHOD(type)                                             \
+  template <>                                                                  \
+  void Write<type>(type x);
+  IL_SERIALIZABLE_TYPE_LIST(DECLARE_WRITE_METHOD)
+#undef DECLARE_WRITE_METHOD
+
+  template <typename T>
+  void WriteRef(T x);
+
+#define DECLARE_WRITE_REF_METHOD(type)                                         \
+  template <>                                                                  \
+  void WriteRef<type>(type x);
+  IL_SERIALIZABLE_REF_TYPE_LIST(DECLARE_WRITE_REF_METHOD)
+#undef DECLARE_WRITE_REF_METHOD
+
+  template <typename T>
+  void WriteGrowableArrayOfRefs(const GrowableArray<T>& array) {
+    const intptr_t len = array.length();
+    Write<intptr_t>(len);
+    for (intptr_t i = 0; i < len; ++i) {
+      WriteRef<T>(array[i]);
+    }
+  }
+
+  BaseWriteStream* stream() const { return stream_; }
+  IsolateGroup* isolate_group() const { return isolate_group_; }
+  bool can_write_refs() const { return can_write_refs_; }
+
+ private:
+  void WriteObjectImpl(const Object& x, intptr_t cid, intptr_t object_index);
+
+  // Used to track scopes of recursive types during serialization.
+  struct TypeScope {
+    TypeScope(FlowGraphSerializer* serializer, bool is_recursive)
+        : serializer_(serializer),
+          is_recursive_(is_recursive),
+          was_writing_recursive_type_(serializer->writing_recursive_type_) {
+      serializer->writing_recursive_type_ = is_recursive;
+    }
+
+    ~TypeScope() {
+      serializer_->writing_recursive_type_ = was_writing_recursive_type_;
+    }
+
+    // Returns true if the type in the current scope can be canonicalized
+    // during deserialization. Recursive types which were not
+    // fully deserialized should not be canonicalized.
+    bool CanBeCanonicalized() const {
+      return !is_recursive_ || !was_writing_recursive_type_;
+    }
+
+    FlowGraphSerializer* const serializer_;
+    const bool is_recursive_;
+    const bool was_writing_recursive_type_;
+  };
+
+  NonStreamingWriteStream* stream_;
+  Zone* zone_;
+  IsolateGroup* isolate_group_;
+  Heap* heap_;
+  intptr_t object_counter_ = 0;
+  bool can_write_refs_ = false;
+  bool writing_recursive_type_ = false;
+};
+
+// Deserializes a flow graph.
+// All constants and types are canonicalized during deserialization.
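+//
+// The matching read side (adapted from the --test_il_serialization pass in
+// compiler_pass.cc in this change):
+//
+//   ReadStream read_stream(write_stream.buffer(),
+//                          write_stream.bytes_written());
+//   FlowGraphDeserializer deserializer(flow_graph->parsed_function(),
+//                                      &read_stream);
+//   FlowGraph* restored_graph = deserializer.ReadFlowGraph();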
+class FlowGraphDeserializer : public ValueObject {
+ public:
+  FlowGraphDeserializer(const ParsedFunction& parsed_function,
+                        ReadStream* stream);
+
+  const ParsedFunction& parsed_function() const { return parsed_function_; }
+
+  Zone* zone() const { return zone_; }
+  ReadStream* stream() const { return stream_; }
+  Thread* thread() const { return thread_; }
+  IsolateGroup* isolate_group() const { return isolate_group_; }
+
+  GraphEntryInstr* graph_entry() const { return graph_entry_; }
+  void set_graph_entry(GraphEntryInstr* entry) { graph_entry_ = entry; }
+
+  BlockEntryInstr* current_block() const { return current_block_; }
+  void set_current_block(BlockEntryInstr* block) { current_block_ = block; }
+
+  BlockEntryInstr* block(intptr_t block_id) const {
+    BlockEntryInstr* b = blocks_[block_id];
+    ASSERT(b != nullptr);
+    return b;
+  }
+  void set_block(intptr_t block_id, BlockEntryInstr* block) {
+    ASSERT(blocks_[block_id] == nullptr);
+    blocks_[block_id] = block;
+  }
+
+  Definition* definition(intptr_t ssa_temp_index) const {
+    Definition* def = definitions_[ssa_temp_index];
+    ASSERT(def != nullptr);
+    return def;
+  }
+  void set_definition(intptr_t ssa_temp_index, Definition* def) {
+    ASSERT(definitions_[ssa_temp_index] == nullptr);
+    definitions_[ssa_temp_index] = def;
+  }
+
+  FlowGraph* ReadFlowGraph();
+
+  // Default implementation of the 'Read' method, used when a
+  // specialization for a particular type is not provided.
+  // This struct is used for the partial template specializations below.
+  template <typename T>
+  struct ReadTrait {};
+
+  template <typename T>
+  struct ReadTrait<GrowableArray<T>> {
+    static GrowableArray<T> Read(FlowGraphDeserializer* d) {
+      const intptr_t len = d->Read<intptr_t>();
+      GrowableArray<T> array(len);
+      for (int i = 0; i < len; ++i) {
+        array.Add(d->Read<T>());
+      }
+      return array;
+    }
+  };
+
+  template <typename T>
+  struct ReadTrait<const GrowableArray<T>&> {
+    static const GrowableArray<T>& Read(FlowGraphDeserializer* d) {
+      return ReadTrait<GrowableArray<T>>::Read(d);
+    }
+  };
+
+  template <typename T>
+  struct ReadTrait<ZoneGrowableArray<T>*> {
+    static ZoneGrowableArray<T>* Read(FlowGraphDeserializer* d) {
+      const intptr_t len = d->Read<intptr_t>();
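+      // Length -1 denotes a null array (see the corresponding WriteTrait).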
+      if (len < 0) {
+        return nullptr;
+      }
+      auto* array = new (d->zone()) ZoneGrowableArray<T>(d->zone(), len);
+      for (int i = 0; i < len; ++i) {
+        array->Add(d->Read<T>());
+      }
+      return array;
+    }
+  };
+
+  template <typename T>
+  struct ReadTrait<const ZoneGrowableArray<T>&> {
+    static const ZoneGrowableArray<T>& Read(FlowGraphDeserializer* d) {
+      return *ReadTrait<ZoneGrowableArray<T>*>::Read(d);
+    }
+  };
+
+  // Specialization in case intptr_t is not mapped to intN_t.
+  template <>
+  struct ReadTrait<intptr_t> {
+    static intptr_t Read(FlowGraphDeserializer* d) {
+#ifdef ARCH_IS_64_BIT
+      return d->Read<int64_t>();
+#else
+      return d->Read<int32_t>();
+#endif
+    }
+  };
+
+  // Specialization in case uintptr_t is not mapped to uintN_t.
+  template <>
+  struct ReadTrait<uintptr_t> {
+    static uintptr_t Read(FlowGraphDeserializer* d) {
+#ifdef ARCH_IS_64_BIT
+      return d->Read<uint64_t>();
+#else
+      return d->Read<uint32_t>();
+#endif
+    }
+  };
+
+  template <typename T>
+  T Read() {
+    return ReadTrait<T>::Read(this);
+  }
+
+#define DECLARE_READ_METHOD(type)                                              \
+  template <>                                                                  \
+  type Read<type>();
+  IL_SERIALIZABLE_TYPE_LIST(DECLARE_READ_METHOD)
+#undef DECLARE_READ_METHOD
+
+  template <typename T>
+  T ReadRef();
+
+#define DECLARE_READ_REF_METHOD(type)                                          \
+  template <>                                                                  \
+  type ReadRef<type>();
+  IL_SERIALIZABLE_REF_TYPE_LIST(DECLARE_READ_REF_METHOD)
+#undef DECLARE_READ_REF_METHOD
+
+  template <typename T>
+  GrowableArray<T> ReadGrowableArrayOfRefs() {
+    const intptr_t len = Read<intptr_t>();
+    GrowableArray<T> array(len);
+    for (int i = 0; i < len; ++i) {
+      array.Add(ReadRef<T>());
+    }
+    return std::move(array);
+  }
+
+ private:
+  ClassPtr GetClassById(classid_t id) const;
+  const Object& ReadObjectImpl(intptr_t cid, intptr_t object_index);
+  void SetObjectAt(intptr_t object_index, const Object& object);
+
+  InstancePtr MaybeCanonicalize(const Instance& obj,
+                                intptr_t object_index,
+                                bool can_be_canonicalized);
+
+  const ParsedFunction& parsed_function_;
+  ReadStream* stream_;
+  Zone* zone_;
+  Thread* thread_;
+  IsolateGroup* isolate_group_;
+
+  // Deserialized objects.
+  GraphEntryInstr* graph_entry_ = nullptr;
+  BlockEntryInstr* current_block_ = nullptr;
+  GrowableArray<BlockEntryInstr*> blocks_;
+  GrowableArray<Definition*> definitions_;
+  GrowableArray<const Object*> objects_;
+  intptr_t object_counter_ = 0;
+  GrowableArray<intptr_t> pending_canonicalization_;
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_COMPILER_BACKEND_IL_SERIALIZER_H_
diff --git a/runtime/vm/compiler/backend/locations.h b/runtime/vm/compiler/backend/locations.h
index aabd12a..c82215f 100644
--- a/runtime/vm/compiler/backend/locations.h
+++ b/runtime/vm/compiler/backend/locations.h
@@ -21,6 +21,8 @@
 class BaseTextBuffer;
 class ConstantInstr;
 class Definition;
+class FlowGraphDeserializer;
+class FlowGraphSerializer;
 class PairLocation;
 class Value;
 
@@ -430,8 +432,8 @@
 
   Location Copy() const;
 
-  static Location read(uword value) { return Location(value); }
-  uword write() const { return value_; }
+  void Write(FlowGraphSerializer* s) const;
+  static Location Read(FlowGraphDeserializer* d);
 
  private:
   explicit Location(uword value) : value_(value) {}
@@ -572,8 +574,7 @@
     ASSERT(kNumberOfFpuRegisters <= (kWordSize * kBitsPerByte));
   }
 
-  explicit RegisterSet(uintptr_t cpu_register_mask,
-                       uintptr_t fpu_register_mask = 0)
+  explicit RegisterSet(uintptr_t cpu_register_mask, uintptr_t fpu_register_mask)
       : RegisterSet() {
     AddTaggedRegisters(cpu_register_mask, fpu_register_mask);
   }
@@ -717,6 +718,9 @@
     untagged_cpu_registers_.Clear();
   }
 
+  void Write(FlowGraphSerializer* s) const;
+  explicit RegisterSet(FlowGraphDeserializer* d);
+
  private:
   SmallSet<Register> cpu_registers_;
   SmallSet<Register> untagged_cpu_registers_;
@@ -835,6 +839,9 @@
   void CheckWritableInputs();
 #endif
 
+  void Write(FlowGraphSerializer* s) const;
+  explicit LocationSummary(FlowGraphDeserializer* d);
+
  private:
   BitmapBuilder& EnsureStackBitmap() {
     if (stack_bitmap_ == NULL) {
diff --git a/runtime/vm/compiler/backend/range_analysis.cc b/runtime/vm/compiler/backend/range_analysis.cc
index f2cf0ab..9025738 100644
--- a/runtime/vm/compiler/backend/range_analysis.cc
+++ b/runtime/vm/compiler/backend/range_analysis.cc
@@ -1815,7 +1815,7 @@
   int64_t value = value_boundary.ConstantValue();
 
   if (value == 0) {
-    return RangeBoundary(0);
+    return RangeBoundary::FromConstant(0);
   } else if (shift_count == 0 ||
              (limit > 0 && Utils::IsInt(static_cast<int>(limit), value))) {
     // Result stays in 64 bit range.
diff --git a/runtime/vm/compiler/backend/range_analysis.h b/runtime/vm/compiler/backend/range_analysis.h
index aa1f6b9..dc178f6 100644
--- a/runtime/vm/compiler/backend/range_analysis.h
+++ b/runtime/vm/compiler/backend/range_analysis.h
@@ -301,6 +301,9 @@
 
   int64_t SmiLowerBound() const { return LowerBound(kRangeBoundarySmi); }
 
+  void Write(FlowGraphSerializer* s) const;
+  explicit RangeBoundary(FlowGraphDeserializer* d);
+
  private:
   RangeBoundary(Kind kind, int64_t value, int64_t offset)
       : kind_(kind), value_(value), offset_(offset) {}
@@ -536,6 +539,9 @@
                        Definition* left_defn,
                        Range* result);
 
+  void Write(FlowGraphSerializer* s) const;
+  explicit Range(FlowGraphDeserializer* d);
+
  private:
   RangeBoundary min_;
   RangeBoundary max_;
diff --git a/runtime/vm/compiler/backend/range_analysis_test.cc b/runtime/vm/compiler/backend/range_analysis_test.cc
index ce2d3d5..39b4729 100644
--- a/runtime/vm/compiler/backend/range_analysis_test.cc
+++ b/runtime/vm/compiler/backend/range_analysis_test.cc
@@ -82,13 +82,14 @@
                       RangeBoundary(compiler::target::kSmiMin),
                       RangeBoundary(compiler::target::kSmiMax));
   }
-  TEST_RANGE_OP(Range::Shl, 0, 100, 0, 64, RangeBoundary(0),
+  TEST_RANGE_OP(Range::Shl, 0, 100, 0, 64, RangeBoundary::FromConstant(0),
                 RangeBoundary::PositiveInfinity());
   TEST_RANGE_OP(Range::Shl, -100, 0, 0, 64, RangeBoundary::NegativeInfinity(),
-                RangeBoundary(0));
+                RangeBoundary::FromConstant(0));
 
   TEST_RANGE_OP(Range::Shr, -8, 8, 1, 2, RangeBoundary(-4), RangeBoundary(4));
-  TEST_RANGE_OP(Range::Shr, 1, 8, 1, 2, RangeBoundary(0), RangeBoundary(4));
+  TEST_RANGE_OP(Range::Shr, 1, 8, 1, 2, RangeBoundary::FromConstant(0),
+                RangeBoundary(4));
   TEST_RANGE_OP(Range::Shr, -16, -8, 1, 2, RangeBoundary(-8),
                 RangeBoundary(-2));
   TEST_RANGE_OP(Range::Shr, 2, 4, -1, 1, RangeBoundary(1), RangeBoundary(4));
@@ -466,24 +467,26 @@
   // [0xff, 0xfff] & [0xf, 0xf] = [0x0, 0xf].
   TEST_RANGE_AND(static_cast<int64_t>(0xff), static_cast<int64_t>(0xfff),
                  static_cast<int64_t>(0xf), static_cast<int64_t>(0xf),
-                 RangeBoundary(0), RangeBoundary(0xf));
+                 RangeBoundary::FromConstant(0), RangeBoundary(0xf));
 
   // [0xffffffff, 0xffffffff] & [0xfffffffff, 0xfffffffff] = [0x0, 0xfffffffff].
   TEST_RANGE_AND(
       static_cast<int64_t>(0xffffffff), static_cast<int64_t>(0xffffffff),
       static_cast<int64_t>(0xfffffffff), static_cast<int64_t>(0xfffffffff),
-      RangeBoundary(0), RangeBoundary(static_cast<int64_t>(0xfffffffff)));
+      RangeBoundary::FromConstant(0),
+      RangeBoundary(static_cast<int64_t>(0xfffffffff)));
 
   // [0xffffffff, 0xffffffff] & [-20, 20] = [0x0, 0xffffffff].
   TEST_RANGE_AND(static_cast<int64_t>(0xffffffff),
                  static_cast<int64_t>(0xffffffff), static_cast<int64_t>(-20),
-                 static_cast<int64_t>(20), RangeBoundary(0),
+                 static_cast<int64_t>(20), RangeBoundary::FromConstant(0),
                  RangeBoundary(static_cast<int64_t>(0xffffffff)));
 
   // [-20, 20] & [0xffffffff, 0xffffffff] = [0x0, 0xffffffff].
   TEST_RANGE_AND(static_cast<int64_t>(-20), static_cast<int64_t>(20),
                  static_cast<int64_t>(0xffffffff),
-                 static_cast<int64_t>(0xffffffff), RangeBoundary(0),
+                 static_cast<int64_t>(0xffffffff),
+                 RangeBoundary::FromConstant(0),
                  RangeBoundary(static_cast<int64_t>(0xffffffff)));
 
   // Test that [-20, 20] & [-20, 20] = [-32, 31].
diff --git a/runtime/vm/compiler/backend/redundancy_elimination.cc b/runtime/vm/compiler/backend/redundancy_elimination.cc
index c192500..454cae9 100644
--- a/runtime/vm/compiler/backend/redundancy_elimination.cc
+++ b/runtime/vm/compiler/backend/redundancy_elimination.cc
@@ -3676,8 +3676,10 @@
 // Remove materializations from the graph. Register allocator will treat them
 // as part of the environment not as a real instruction.
 void AllocationSinking::DetachMaterializations() {
-  for (intptr_t i = 0; i < materializations_.length(); i++) {
-    materializations_[i]->previous()->LinkTo(materializations_[i]->next());
+  for (MaterializeObjectInstr* mat : materializations_) {
+    mat->previous()->LinkTo(mat->next());
+    mat->set_next(nullptr);
+    mat->set_previous(nullptr);
   }
 }
 
diff --git a/runtime/vm/compiler/backend/slot.cc b/runtime/vm/compiler/backend/slot.cc
index 6554f8b..46acba6 100644
--- a/runtime/vm/compiler/backend/slot.cc
+++ b/runtime/vm/compiler/backend/slot.cc
@@ -302,48 +302,60 @@
   const intptr_t offset =
       compiler::target::Class::TypeArgumentsFieldOffset(cls);
   ASSERT(offset != Class::kNoTypeArguments);
-  return SlotCache::Instance(thread).Canonicalize(
-      Slot(Kind::kTypeArguments,
-           IsImmutableBit::encode(true) |
-               IsCompressedBit::encode(
-                   compiler::target::Class::HasCompressedPointers(cls)),
-           kTypeArgumentsCid, offset, ":type_arguments",
-           /*static_type=*/nullptr, kTagged));
+  return GetCanonicalSlot(
+      thread, Kind::kTypeArguments,
+      IsImmutableBit::encode(true) |
+          IsCompressedBit::encode(
+              compiler::target::Class::HasCompressedPointers(cls)),
+      kTypeArgumentsCid, offset, ":type_arguments",
+      /*static_type=*/nullptr, kTagged);
 }
 
 const Slot& Slot::GetContextVariableSlotFor(Thread* thread,
                                             const LocalVariable& variable) {
   ASSERT(variable.is_captured());
-  return SlotCache::Instance(thread).Canonicalize(
-      Slot(Kind::kCapturedVariable,
-           IsImmutableBit::encode(variable.is_final() && !variable.is_late()) |
-               IsNullableBit::encode(true) |
-               IsCompressedBit::encode(Context::ContainsCompressedPointers()) |
-               IsSentinelVisibleBit::encode(variable.is_late()),
-           kDynamicCid,
-           compiler::target::Context::variable_offset(variable.index().value()),
-           &variable.name(), &variable.type(), kTagged));
+  return GetCanonicalSlot(
+      thread, Kind::kCapturedVariable,
+      IsImmutableBit::encode(variable.is_final() && !variable.is_late()) |
+          IsNullableBit::encode(true) |
+          IsCompressedBit::encode(Context::ContainsCompressedPointers()) |
+          IsSentinelVisibleBit::encode(variable.is_late()),
+      kDynamicCid,
+      compiler::target::Context::variable_offset(variable.index().value()),
+      &variable.name(), &variable.type(), kTagged);
 }
 
 const Slot& Slot::GetTypeArgumentsIndexSlot(Thread* thread, intptr_t index) {
   const intptr_t offset =
       compiler::target::TypeArguments::type_at_offset(index);
-  const Slot& slot = Slot(
-      Kind::kTypeArgumentsIndex,
+  return GetCanonicalSlot(
+      thread, Kind::kTypeArgumentsIndex,
       IsImmutableBit::encode(true) |
           IsCompressedBit::encode(TypeArguments::ContainsCompressedPointers()),
       kDynamicCid, offset, ":argument", /*static_type=*/nullptr, kTagged);
-  return SlotCache::Instance(thread).Canonicalize(slot);
 }
 
 const Slot& Slot::GetArrayElementSlot(Thread* thread,
                                       intptr_t offset_in_bytes) {
-  const Slot& slot =
-      Slot(Kind::kArrayElement,
-           IsNullableBit::encode(true) |
-               IsCompressedBit::encode(Array::ContainsCompressedPointers()),
-           kDynamicCid, offset_in_bytes, ":array_element",
-           /*static_type=*/nullptr, kTagged);
+  return GetCanonicalSlot(
+      thread, Kind::kArrayElement,
+      IsNullableBit::encode(true) |
+          IsCompressedBit::encode(Array::ContainsCompressedPointers()),
+      kDynamicCid, offset_in_bytes, ":array_element",
+      /*static_type=*/nullptr, kTagged);
+}
+
+const Slot& Slot::GetCanonicalSlot(Thread* thread,
+                                   Slot::Kind kind,
+                                   int8_t flags,
+                                   ClassIdTagType cid,
+                                   intptr_t offset_in_bytes,
+                                   const void* data,
+                                   const AbstractType* static_type,
+                                   Representation representation,
+                                   const FieldGuardState& field_guard_state) {
+  const Slot& slot = Slot(kind, flags, cid, offset_in_bytes, data, static_type,
+                          representation, field_guard_state);
   return SlotCache::Instance(thread).Canonicalize(slot);
 }
 
@@ -456,8 +468,8 @@
   }
 
   Class& owner = Class::Handle(zone, field.Owner());
-  const Slot& slot = SlotCache::Instance(thread).Canonicalize(Slot(
-      Kind::kDartField,
+  const Slot& slot = GetCanonicalSlot(
+      thread, Kind::kDartField,
       IsImmutableBit::encode((field.is_final() && !field.is_late()) ||
                              field.is_const()) |
           IsNullableBit::encode(is_nullable) |
@@ -467,7 +479,7 @@
           IsSentinelVisibleBit::encode(field.is_late() && field.is_final() &&
                                        !field.has_initializer()),
       nullable_cid, compiler::target::Field::OffsetOf(field), &field, &type,
-      rep, field_guard_state));
+      rep, field_guard_state);
 
   // If properties of this slot were based on the guarded state make sure
   // to add the field to the list of guarded fields. Note that during background
diff --git a/runtime/vm/compiler/backend/slot.h b/runtime/vm/compiler/backend/slot.h
index eb87355..90838c9 100644
--- a/runtime/vm/compiler/backend/slot.h
+++ b/runtime/vm/compiler/backend/slot.h
@@ -357,9 +357,12 @@
   bool IsPotentialUnboxed() const;
   Representation UnboxedRepresentation() const;
 
+  void Write(FlowGraphSerializer* s) const;
+  static const Slot& Read(FlowGraphDeserializer* d);
+
  private:
   Slot(Kind kind,
-       int8_t bits,
+       int8_t flags,
        ClassIdTagType cid,
        intptr_t offset_in_bytes,
        const void* data,
@@ -367,7 +370,7 @@
        Representation representation,
        const FieldGuardState& field_guard_state = FieldGuardState())
       : kind_(kind),
-        flags_(bits),
+        flags_(flags),
         cid_(cid),
         offset_in_bytes_(offset_in_bytes),
         representation_(representation),
@@ -397,6 +400,17 @@
     return static_cast<const T*>(data_);
   }
 
+  static const Slot& GetCanonicalSlot(
+      Thread* thread,
+      Kind kind,
+      int8_t flags,
+      ClassIdTagType cid,
+      intptr_t offset_in_bytes,
+      const void* data,
+      const AbstractType* static_type,
+      Representation representation,
+      const FieldGuardState& field_guard_state = FieldGuardState());
+
   // There is a fixed statically known number of native slots so we cache
   // them statically.
   static AcqRelAtomic<Slot*> native_fields_;
diff --git a/runtime/vm/compiler/compiler_pass.cc b/runtime/vm/compiler/compiler_pass.cc
index f6a4dd3..6486fca 100644
--- a/runtime/vm/compiler/compiler_pass.cc
+++ b/runtime/vm/compiler/compiler_pass.cc
@@ -84,6 +84,7 @@
                       "Do --compiler-passes=help for more information.");
 DECLARE_FLAG(bool, print_flow_graph);
 DECLARE_FLAG(bool, print_flow_graph_optimized);
+DEFINE_FLAG(bool, test_il_serialization, false, "Test IL serialization.");
 
 void CompilerPassState::set_flow_graph(FlowGraph* flow_graph) {
   flow_graph_ = flow_graph;
@@ -565,6 +566,22 @@
   if (state->reorder_blocks) {
     BlockScheduler::ReorderBlocks(flow_graph);
   }
+
+  // This is the last compiler pass.
+  // Test that round-trip IL serialization works before generating code.
+  if (FLAG_test_il_serialization && CompilerState::Current().is_aot()) {
+    Zone* zone = flow_graph->zone();
+    auto* detached_defs = new (zone) ZoneGrowableArray<Definition*>(zone, 0);
+    flow_graph->CompactSSA(detached_defs);
+
+    ZoneWriteStream write_stream(flow_graph->zone(), 1024);
+    FlowGraphSerializer serializer(&write_stream);
+    serializer.WriteFlowGraph(*flow_graph, *detached_defs);
+    ReadStream read_stream(write_stream.buffer(), write_stream.bytes_written());
+    FlowGraphDeserializer deserializer(flow_graph->parsed_function(),
+                                       &read_stream);
+    state->set_flow_graph(deserializer.ReadFlowGraph());
+  }
 });
 
 COMPILER_PASS(EliminateWriteBarriers, { EliminateWriteBarriers(flow_graph); });
diff --git a/runtime/vm/compiler/compiler_sources.gni b/runtime/vm/compiler/compiler_sources.gni
index ace9914..ed545d6 100644
--- a/runtime/vm/compiler/compiler_sources.gni
+++ b/runtime/vm/compiler/compiler_sources.gni
@@ -65,6 +65,8 @@
   "backend/il_printer.cc",
   "backend/il_printer.h",
   "backend/il_riscv.cc",
+  "backend/il_serializer.cc",
+  "backend/il_serializer.h",
   "backend/il_x64.cc",
   "backend/inliner.cc",
   "backend/inliner.h",
diff --git a/runtime/vm/compiler/ffi/call.cc b/runtime/vm/compiler/ffi/call.cc
index be76d75..0cee4de 100644
--- a/runtime/vm/compiler/ffi/call.cc
+++ b/runtime/vm/compiler/ffi/call.cc
@@ -14,18 +14,15 @@
 namespace ffi {
 
 // TODO(dartbug.com/36607): Cache the trampolines.
-FunctionPtr TrampolineFunction(const FunctionType& dart_signature,
+FunctionPtr TrampolineFunction(const String& name,
+                               const FunctionType& signature,
                                const FunctionType& c_signature,
-                               bool is_leaf,
-                               const String& function_name) {
+                               bool is_leaf) {
+  ASSERT(signature.num_implicit_parameters() == 1);
   Thread* thread = Thread::Current();
   Zone* zone = thread->zone();
-  String& name =
-      String::Handle(zone, Symbols::NewFormatted(thread, "FfiTrampoline_%s",
-                                                 function_name.ToCString()));
   const Library& lib = Library::Handle(zone, Library::FfiLibrary());
   const Class& owner_class = Class::Handle(zone, lib.toplevel_class());
-  FunctionType& signature = FunctionType::Handle(zone, FunctionType::New());
   Function& function = Function::Handle(
       zone, Function::New(signature, name, UntaggedFunction::kFfiTrampoline,
                           /*is_static=*/true,
@@ -35,31 +32,47 @@
                           /*is_native=*/false, owner_class,
                           TokenPosition::kMinSource));
   function.set_is_debuggable(false);
+
+  // Create unique names for the parameters, as they are used in scope building
+  // and error messages.
+  if (signature.num_fixed_parameters() > 0) {
+    function.CreateNameArray();
+    function.SetParameterNameAt(0, Symbols::ClosureParameter());
+    auto& param_name = String::Handle(zone);
+    for (intptr_t i = 1, n = signature.num_fixed_parameters(); i < n; ++i) {
+      param_name = Symbols::NewFormatted(thread, ":ffi_param%" Pd, i);
+      function.SetParameterNameAt(i, param_name);
+    }
+  }
+
+  function.SetFfiCSignature(c_signature);
+  function.SetFfiIsLeaf(is_leaf);
+
+  return function.ptr();
+}
+
+FunctionPtr TrampolineFunction(const FunctionType& dart_signature,
+                               const FunctionType& c_signature,
+                               bool is_leaf,
+                               const String& function_name) {
+  Thread* thread = Thread::Current();
+  Zone* zone = thread->zone();
+  String& name =
+      String::Handle(zone, Symbols::NewFormatted(thread, "FfiTrampoline_%s",
+                                                 function_name.ToCString()));
+
   // Trampolines have no optional arguments.
+  FunctionType& signature = FunctionType::Handle(zone, FunctionType::New());
   const intptr_t num_fixed = dart_signature.num_fixed_parameters();
+  signature.set_num_implicit_parameters(1);
   signature.set_num_fixed_parameters(num_fixed);
   signature.set_result_type(
       AbstractType::Handle(zone, dart_signature.result_type()));
   signature.set_parameter_types(
       Array::Handle(zone, dart_signature.parameter_types()));
-
-  // Create unique names for the parameters, as they are used in scope building
-  // and error messages.
-  if (num_fixed > 0) {
-    function.CreateNameArray();
-    function.SetParameterNameAt(0, Symbols::ClosureParameter());
-    for (intptr_t i = 1; i < num_fixed; i++) {
-      name = Symbols::NewFormatted(thread, ":ffi_param%" Pd, i);
-      function.SetParameterNameAt(i, name);
-    }
-  }
-  function.SetFfiCSignature(c_signature);
   signature ^= ClassFinalizer::FinalizeType(signature);
-  function.SetSignature(signature);
 
-  function.SetFfiIsLeaf(is_leaf);
-
-  return function.ptr();
+  return TrampolineFunction(name, signature, c_signature, is_leaf);
 }
 
 }  // namespace ffi
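
Splitting TrampolineFunction this way lets a caller that already holds a name
and a finalized signature (such as the IL deserializer re-creating an FFI call
target) build the trampoline directly, while the original overload keeps
deriving both and then delegates. A hedged sketch of calling the new overload,
assuming signature is finalized with the single implicit closure parameter the
ASSERT requires and c_signature describes the native side; the handles and the
name string here are illustrative:

    // Sketch: signature and c_signature are assumed set up by the caller.
    Thread* thread = Thread::Current();
    const String& name = String::Handle(
        thread->zone(), Symbols::New(thread, "FfiTrampoline_f"));
    const Function& trampoline = Function::Handle(
        thread->zone(),
        compiler::ffi::TrampolineFunction(name, signature, c_signature,
                                          /*is_leaf=*/false));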
diff --git a/runtime/vm/compiler/ffi/call.h b/runtime/vm/compiler/ffi/call.h
index aecd128..11c328a 100644
--- a/runtime/vm/compiler/ffi/call.h
+++ b/runtime/vm/compiler/ffi/call.h
@@ -19,6 +19,11 @@
 
 namespace ffi {
 
+FunctionPtr TrampolineFunction(const String& name,
+                               const FunctionType& signature,
+                               const FunctionType& c_signature,
+                               bool is_leaf);
+
 FunctionPtr TrampolineFunction(const FunctionType& dart_signature,
                                const FunctionType& c_signature,
                                bool is_leaf,
diff --git a/runtime/vm/compiler/ffi/marshaller.h b/runtime/vm/compiler/ffi/marshaller.h
index 175e12e..61eea68 100644
--- a/runtime/vm/compiler/ffi/marshaller.h
+++ b/runtime/vm/compiler/ffi/marshaller.h
@@ -131,6 +131,7 @@
 
   bool ContainsHandles() const;
 
+  const Function& dart_signature() const { return dart_signature_; }
   StringPtr function_name() const { return dart_signature_.name(); }
 
  protected:
diff --git a/runtime/vm/compiler/stub_code_compiler.cc b/runtime/vm/compiler/stub_code_compiler.cc
index 525dc4d..a4c1c5e 100644
--- a/runtime/vm/compiler/stub_code_compiler.cc
+++ b/runtime/vm/compiler/stub_code_compiler.cc
@@ -889,7 +889,8 @@
   // Fall through to &is_simple_case
 
   const RegisterSet caller_saved_registers(
-      TypeTestABI::kSubtypeTestCacheStubCallerSavedRegisters);
+      TypeTestABI::kSubtypeTestCacheStubCallerSavedRegisters,
+      /*fpu_registers=*/0);
 
   __ Bind(&is_simple_case);
   {
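
This RegisterSet change (and the matching one in type_testing_stubs.cc below)
spells out the FPU half of the set: these stub paths only need to preserve CPU
registers, so the second argument is an explicit empty FPU bitmask, presumably
to keep the two-argument constructor form unambiguous. A one-line sketch, with
kSomeCpuRegisterMask standing in for whichever CPU bitmask a caller has:

    // Sketch: save CPU registers only; no FPU registers are live here.
    const RegisterSet saved(kSomeCpuRegisterMask, /*fpu_registers=*/0);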
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 76817ed..0aca34f 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -3222,6 +3222,28 @@
   }
 }
 
+intptr_t Class::FindFunctionIndex(const Function& needle) const {
+  Thread* thread = Thread::Current();
+  if (EnsureIsFinalized(thread) != Error::null()) {
+    return -1;
+  }
+  REUSABLE_ARRAY_HANDLESCOPE(thread);
+  REUSABLE_FUNCTION_HANDLESCOPE(thread);
+  Array& funcs = thread->ArrayHandle();
+  Function& function = thread->FunctionHandle();
+  funcs = current_functions();
+  ASSERT(!funcs.IsNull());
+  const intptr_t len = funcs.Length();
+  for (intptr_t i = 0; i < len; i++) {
+    function ^= funcs.At(i);
+    if (needle.ptr() == function.ptr()) {
+      return i;
+    }
+  }
+  // No function found.
+  return -1;
+}
+
 FunctionPtr Class::FunctionFromIndex(intptr_t idx) const {
   const Array& funcs = Array::Handle(current_functions());
   if ((idx < 0) || (idx >= funcs.Length())) {
@@ -3234,20 +3256,13 @@
 }
 
 FunctionPtr Class::ImplicitClosureFunctionFromIndex(intptr_t idx) const {
-  const Array& funcs = Array::Handle(current_functions());
-  if ((idx < 0) || (idx >= funcs.Length())) {
+  Function& func = Function::Handle(FunctionFromIndex(idx));
+  if (func.IsNull() || !func.HasImplicitClosureFunction()) {
     return Function::null();
   }
-  Function& func = Function::Handle();
-  func ^= funcs.At(idx);
+  func = func.ImplicitClosureFunction();
   ASSERT(!func.IsNull());
-  if (!func.HasImplicitClosureFunction()) {
-    return Function::null();
-  }
-  const Function& closure_func =
-      Function::Handle(func.ImplicitClosureFunction());
-  ASSERT(!closure_func.IsNull());
-  return closure_func.ptr();
+  return func.ptr();
 }
 
 intptr_t Class::FindImplicitClosureFunctionIndex(const Function& needle) const {
@@ -4682,6 +4697,35 @@
   SetFields(new_arr);
 }
 
+intptr_t Class::FindFieldIndex(const Field& needle) const {
+  Thread* thread = Thread::Current();
+  if (EnsureIsFinalized(thread) != Error::null()) {
+    return -1;
+  }
+  REUSABLE_ARRAY_HANDLESCOPE(thread);
+  REUSABLE_FIELD_HANDLESCOPE(thread);
+  Array& fields = thread->ArrayHandle();
+  Field& field = thread->FieldHandle();
+  fields = this->fields();
+  ASSERT(!fields.IsNull());
+  for (intptr_t i = 0, n = fields.Length(); i < n; ++i) {
+    field ^= fields.At(i);
+    if (needle.ptr() == field.ptr()) {
+      return i;
+    }
+  }
+  // Not found.
+  return -1;
+}
+
+FieldPtr Class::FieldFromIndex(intptr_t idx) const {
+  Array& fields = Array::Handle(this->fields());
+  if ((idx < 0) || (idx >= fields.Length())) {
+    return Field::null();
+  }
+  return Field::RawCast(fields.At(idx));
+}
+
 bool Class::InjectCIDFields() const {
   if (library() != Library::InternalLibrary() ||
       Name() != Symbols::ClassID().ptr()) {
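
FindFunctionIndex and the new field helpers give the serializer a compact,
structure-relative way to name functions and fields: instead of serializing
the objects themselves, it records the owner class plus a position, which the
deserializer resolves against the identical program structure on its side. The
intended invariant is that FromIndex inverts FindIndex, sketched below; cls,
fun, and fld are assumed handles into one finalized class:

    // Sketch of the index round trip the serializer relies on.
    const intptr_t fun_index = cls.FindFunctionIndex(fun);
    ASSERT(fun_index >= 0);  // -1 would mean fun is not owned by cls.
    ASSERT(cls.FunctionFromIndex(fun_index) == fun.ptr());

    const intptr_t fld_index = cls.FindFieldIndex(fld);
    ASSERT(fld_index >= 0);
    ASSERT(cls.FieldFromIndex(fld_index) == fld.ptr());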
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index ed7f206..555d22c 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -1478,6 +1478,9 @@
   void AddField(const Field& field) const;
   void AddFields(const GrowableArray<const Field*>& fields) const;
 
+  intptr_t FindFieldIndex(const Field& needle) const;
+  FieldPtr FieldFromIndex(intptr_t idx) const;
+
   // If this is a dart:internal.ClassID class, then inject our own const
   // fields. Returns true if synthetic fields are injected and regular
   // field declarations should be ignored.
@@ -1505,6 +1508,7 @@
   }
   void SetFunctions(const Array& value) const;
   void AddFunction(const Function& function) const;
+  intptr_t FindFunctionIndex(const Function& needle) const;
   FunctionPtr FunctionFromIndex(intptr_t idx) const;
   intptr_t FindImplicitClosureFunctionIndex(const Function& needle) const;
   FunctionPtr ImplicitClosureFunctionFromIndex(intptr_t idx) const;
@@ -7772,6 +7776,8 @@
   friend class Closure;
   friend class Pointer;
   friend class DeferredObject;
+  friend class FlowGraphSerializer;
+  friend class FlowGraphDeserializer;
   friend class RegExp;
   friend class StubCode;
   friend class TypedDataView;
@@ -7898,6 +7904,8 @@
   FINAL_HEAP_OBJECT_IMPLEMENTATION(TypeParameters, Object);
   friend class Class;
   friend class ClassFinalizer;
+  friend class FlowGraphSerializer;
+  friend class FlowGraphDeserializer;
   friend class Function;
   friend class FunctionType;
   friend class Object;
@@ -9911,6 +9919,7 @@
 
   friend class Class;
   friend class ExternalOneByteString;
+  friend class FlowGraphSerializer;
   friend class ImageWriter;
   friend class String;
   friend class StringHasher;
@@ -10031,6 +10040,7 @@
   }
 
   friend class Class;
+  friend class FlowGraphSerializer;
   friend class ImageWriter;
   friend class String;
   friend class StringHasher;
diff --git a/runtime/vm/regexp_assembler_ir.cc b/runtime/vm/regexp_assembler_ir.cc
index 2e7a7b4..99440a2 100644
--- a/runtime/vm/regexp_assembler_ir.cc
+++ b/runtime/vm/regexp_assembler_ir.cc
@@ -928,15 +928,9 @@
 
     Definition* is_match_def;
 
-    if (unicode) {
-      is_match_def = new (Z) CaseInsensitiveCompareInstr(
-          string_value, lhs_index_value, rhs_index_value, length_value,
-          kCaseInsensitiveCompareUTF16RuntimeEntry, specialization_cid_);
-    } else {
-      is_match_def = new (Z) CaseInsensitiveCompareInstr(
-          string_value, lhs_index_value, rhs_index_value, length_value,
-          kCaseInsensitiveCompareUCS2RuntimeEntry, specialization_cid_);
-    }
+    is_match_def = new (Z) CaseInsensitiveCompareInstr(
+        string_value, lhs_index_value, rhs_index_value, length_value,
+        /*handle_surrogates=*/unicode, specialization_cid_);
 
     BranchOrBacktrack(Comparison(kNE, is_match_def, BoolConstant(true)),
                       on_no_match);
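
Replacing the RuntimeEntry argument with a handle_surrogates flag removes a
process-local pointer from the instruction's state, which matters once
instructions are written to a byte stream: a bool round-trips trivially, and
the backend can re-derive the entry from it. A hedged sketch of that mapping,
mirroring the two entries the old constructor calls named; the actual
selection lives in the instruction's backend code:

    // Sketch: pick the runtime entry from the serializable flag.
    const RuntimeEntry& entry = handle_surrogates
                                    ? kCaseInsensitiveCompareUTF16RuntimeEntry
                                    : kCaseInsensitiveCompareUCS2RuntimeEntry;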
diff --git a/runtime/vm/type_testing_stubs.cc b/runtime/vm/type_testing_stubs.cc
index 41ac9d1..aa79b1d 100644
--- a/runtime/vm/type_testing_stubs.cc
+++ b/runtime/vm/type_testing_stubs.cc
@@ -626,7 +626,7 @@
     // c) Then we'll check each value of the type argument.
     compiler::Label pop_saved_registers_on_failure;
     const RegisterSet saved_registers(
-        TTSInternalRegs::kSavedTypeArgumentRegisters);
+        TTSInternalRegs::kSavedTypeArgumentRegisters, /*fpu_registers=*/0);
     __ PushRegisters(saved_registers);
 
     AbstractType& type_arg = AbstractType::Handle();