Version 2.12.0-162.0.dev

Merge commit 'fce355b8b0247915d96aecfc300716d4c6cf27df' into 'dev'
diff --git a/runtime/vm/compiler/aot/precompiler.cc b/runtime/vm/compiler/aot/precompiler.cc
index d7f198e..f94e95b 100644
--- a/runtime/vm/compiler/aot/precompiler.cc
+++ b/runtime/vm/compiler/aot/precompiler.cc
@@ -237,7 +237,7 @@
           /*including_nonchanging_cids=*/FLAG_use_bare_instructions);
 
       {
-        CompilerState state(thread_, /*is_aot=*/true);
+        CompilerState state(thread_, /*is_aot=*/true, /*is_optimizing=*/true);
         PrecompileConstructors();
       }
 
@@ -2523,7 +2523,7 @@
       ZoneGrowableArray<const ICData*>* ic_data_array = nullptr;
       const Function& function = parsed_function()->function();
 
-      CompilerState compiler_state(thread(), /*is_aot=*/true,
+      CompilerState compiler_state(thread(), /*is_aot=*/true, optimized(),
                                    CompilerState::ShouldTrace(function));
 
       {
diff --git a/runtime/vm/compiler/backend/constant_propagator_test.cc b/runtime/vm/compiler/backend/constant_propagator_test.cc
index 24e7df1..b4a9ebe 100644
--- a/runtime/vm/compiler/backend/constant_propagator_test.cc
+++ b/runtime/vm/compiler/backend/constant_propagator_test.cc
@@ -19,7 +19,7 @@
 // its uses (which includes comparison and the phi itself).
 ISOLATE_UNIT_TEST_CASE(ConstantPropagation_PhiUnwrappingAndConvergence) {
   using compiler::BlockBuilder;
-  CompilerState S(thread, /*is_aot=*/false);
+  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
   FlowGraphBuilderHelper H;
 
   // We are going to build the following graph:
@@ -114,7 +114,7 @@
     FoldingResult expected) {
   using compiler::BlockBuilder;
 
-  CompilerState S(thread, /*is_aot=*/false);
+  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
   FlowGraphBuilderHelper H;
 
   // Add a variable into the scope which would provide static type for the
diff --git a/runtime/vm/compiler/backend/il_deserializer.cc b/runtime/vm/compiler/backend/il_deserializer.cc
index fec331a..1639057 100644
--- a/runtime/vm/compiler/backend/il_deserializer.cc
+++ b/runtime/vm/compiler/backend/il_deserializer.cc
@@ -7,6 +7,7 @@
 #include "vm/compiler/backend/il_serializer.h"
 #include "vm/compiler/backend/range_analysis.h"
 #include "vm/compiler/call_specializer.h"
+#include "vm/compiler/frontend/base_flow_graph_builder.h"
 #include "vm/compiler/jit/compiler.h"
 #include "vm/flags.h"
 #include "vm/json_writer.h"
@@ -2149,17 +2150,6 @@
   return true;
 }
 
-// Following the lead of BaseFlowGraphBuilder::MayCloneField here.
-const Field& FlowGraphDeserializer::MayCloneField(const Field& field) const {
-  if ((Compiler::IsBackgroundCompilation() ||
-       FLAG_force_clone_compiler_objects) &&
-      field.IsOriginal()) {
-    return Field::ZoneHandle(zone(), field.CloneFromOriginal());
-  }
-  ASSERT(field.IsZoneHandle());
-  return field;
-}
-
 bool FlowGraphDeserializer::ParseSlot(SExpList* list, const Slot** out) {
   ASSERT(out != nullptr);
   const auto offset_sexp = CheckInteger(Retrieve(list, 1));
@@ -2180,7 +2170,9 @@
       const auto field_sexp = CheckTaggedList(Retrieve(list, "field"), "Field");
       if (!ParseDartValue(field_sexp, &field)) return false;
       ASSERT(parsed_function_ != nullptr);
-      *out = &Slot::Get(MayCloneField(field), parsed_function_);
+      *out =
+          &Slot::Get(kernel::BaseFlowGraphBuilder::MayCloneField(zone(), field),
+                     parsed_function_);
       break;
     }
     case Slot::Kind::kTypeArguments:
diff --git a/runtime/vm/compiler/backend/il_test_helper.h b/runtime/vm/compiler/backend/il_test_helper.h
index 01da0c3..ddacf5f 100644
--- a/runtime/vm/compiler/backend/il_test_helper.h
+++ b/runtime/vm/compiler/backend/il_test_helper.h
@@ -66,11 +66,13 @@
 class TestPipeline : public ValueObject {
  public:
   explicit TestPipeline(const Function& function,
-                        CompilerPass::PipelineMode mode)
+                        CompilerPass::PipelineMode mode,
+                        bool is_optimizing = true)
       : function_(function),
         thread_(Thread::Current()),
         compiler_state_(thread_,
                         mode == CompilerPass::PipelineMode::kAOT,
+                        is_optimizing,
                         CompilerState::ShouldTrace(function)),
         mode_(mode) {}
   ~TestPipeline() { delete pass_state_; }
diff --git a/runtime/vm/compiler/backend/redundancy_elimination_test.cc b/runtime/vm/compiler/backend/redundancy_elimination_test.cc
index bc3b72f..2a2fdfd 100644
--- a/runtime/vm/compiler/backend/redundancy_elimination_test.cc
+++ b/runtime/vm/compiler/backend/redundancy_elimination_test.cc
@@ -224,15 +224,16 @@
   const Error& err = Error::Handle(cls.EnsureIsFinalized(thread));
   EXPECT(err.IsNull());
 
-  const Field& field = Field::Handle(
+  const Field& original_field = Field::Handle(
       cls.LookupField(String::Handle(Symbols::New(thread, "field"))));
-  EXPECT(!field.IsNull());
+  EXPECT(!original_field.IsNull());
+  const Field& field = Field::Handle(original_field.CloneFromOriginal());
 
   const Function& blackhole =
       Function::ZoneHandle(GetFunction(lib, "blackhole"));
 
   using compiler::BlockBuilder;
-  CompilerState S(thread, /*is_aot=*/false);
+  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
   FlowGraphBuilderHelper H;
 
   // We are going to build the following graph:
@@ -387,15 +388,16 @@
   const Error& err = Error::Handle(cls.EnsureIsFinalized(thread));
   EXPECT(err.IsNull());
 
-  const Field& field = Field::Handle(
+  const Field& original_field = Field::Handle(
       cls.LookupField(String::Handle(Symbols::New(thread, "field"))));
-  EXPECT(!field.IsNull());
+  EXPECT(!original_field.IsNull());
+  const Field& field = Field::Handle(original_field.CloneFromOriginal());
 
   const Function& blackhole =
       Function::ZoneHandle(GetFunction(lib, "blackhole"));
 
   using compiler::BlockBuilder;
-  CompilerState S(thread, /*is_aot=*/false);
+  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
   FlowGraphBuilderHelper H;
 
   // We are going to build the following graph:
@@ -550,7 +552,7 @@
 // https://github.com/flutter/flutter/issues/48114.
 ISOLATE_UNIT_TEST_CASE(LoadOptimizer_AliasingViaTypedDataAndUntaggedTypedData) {
   using compiler::BlockBuilder;
-  CompilerState S(thread, /*is_aot=*/false);
+  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
   FlowGraphBuilderHelper H;
 
   const auto& lib = Library::Handle(Library::TypedDataLibrary());
diff --git a/runtime/vm/compiler/backend/slot_test.cc b/runtime/vm/compiler/backend/slot_test.cc
index 8c4b620..4271174 100644
--- a/runtime/vm/compiler/backend/slot_test.cc
+++ b/runtime/vm/compiler/backend/slot_test.cc
@@ -63,7 +63,8 @@
   field.set_is_nullable_unsafe(false);
 
   // Enter compiler state.
-  CompilerState compiler_state(thread, /*is_aot=*/false);
+  CompilerState compiler_state(thread, /*is_aot=*/false,
+                               /*is_optimizing=*/true);
 
   const Field& field_clone_1 = Field::ZoneHandle(field.CloneFromOriginal());
   const Field& field_clone_2 = Field::ZoneHandle(field.CloneFromOriginal());
diff --git a/runtime/vm/compiler/backend/type_propagator_test.cc b/runtime/vm/compiler/backend/type_propagator_test.cc
index 852a885..24da405 100644
--- a/runtime/vm/compiler/backend/type_propagator_test.cc
+++ b/runtime/vm/compiler/backend/type_propagator_test.cc
@@ -26,7 +26,7 @@
 using compiler::BlockBuilder;
 
 ISOLATE_UNIT_TEST_CASE(TypePropagator_RedefinitionAfterStrictCompareWithNull) {
-  CompilerState S(thread, /*is_aot=*/false);
+  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
 
   FlowGraphBuilderHelper H;
 
@@ -101,7 +101,7 @@
 
 ISOLATE_UNIT_TEST_CASE(
     TypePropagator_RedefinitionAfterStrictCompareWithLoadClassId) {
-  CompilerState S(thread, /*is_aot=*/false);
+  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
 
   FlowGraphBuilderHelper H;
 
@@ -164,7 +164,7 @@
 }
 
 ISOLATE_UNIT_TEST_CASE(TypePropagator_Refinement) {
-  CompilerState S(thread, /*is_aot=*/false);
+  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
 
   const Class& object_class =
       Class::Handle(thread->isolate()->object_store()->object_class());
@@ -271,7 +271,7 @@
 // This test verifies that mutable compile types are not incorrectly cached
 // as reaching types after inference.
 ISOLATE_UNIT_TEST_CASE(TypePropagator_Regress36156) {
-  CompilerState S(thread, /*is_aot=*/false);
+  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
   FlowGraphBuilderHelper H;
 
   // We are going to build the following graph:
@@ -402,7 +402,7 @@
 }
 
 ISOLATE_UNIT_TEST_CASE(CompileType_CanBeSmi) {
-  CompilerState S(thread, /*is_aot=*/false);
+  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
 
   const char* late_tag = TestCase::LateTag();
   auto script_chars = Utils::CStringUniquePtr(
diff --git a/runtime/vm/compiler/compiler_state.h b/runtime/vm/compiler/compiler_state.h
index 913e36b..a200ea9 100644
--- a/runtime/vm/compiler/compiler_state.h
+++ b/runtime/vm/compiler/compiler_state.h
@@ -32,10 +32,12 @@
  public:
   CompilerState(Thread* thread,
                 bool is_aot,
+                bool is_optimizing,
                 CompilerTracing tracing = CompilerTracing::kOn)
       : ThreadStackResource(thread),
         cha_(thread),
         is_aot_(is_aot),
+        is_optimizing_(is_optimizing),
         tracing_(tracing) {
     previous_ = thread->SetCompilerState(this);
   }
@@ -82,6 +84,11 @@
 
   bool is_aot() const { return is_aot_; }
 
+  bool is_optimizing() const { return is_optimizing_; }
+  bool should_clone_fields() {
+    return !is_aot() && (is_optimizing() || FLAG_force_clone_compiler_objects);
+  }
+
   bool should_trace() const { return tracing_ == CompilerTracing::kOn; }
 
   static bool ShouldTrace() { return Current().should_trace(); }
@@ -103,6 +110,7 @@
   ZoneGrowableArray<LocalVariable*>* dummy_captured_vars_ = nullptr;
 
   const bool is_aot_;
+  const bool is_optimizing_;
 
   const CompilerTracing tracing_;
 
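For reference, a minimal sketch (not part of the patch) of how the widened CompilerState constructor and the new should_clone_fields() predicate shown above are intended to interact; the helper name CompileFunctionBody is hypothetical:

// Hypothetical helper, sketching the intended use of the new API.
static void CompileFunctionBody(Thread* thread, bool optimizing) {
  // JIT compilation: fields are cloned whenever the pass is optimizing
  // (or --force_clone_compiler_objects is set); AOT states never clone.
  CompilerState state(thread, /*is_aot=*/false, /*is_optimizing=*/optimizing);
  if (CompilerState::Current().should_clone_fields()) {
    // Operate on Field clones obtained via Field::CloneFromOriginal().
  }
}
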
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
index 06d5d73..c6ae149 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
@@ -462,7 +462,7 @@
 
 Fragment BaseFlowGraphBuilder::LoadField(const Field& field,
                                          bool calls_initializer) {
-  return LoadNativeField(Slot::Get(MayCloneField(field), parsed_function_),
+  return LoadNativeField(Slot::Get(MayCloneField(Z, field), parsed_function_),
                          calls_initializer);
 }
 
@@ -497,11 +497,10 @@
   return Fragment(new (Z) GuardFieldClassInstr(Pop(), field, deopt_id));
 }
 
-const Field& BaseFlowGraphBuilder::MayCloneField(const Field& field) {
-  if ((Compiler::IsBackgroundCompilation() ||
-       FLAG_force_clone_compiler_objects) &&
-      field.IsOriginal()) {
-    return Field::ZoneHandle(Z, field.CloneFromOriginal());
+const Field& BaseFlowGraphBuilder::MayCloneField(Zone* zone,
+                                                 const Field& field) {
+  if (CompilerState::Current().should_clone_fields() && field.IsOriginal()) {
+    return Field::ZoneHandle(zone, field.CloneFromOriginal());
   } else {
     ASSERT(field.IsZoneHandle());
     return field;
@@ -534,7 +533,7 @@
   }
 
   StoreInstanceFieldInstr* store = new (Z) StoreInstanceFieldInstr(
-      MayCloneField(field), Pop(), value, emit_store_barrier,
+      MayCloneField(Z, field), Pop(), value, emit_store_barrier,
       TokenPosition::kNoSource, parsed_function_, kind);
 
   return Fragment(store);
@@ -545,7 +544,7 @@
     StoreInstanceFieldInstr::Kind
         kind /* = StoreInstanceFieldInstr::Kind::kOther */) {
   Fragment instructions;
-  const Field& field_clone = MayCloneField(field);
+  const Field& field_clone = MayCloneField(Z, field);
   if (I->use_field_guards()) {
     LocalVariable* store_expression = MakeTemporary();
     instructions += LoadLocal(store_expression);
@@ -609,7 +608,7 @@
       compiler::LookupConvertUtf8DecoderScanFlagsField();
   auto scan = new (Z) Utf8ScanInstr(
       decoder, bytes, start, end, table,
-      Slot::Get(MayCloneField(scan_flags_field), parsed_function_));
+      Slot::Get(MayCloneField(Z, scan_flags_field), parsed_function_));
   Push(scan);
   return Fragment(scan);
 }
@@ -617,7 +616,7 @@
 Fragment BaseFlowGraphBuilder::StoreStaticField(TokenPosition position,
                                                 const Field& field) {
   return Fragment(
-      new (Z) StoreStaticFieldInstr(MayCloneField(field), Pop(), position));
+      new (Z) StoreStaticFieldInstr(MayCloneField(Z, field), Pop(), position));
 }
 
 Fragment BaseFlowGraphBuilder::StoreIndexed(classid_t class_id) {
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.h b/runtime/vm/compiler/frontend/base_flow_graph_builder.h
index 790ac1a..413fb19 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.h
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.h
@@ -190,7 +190,7 @@
   Fragment LoadContextAt(int depth);
   Fragment GuardFieldLength(const Field& field, intptr_t deopt_id);
   Fragment GuardFieldClass(const Field& field, intptr_t deopt_id);
-  const Field& MayCloneField(const Field& field);
+  static const Field& MayCloneField(Zone* zone, const Field& field);
   Fragment StoreInstanceField(
       TokenPosition position,
       const Slot& field,
diff --git a/runtime/vm/compiler/frontend/constant_reader.cc b/runtime/vm/compiler/frontend/constant_reader.cc
index a8cb442..3059114 100644
--- a/runtime/vm/compiler/frontend/constant_reader.cc
+++ b/runtime/vm/compiler/frontend/constant_reader.cc
@@ -90,6 +90,7 @@
 
   // On miss, evaluate, and insert value.
   if (result_.IsNull()) {
+    LeaveCompilerScope cs(H.thread());
     result_ = ReadConstantInternal(constant_offset);
     SafepointMutexLocker ml(
         H.thread()->isolate_group()->kernel_constants_mutex());
diff --git a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
index 7378aa7..b1ea0ea 100644
--- a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
+++ b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
@@ -163,6 +163,8 @@
   if (PeekTag() == kNullLiteral) {
     SkipExpression();  // read past the null literal.
     if (H.thread()->IsMutatorThread()) {
+      ASSERT(field.IsOriginal());
+      LeaveCompilerScope cs(H.thread());
       field.RecordStore(Object::null_object());
     } else {
       ASSERT(field.is_nullable(/* silence_assert = */ true));
@@ -194,6 +196,7 @@
   if (has_initializer && PeekTag() == kNullLiteral) {
     SkipExpression();  // read past the null literal.
     if (H.thread()->IsMutatorThread()) {
+      LeaveCompilerScope cs(H.thread());
       field.RecordStore(Object::null_object());
     } else {
       ASSERT(field.is_nullable(/* silence_assert = */ true));
diff --git a/runtime/vm/compiler/graph_intrinsifier.cc b/runtime/vm/compiler/graph_intrinsifier.cc
index a06ab1e..4935b0b 100644
--- a/runtime/vm/compiler/graph_intrinsifier.cc
+++ b/runtime/vm/compiler/graph_intrinsifier.cc
@@ -1143,7 +1143,7 @@
   ASSERT(Intrinsifier::CanIntrinsifyFieldAccessor(function));
 
   auto& field = Field::Handle(zone, function.accessor_field());
-  if (Field::ShouldCloneFields()) {
+  if (CompilerState::Current().should_clone_fields()) {
     field = field.CloneFromOriginal();
   }
   ASSERT(field.is_instance() && !field.is_late() && !field.needs_load_guard());
@@ -1186,7 +1186,7 @@
   ASSERT(Intrinsifier::CanIntrinsifyFieldAccessor(function));
 
   auto& field = Field::Handle(zone, function.accessor_field());
-  if (Field::ShouldCloneFields()) {
+  if (CompilerState::Current().should_clone_fields()) {
     field = field.CloneFromOriginal();
   }
   ASSERT(field.is_instance() && !field.is_final());
diff --git a/runtime/vm/compiler/intrinsifier.cc b/runtime/vm/compiler/intrinsifier.cc
index 78dc4fb..d95a65b 100644
--- a/runtime/vm/compiler/intrinsifier.cc
+++ b/runtime/vm/compiler/intrinsifier.cc
@@ -91,7 +91,7 @@
   // If we intrinsify, the intrinsified code therefore does not depend on the
   // field guard and we do not add it to the guarded fields via
   // [ParsedFunction::AddToGuardedFields].
-  if (Field::ShouldCloneFields()) {
+  if (CompilerState::Current().should_clone_fields()) {
     field = field.CloneFromOriginal();
   }
 
diff --git a/runtime/vm/compiler/jit/compiler.cc b/runtime/vm/compiler/jit/compiler.cc
index 37606511..9e31bf0 100644
--- a/runtime/vm/compiler/jit/compiler.cc
+++ b/runtime/vm/compiler/jit/compiler.cc
@@ -379,64 +379,64 @@
     function.AttachCode(code);
     function.SetWasCompiled(true);
   } else if (optimized()) {
-    // Installs code while at safepoint.
-    if (thread()->IsMutatorThread()) {
-      const bool is_osr = osr_id() != Compiler::kNoOSRDeoptId;
-      if (!is_osr) {
+    // We cannot execute generated code while installing code.
+    ASSERT(Thread::Current()->IsAtSafepoint() ||
+           (Thread::Current()->IsMutatorThread() &&
+            IsolateGroup::Current()->ContainsOnlyOneIsolate()));
+    // We are validating our CHA / field guard / ... assumptions. To prevent
+    // another thread from concurrently changing them, we have to guarantee
+    // mutual exclusion.
+    DEBUG_ASSERT(
+        IsolateGroup::Current()->program_lock()->IsCurrentThreadReader());
+
+    const bool trace_compiler =
+        FLAG_trace_compiler || FLAG_trace_optimizing_compiler;
+    bool code_is_valid = true;
+    if (!flow_graph->parsed_function().guarded_fields()->is_empty()) {
+      const ZoneGrowableArray<const Field*>& guarded_fields =
+          *flow_graph->parsed_function().guarded_fields();
+      Field& original = Field::Handle();
+      for (intptr_t i = 0; i < guarded_fields.length(); i++) {
+        const Field& field = *guarded_fields[i];
+        ASSERT(!field.IsOriginal());
+        original = field.Original();
+        if (!field.IsConsistentWith(original)) {
+          code_is_valid = false;
+          if (trace_compiler) {
+            THR_Print("--> FAIL: Field %s guarded state changed.",
+                      field.ToCString());
+          }
+          break;
+        }
+      }
+    }
+
+    if (!thread()->compiler_state().cha().IsConsistentWithCurrentHierarchy()) {
+      code_is_valid = false;
+      if (trace_compiler) {
+        THR_Print("--> FAIL: Class hierarchy has new subclasses.");
+      }
+    }
+
+    // Setting breakpoints at runtime could make a function non-optimizable.
+    if (code_is_valid && Compiler::CanOptimizeFunction(thread(), function)) {
+      if (osr_id() == Compiler::kNoOSRDeoptId) {
         function.InstallOptimizedCode(code);
+      } else {
+        // OSR is not compiled in background.
+        ASSERT(!Compiler::IsBackgroundCompilation());
       }
       ASSERT(code.owner() == function.raw());
     } else {
-      // Background compilation.
-      // Before installing code check generation counts if the code may
-      // have become invalid.
-      const bool trace_compiler =
-          FLAG_trace_compiler || FLAG_trace_optimizing_compiler;
-      bool code_is_valid = true;
-      if (!flow_graph->parsed_function().guarded_fields()->is_empty()) {
-        const ZoneGrowableArray<const Field*>& guarded_fields =
-            *flow_graph->parsed_function().guarded_fields();
-        Field& original = Field::Handle();
-        for (intptr_t i = 0; i < guarded_fields.length(); i++) {
-          const Field& field = *guarded_fields[i];
-          ASSERT(!field.IsOriginal());
-          original = field.Original();
-          if (!field.IsConsistentWith(original)) {
-            code_is_valid = false;
-            if (trace_compiler) {
-              THR_Print("--> FAIL: Field %s guarded state changed.",
-                        field.ToCString());
-            }
-            break;
-          }
-        }
-      }
-      if (!thread()
-               ->compiler_state()
-               .cha()
-               .IsConsistentWithCurrentHierarchy()) {
-        code_is_valid = false;
-        if (trace_compiler) {
-          THR_Print("--> FAIL: Class hierarchy has new subclasses.");
-        }
-      }
-
-      // Setting breakpoints at runtime could make a function non-optimizable.
-      if (code_is_valid && Compiler::CanOptimizeFunction(thread(), function)) {
-        const bool is_osr = osr_id() != Compiler::kNoOSRDeoptId;
-        ASSERT(!is_osr);  // OSR is not compiled in background.
-        function.InstallOptimizedCode(code);
+      code = Code::null();
+    }
+    if (function.usage_counter() < 0) {
+      // Reset to 0 so that it can be recompiled if needed.
+      if (code_is_valid) {
+        function.SetUsageCounter(0);
       } else {
-        code = Code::null();
-      }
-      if (function.usage_counter() < 0) {
-        // Reset to 0 so that it can be recompiled if needed.
-        if (code_is_valid) {
-          function.SetUsageCounter(0);
-        } else {
-          // Trigger another optimization pass soon.
-          function.SetUsageCounter(FLAG_optimization_counter_threshold - 100);
-        }
+        // Trigger another optimization pass soon.
+        function.SetUsageCounter(FLAG_optimization_counter_threshold - 100);
       }
     }
 
@@ -493,6 +493,7 @@
   }
   Zone* const zone = thread()->zone();
   HANDLESCOPE(thread());
+  EnterCompilerScope cs(thread());
 
   // We may reattempt compilation if the function needs to be assembled using
   // far branches on ARM. In the else branch of the setjmp call, done is set to
@@ -515,7 +516,7 @@
       FlowGraph* flow_graph = nullptr;
       ZoneGrowableArray<const ICData*>* ic_data_array = nullptr;
 
-      CompilerState compiler_state(thread(), /*is_aot=*/false,
+      CompilerState compiler_state(thread(), /*is_aot=*/false, optimized(),
                                    CompilerState::ShouldTrace(function));
 
       {
@@ -943,7 +944,7 @@
   // if state changed while compiling in background.
   Thread* thread = Thread::Current();
   Zone* zone = thread->zone();
-  CompilerState state(thread, /*is_aot=*/false);
+  CompilerState state(thread, /*is_aot=*/false, /*is_optimizing=*/false);
   LongJumpScope jump;
   if (setjmp(*jump.Set()) == 0) {
     ParsedFunction* parsed_function =
diff --git a/runtime/vm/compiler/jit/jit_call_specializer.cc b/runtime/vm/compiler/jit/jit_call_specializer.cc
index 05ce2af..e596fa3 100644
--- a/runtime/vm/compiler/jit/jit_call_specializer.cc
+++ b/runtime/vm/compiler/jit/jit_call_specializer.cc
@@ -35,7 +35,7 @@
     SpeculativeInliningPolicy* speculative_policy)
     : CallSpecializer(flow_graph,
                       speculative_policy,
-                      Field::ShouldCloneFields()) {}
+                      CompilerState::Current().should_clone_fields()) {}
 
 bool JitCallSpecializer::IsAllowedForInlining(intptr_t deopt_id) const {
   return true;
@@ -203,12 +203,16 @@
           THR_Print("  getter usage count: %" Pd "\n", getter.usage_counter());
         }
       }
-      ASSERT(field.IsOriginal());
-      field.set_is_unboxing_candidate(false);
-      Thread* thread = Thread::Current();
-      SafepointWriteRwLocker ml(thread,
-                                thread->isolate_group()->program_lock());
-      field.DeoptimizeDependentCode();
+      // We determined that unboxing the field is not beneficial for
+      // performance, so we mark it as boxed here.
+      //
+      // Calling `DisableFieldUnboxing` transitions the field to boxed
+      // and deoptimizes dependent code.
+      //
+      // NOTE: It will also, as a side-effect, change our field clone's
+      // `is_unboxing_candidate()` bit. So we assume the compiler has so far
+      // not relied on this bit.
+      field.DisableFieldUnboxing();
     } else {
       flow_graph()->parsed_function().AddToGuardedFields(&field);
     }
diff --git a/runtime/vm/dart_api_impl.cc b/runtime/vm/dart_api_impl.cc
index 60f008d..119d5a0 100644
--- a/runtime/vm/dart_api_impl.cc
+++ b/runtime/vm/dart_api_impl.cc
@@ -6535,7 +6535,8 @@
     return result;
   }
   CHECK_CALLBACK_STATE(T);
-  CompilerState state(Thread::Current(), /*is_aot=*/true);
+  CompilerState state(Thread::Current(), /*is_aot=*/true,
+                      /*is_optimizing=*/true);
   CHECK_ERROR_HANDLE(Precompiler::CompileAll());
   return Api::Success();
 #endif
diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc
index 069f6ab..26229f8 100644
--- a/runtime/vm/isolate.cc
+++ b/runtime/vm/isolate.cc
@@ -418,7 +418,7 @@
 
 bool IsolateGroup::ContainsOnlyOneIsolate() {
   SafepointReadRwLocker ml(Thread::Current(), isolates_lock_.get());
-  return isolate_count_ == 0;
+  return isolate_count_ == 1;
 }
 
 void IsolateGroup::RunWithLockedGroup(std::function<void()> fun) {
diff --git a/runtime/vm/isolate_reload.cc b/runtime/vm/isolate_reload.cc
index 8e586b3..542d9ae 100644
--- a/runtime/vm/isolate_reload.cc
+++ b/runtime/vm/isolate_reload.cc
@@ -163,7 +163,7 @@
     if (new_field) {
       const Field& field = Field::Handle(to_field.raw());
       field.set_needs_load_guard(true);
-      field.set_is_unboxing_candidate(false);
+      field.set_is_unboxing_candidate_unsafe(false);
       new_fields_offsets->Add(field.HostOffset());
     }
   }
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 8b790ce..2a50ea2 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -4257,6 +4257,7 @@
   if (is_finalized()) {
     return Error::null();
   }
+  LeaveCompilerScope ncs(thread);
   ASSERT(thread->IsMutatorThread());
   ASSERT(thread != NULL);
   const Error& error =
@@ -9904,11 +9905,6 @@
       signature_type_name.IsNull() ? "null" : signature_type_name.ToCString());
 }
 
-bool Field::ShouldCloneFields() {
-  return Compiler::IsBackgroundCompilation() ||
-         FLAG_force_clone_compiler_objects;
-}
-
 FieldPtr Field::CloneFromOriginal() const {
   return this->Clone(*this);
 }
@@ -9940,6 +9936,39 @@
   return &value;
 }
 
+void Field::DisableFieldUnboxing() const {
+  Thread* thread = Thread::Current();
+  ASSERT(!IsOriginal());
+  const Field& original = Field::Handle(Original());
+  if (!original.is_unboxing_candidate()) {
+    return;
+  }
+  SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
+  if (!original.is_unboxing_candidate()) {
+    return;
+  }
+  original.set_is_unboxing_candidate(false);
+  set_is_unboxing_candidate(false);
+  original.DeoptimizeDependentCode();
+}
+
+intptr_t Field::guarded_cid() const {
+#if defined(DEBUG)
+  // This assertion ensures that the cid seen by the background compiler is
+  // consistent. So the assertion passes if the field is a clone. It also
+  // passes if the field is static, because we don't use field guards on
+  // static fields.
+  Thread* thread = Thread::Current();
+  ASSERT(!thread->IsInsideCompiler() ||
+#if !defined(DART_PRECOMPILED_RUNTIME)
+         ((CompilerState::Current().should_clone_fields() == !IsOriginal())) ||
+#endif
+         is_static());
+#endif
+  return LoadNonPointer<ClassIdTagType, std::memory_order_relaxed>(
+      &raw_ptr()->guarded_cid_);
+}
+
 void Field::SetOriginal(const Field& value) const {
   ASSERT(value.IsOriginal());
   ASSERT(!value.IsNull());
@@ -10132,9 +10161,10 @@
   result.set_has_initializer(false);
   if (FLAG_precompiled_mode) {
     // May be updated by KernelLoader::ReadInferredType
-    result.set_is_unboxing_candidate(false);
+    result.set_is_unboxing_candidate_unsafe(false);
   } else {
-    result.set_is_unboxing_candidate(!is_final && !is_late && !is_static);
+    result.set_is_unboxing_candidate_unsafe(!is_final && !is_late &&
+                                            !is_static);
   }
   result.set_initializer_changed_after_initialization(false);
   NOT_IN_PRECOMPILED(result.set_kernel_offset(0));
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index 3fe0d69..ae41899 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -3962,10 +3962,9 @@
     return !raw_ptr()->owner()->IsField();
   }
 
-  // Returns whether fields must be cloned via [CloneFromOriginal] for the
-  // current compilation thread.
-  static bool ShouldCloneFields();
-
+  // Marks a previously unboxed field as boxed. Operates only on clones and
+  // updates the original field as well as this clone.
+  void DisableFieldUnboxing() const;
   // Returns a field cloned from 'this'. 'this' is set as the
   // original field of result.
   FieldPtr CloneFromOriginal() const;
@@ -3975,62 +3974,66 @@
   const char* UserVisibleNameCString() const;
   virtual StringPtr DictionaryName() const { return name(); }
 
-  bool is_static() const { return StaticBit::decode(raw_ptr()->kind_bits_); }
+  uint16_t kind_bits() const {
+    return LoadNonPointer<uint16_t, std::memory_order_acquire>(
+        &raw_ptr()->kind_bits_);
+  }
+
+  bool is_static() const { return StaticBit::decode(kind_bits()); }
   bool is_instance() const { return !is_static(); }
-  bool is_final() const { return FinalBit::decode(raw_ptr()->kind_bits_); }
-  bool is_const() const { return ConstBit::decode(raw_ptr()->kind_bits_); }
-  bool is_late() const { return IsLateBit::decode(raw_ptr()->kind_bits_); }
+  bool is_final() const { return FinalBit::decode(kind_bits()); }
+  bool is_const() const { return ConstBit::decode(kind_bits()); }
+  bool is_late() const { return IsLateBit::decode(kind_bits()); }
   bool is_extension_member() const {
-    return IsExtensionMemberBit::decode(raw_ptr()->kind_bits_);
+    return IsExtensionMemberBit::decode(kind_bits());
   }
   bool needs_load_guard() const {
-    return NeedsLoadGuardBit::decode(raw_ptr()->kind_bits_);
+    return NeedsLoadGuardBit::decode(kind_bits());
   }
-  bool is_reflectable() const {
-    return ReflectableBit::decode(raw_ptr()->kind_bits_);
-  }
+  bool is_reflectable() const { return ReflectableBit::decode(kind_bits()); }
   void set_is_reflectable(bool value) const {
     ASSERT(IsOriginal());
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(ReflectableBit::update(value, raw_ptr()->kind_bits_));
   }
   bool is_double_initialized() const {
-    return DoubleInitializedBit::decode(raw_ptr()->kind_bits_);
+    return DoubleInitializedBit::decode(kind_bits());
   }
   // Called in parser after allocating field, immutable property otherwise.
   // Marks fields that are initialized with a simple double constant.
   void set_is_double_initialized(bool value) const {
     ASSERT(Thread::Current()->IsMutatorThread());
     ASSERT(IsOriginal());
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(DoubleInitializedBit::update(value, raw_ptr()->kind_bits_));
   }
 
   bool initializer_changed_after_initialization() const {
-    return InitializerChangedAfterInitializatonBit::decode(
-        raw_ptr()->kind_bits_);
+    return InitializerChangedAfterInitializatonBit::decode(kind_bits());
   }
   void set_initializer_changed_after_initialization(bool value) const {
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(InitializerChangedAfterInitializatonBit::update(
         value, raw_ptr()->kind_bits_));
   }
 
-  bool has_pragma() const {
-    return HasPragmaBit::decode(raw_ptr()->kind_bits_);
-  }
+  bool has_pragma() const { return HasPragmaBit::decode(kind_bits()); }
   void set_has_pragma(bool value) const {
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(HasPragmaBit::update(value, raw_ptr()->kind_bits_));
   }
 
-  bool is_covariant() const {
-    return CovariantBit::decode(raw_ptr()->kind_bits_);
-  }
+  bool is_covariant() const { return CovariantBit::decode(kind_bits()); }
   void set_is_covariant(bool value) const {
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(CovariantBit::update(value, raw_ptr()->kind_bits_));
   }
 
   bool is_generic_covariant_impl() const {
-    return GenericCovariantImplBit::decode(raw_ptr()->kind_bits_);
+    return GenericCovariantImplBit::decode(kind_bits());
   }
   void set_is_generic_covariant_impl(bool value) const {
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(
         GenericCovariantImplBit::update(value, raw_ptr()->kind_bits_));
   }
@@ -4128,23 +4131,25 @@
   StringPtr InitializingExpression() const;
 
   bool has_nontrivial_initializer() const {
-    return HasNontrivialInitializerBit::decode(raw_ptr()->kind_bits_);
+    return HasNontrivialInitializerBit::decode(kind_bits());
   }
   // Called by parser after allocating field.
   void set_has_nontrivial_initializer(bool has_nontrivial_initializer) const {
     ASSERT(IsOriginal());
     ASSERT(Thread::Current()->IsMutatorThread());
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(HasNontrivialInitializerBit::update(
         has_nontrivial_initializer, raw_ptr()->kind_bits_));
   }
 
   bool has_initializer() const {
-    return HasInitializerBit::decode(raw_ptr()->kind_bits_);
+    return HasInitializerBit::decode(kind_bits());
   }
   // Called by parser after allocating field.
   void set_has_initializer(bool has_initializer) const {
     ASSERT(IsOriginal());
     ASSERT(Thread::Current()->IsMutatorThread());
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(
         HasInitializerBit::update(has_initializer, raw_ptr()->kind_bits_));
   }
@@ -4154,11 +4159,12 @@
   }
 
   bool is_non_nullable_integer() const {
-    return IsNonNullableIntBit::decode(raw_ptr()->kind_bits_);
+    return IsNonNullableIntBit::decode(kind_bits());
   }
 
   void set_is_non_nullable_integer(bool is_non_nullable_integer) const {
     ASSERT(Thread::Current()->IsMutatorThread());
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(IsNonNullableIntBit::update(is_non_nullable_integer,
                                               raw_ptr()->kind_bits_));
   }
@@ -4179,18 +4185,7 @@
   // Return class id that any non-null value read from this field is guaranteed
   // to have or kDynamicCid if such class id is not known.
   // Stores to this field must update this information hence the name.
-  intptr_t guarded_cid() const {
-#if defined(DEBUG)
-    // This assertion ensures that the cid seen by the background compiler is
-    // consistent. So the assertion passes if the field is a clone. It also
-    // passes if the field is static, because we don't use field guards on
-    // static fields.
-    Thread* thread = Thread::Current();
-    ASSERT(!IsOriginal() || is_static() || thread->IsMutatorThread() ||
-           thread->IsAtSafepoint());
-#endif
-    return raw_ptr()->guarded_cid_;
-  }
+  intptr_t guarded_cid() const;
 
   void set_guarded_cid(intptr_t cid) const {
     DEBUG_ASSERT(
@@ -4198,11 +4193,6 @@
     set_guarded_cid_unsafe(cid);
   }
   void set_guarded_cid_unsafe(intptr_t cid) const {
-#if defined(DEBUG)
-    Thread* thread = Thread::Current();
-    ASSERT(!IsOriginal() || is_static() || thread->IsMutatorThread() ||
-           thread->IsAtSafepoint());
-#endif
     StoreNonPointer(&raw_ptr()->guarded_cid_, cid);
   }
   static intptr_t guarded_cid_offset() {
@@ -4251,27 +4241,36 @@
   intptr_t UnboxedFieldCid() const { return guarded_cid(); }
 
   bool is_unboxing_candidate() const {
-    return UnboxingCandidateBit::decode(raw_ptr()->kind_bits_);
+    return UnboxingCandidateBit::decode(kind_bits());
   }
+
   // Default 'true', set to false once optimizing compiler determines it should
   // be boxed.
-  void set_is_unboxing_candidate(bool b) const {
-    ASSERT(IsOriginal());
+  void set_is_unboxing_candidate_unsafe(bool b) const {
     set_kind_bits(UnboxingCandidateBit::update(b, raw_ptr()->kind_bits_));
   }
 
+  void set_is_unboxing_candidate(bool b) const {
+    DEBUG_ASSERT(
+        IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter());
+    set_is_unboxing_candidate_unsafe(b);
+  }
+
   enum {
     kUnknownLengthOffset = -1,
     kUnknownFixedLength = -1,
     kNoFixedLength = -2,
   };
   void set_is_late(bool value) const {
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(IsLateBit::update(value, raw_ptr()->kind_bits_));
   }
   void set_is_extension_member(bool value) const {
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(IsExtensionMemberBit::update(value, raw_ptr()->kind_bits_));
   }
   void set_needs_load_guard(bool value) const {
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(NeedsLoadGuardBit::update(value, raw_ptr()->kind_bits_));
   }
   // Returns false if any value read from this field is guaranteed to be
@@ -4454,12 +4453,15 @@
 
   void set_name(const String& value) const;
   void set_is_static(bool is_static) const {
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(StaticBit::update(is_static, raw_ptr()->kind_bits_));
   }
   void set_is_final(bool is_final) const {
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(FinalBit::update(is_final, raw_ptr()->kind_bits_));
   }
   void set_is_const(bool value) const {
+    // TODO(36097): Once concurrent access is possible ensure updates are safe.
     set_kind_bits(ConstBit::update(value, raw_ptr()->kind_bits_));
   }
   void set_owner(const Object& value) const {
@@ -4472,7 +4474,8 @@
     StoreNonPointer(&raw_ptr()->end_token_pos_, token_pos);
   }
   void set_kind_bits(uint16_t value) const {
-    StoreNonPointer(&raw_ptr()->kind_bits_, value);
+    StoreNonPointer<uint16_t, uint16_t, std::memory_order_release>(
+        &raw_ptr()->kind_bits_, value);
   }
 
   static FieldPtr New();
diff --git a/runtime/vm/object_reload.cc b/runtime/vm/object_reload.cc
index 4aab7f8..1620dcc 100644
--- a/runtime/vm/object_reload.cc
+++ b/runtime/vm/object_reload.cc
@@ -233,7 +233,7 @@
           if (old_field.needs_load_guard()) {
             ASSERT(!old_field.is_unboxing_candidate());
             field.set_needs_load_guard(true);
-            field.set_is_unboxing_candidate(false);
+            field.set_is_unboxing_candidate_unsafe(false);
           }
         }
       }
@@ -677,7 +677,7 @@
   ASSERT(IsolateReloadContext::IsSameClass(*this, replacement));
 
   if (!is_declaration_loaded()) {
-    // The old class hasn't been used in any meanfully way, so the VM is okay
+    // The old class hasn't been used in any meaningful way, so the VM is okay
     // with any change.
     return;
   }
diff --git a/runtime/vm/object_test.cc b/runtime/vm/object_test.cc
index 5b280f2..c332d17 100644
--- a/runtime/vm/object_test.cc
+++ b/runtime/vm/object_test.cc
@@ -2515,7 +2515,8 @@
 ISOLATE_UNIT_TEST_CASE(ContextScope) {
   // We need an active compiler context to manipulate scopes, since local
   // variables and slots can be canonicalized in the compiler state.
-  CompilerState compiler_state(Thread::Current(), /*is_aot=*/false);
+  CompilerState compiler_state(Thread::Current(), /*is_aot=*/false,
+                               /*is_optimizing=*/false);
 
   const intptr_t parent_scope_function_level = 0;
   LocalScope* parent_scope =
diff --git a/runtime/vm/parser.cc b/runtime/vm/parser.cc
index c9cf923..52faef4 100644
--- a/runtime/vm/parser.cc
+++ b/runtime/vm/parser.cc
@@ -110,11 +110,12 @@
     }
   }
 
-  // Note: the list of guarded fields must contain copies during background
+  // Note: the list of guarded fields must contain copies during optimizing
   // compilation because we will look at their guarded_cid when copying
   // the array of guarded fields from callee into the caller during
   // inlining.
-  ASSERT(!field->IsOriginal() || Thread::Current()->IsMutatorThread());
+  ASSERT(field->IsOriginal() ==
+         !CompilerState::Current().should_clone_fields());
   guarded_fields_->Add(&Field::ZoneHandle(Z, field->raw()));
 }
 
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index a882e0d..98afe6f 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -428,6 +428,10 @@
 
   bool IsMutatorThread() const { return is_mutator_thread_; }
 
+#if defined(DEBUG)
+  bool IsInsideCompiler() const { return inside_compiler_; }
+#endif
+
   bool CanCollectGarbage() const;
 
   // Offset of Dart TimelineStream object.
@@ -482,6 +486,18 @@
     no_callback_scope_depth_ -= 1;
   }
 
+#if defined(DEBUG)
+  void EnterCompiler() {
+    ASSERT(!IsInsideCompiler());
+    inside_compiler_ = true;
+  }
+
+  void LeaveCompiler() {
+    ASSERT(IsInsideCompiler());
+    inside_compiler_ = false;
+  }
+#endif
+
   void StoreBufferAddObject(ObjectPtr obj);
   void StoreBufferAddObjectGC(ObjectPtr obj);
 #if defined(TESTING)
@@ -1027,6 +1043,10 @@
   Thread* next_;  // Used to chain the thread structures in an isolate.
   bool is_mutator_thread_ = false;
 
+#if defined(DEBUG)
+  bool inside_compiler_ = false;
+#endif
+
   explicit Thread(bool is_vm_isolate);
 
   void StoreBufferRelease(
@@ -1111,6 +1131,68 @@
 };
 #endif  // defined(DEBUG)
 
+// Within an EnterCompilerScope, the thread must operate on cloned fields.
+#if defined(DEBUG)
+class EnterCompilerScope : public ThreadStackResource {
+ public:
+  explicit EnterCompilerScope(Thread* thread = nullptr)
+      : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
+    previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
+    if (!previously_is_inside_compiler_) {
+      this->thread()->EnterCompiler();
+    }
+  }
+  ~EnterCompilerScope() {
+    if (!previously_is_inside_compiler_) {
+      thread()->LeaveCompiler();
+    }
+  }
+
+ private:
+  bool previously_is_inside_compiler_;
+  DISALLOW_COPY_AND_ASSIGN(EnterCompilerScope);
+};
+#else   // defined(DEBUG)
+class EnterCompilerScope : public ValueObject {
+ public:
+  explicit EnterCompilerScope(Thread* thread = nullptr) {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(EnterCompilerScope);
+};
+#endif  // defined(DEBUG)
+
+// Within a LeaveCompilerScope, the thread must operate on original fields.
+#if defined(DEBUG)
+class LeaveCompilerScope : public ThreadStackResource {
+ public:
+  explicit LeaveCompilerScope(Thread* thread = nullptr)
+      : ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
+    previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
+    if (previously_is_inside_compiler_) {
+      this->thread()->LeaveCompiler();
+    }
+  }
+  ~LeaveCompilerScope() {
+    if (previously_is_inside_compiler_) {
+      thread()->EnterCompiler();
+    }
+  }
+
+ private:
+  bool previously_is_inside_compiler_;
+  DISALLOW_COPY_AND_ASSIGN(LeaveCompilerScope);
+};
+#else   // defined(DEBUG)
+class LeaveCompilerScope : public ValueObject {
+ public:
+  explicit LeaveCompilerScope(Thread* thread = nullptr) {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(LeaveCompilerScope);
+};
+#endif  // defined(DEBUG)
+
 }  // namespace dart
 
 #endif  // RUNTIME_VM_THREAD_H_
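A rough usage sketch of the DEBUG-only compiler scopes added above (not part of the patch; UseScopes is a hypothetical function, and IsInsideCompiler() exists only in DEBUG builds):

void UseScopes(Thread* thread) {
  EnterCompilerScope enter(thread);  // thread->IsInsideCompiler() is now true.
  {
    // Temporarily leave the compiler, e.g. to store into an original field.
    LeaveCompilerScope leave(thread);
    ASSERT(!thread->IsInsideCompiler());
  }  // Destructor re-enters the compiler, since we were inside before.
  ASSERT(thread->IsInsideCompiler());
}  // Destructor leaves the compiler again.
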
diff --git a/sdk/lib/collection/linked_list.dart b/sdk/lib/collection/linked_list.dart
index 6d6be81..0445807 100644
--- a/sdk/lib/collection/linked_list.dart
+++ b/sdk/lib/collection/linked_list.dart
@@ -19,7 +19,7 @@
 /// list, it must first be removed from its current list (if any).
 /// For the same reason, the [remove] and [contains] methods
 /// are based on *identity*, even if the [LinkedListEntry] chooses
-/// to override [Object.operator==].
+/// to override [Object.==].
 ///
 /// In return, each element knows its own place in the linked list, as well as
 /// which list it is in. This allows constant time
diff --git a/sdk/lib/io/socket.dart b/sdk/lib/io/socket.dart
index 48f428f..c19ce36 100644
--- a/sdk/lib/io/socket.dart
+++ b/sdk/lib/io/socket.dart
@@ -631,7 +631,7 @@
 /// The [Stream] interface of this class provides event notification about when
 /// a certain change has happened, for example when data has become available
 /// ([RawSocketEvent.read]) or when the remote end has stopped listening
-/// ([RawSocketEvent.close]).
+/// ([RawSocketEvent.closed]).
 abstract class RawSocket implements Stream<RawSocketEvent> {
   /**
    * Set or get, if the [RawSocket] should listen for [RawSocketEvent.read]
diff --git a/tools/VERSION b/tools/VERSION
index dab9525..7291e4f 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
 MAJOR 2
 MINOR 12
 PATCH 0
-PRERELEASE 161
+PRERELEASE 162
 PRERELEASE_PATCH 0
\ No newline at end of file