[vm] Update NULL to nullptr in runtime/vm/compiler.
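
Mechanically replaces uses of the NULL macro with the C++11 nullptr
keyword throughout runtime/vm/compiler; no behavior change is intended.
For example (taken from the diff below):

  -  ASSERT(failure != NULL);
  +  ASSERT(failure != nullptr);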

TEST=build
Change-Id: I54cd75bbc942e11c4d719edcc64640da67077634
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/291965
Reviewed-by: Alexander Markov <alexmarkov@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
diff --git a/runtime/vm/compiler/aot/aot_call_specializer.cc b/runtime/vm/compiler/aot/aot_call_specializer.cc
index a1976ab..34654ae 100644
--- a/runtime/vm/compiler/aot/aot_call_specializer.cc
+++ b/runtime/vm/compiler/aot/aot_call_specializer.cc
@@ -105,7 +105,7 @@
       !target_function.AreValidArgumentCounts(
           call->type_args_len(), call->ArgumentCountWithoutTypeArgs(),
           call->argument_names().IsNull() ? 0 : call->argument_names().Length(),
-          /* error_message = */ NULL)) {
+          /* error_message = */ nullptr)) {
     return false;
   }
 
@@ -151,7 +151,8 @@
 }
 
 bool AotCallSpecializer::RecognizeRuntimeTypeGetter(InstanceCallInstr* call) {
-  if ((precompiler_ == NULL) || !precompiler_->get_runtime_type_is_unique()) {
+  if ((precompiler_ == nullptr) ||
+      !precompiler_->get_runtime_type_is_unique()) {
     return false;
   }
 
@@ -175,8 +176,8 @@
 
 static bool IsGetRuntimeType(Definition* defn) {
   StaticCallInstr* call = defn->AsStaticCall();
-  return (call != NULL) && (call->function().recognized_kind() ==
-                            MethodRecognizer::kObjectRuntimeType);
+  return (call != nullptr) && (call->function().recognized_kind() ==
+                               MethodRecognizer::kObjectRuntimeType);
 }
 
 // Recognize a.runtimeType == b.runtimeType and fold it into
@@ -284,7 +285,7 @@
   input = input->CopyWithType(Z);
 
   if (cid == kDoubleCid && input->Type()->IsNullableInt()) {
-    Definition* conversion = NULL;
+    Definition* conversion = nullptr;
 
     if (input->Type()->ToNullableCid() == kSmiCid) {
       conversion = new (Z) SmiToDoubleInstr(input, call->source());
@@ -298,7 +299,7 @@
     if (FLAG_trace_strong_mode_types) {
       THR_Print("[Strong mode] Inserted %s\n", conversion->ToCString());
     }
-    InsertBefore(call, conversion, /* env = */ NULL, FlowGraph::kValue);
+    InsertBefore(call, conversion, /* env = */ nullptr, FlowGraph::kValue);
     return new (Z) Value(conversion);
   }
 
@@ -419,7 +420,7 @@
 #if defined(TARGET_ARCH_ARM)
   Definition* right_definition = new (Z) UnboxedConstantInstr(
       Smi::ZoneHandle(Z, Smi::New(modulus - 1)), kUnboxedInt32);
-  InsertBefore(instr, right_definition, /*env=*/NULL, FlowGraph::kValue);
+  InsertBefore(instr, right_definition, /*env=*/nullptr, FlowGraph::kValue);
   right_definition = new (Z)
       IntConverterInstr(kUnboxedInt32, kUnboxedInt64,
                         new (Z) Value(right_definition), DeoptId::kNone);
@@ -428,7 +429,7 @@
       Smi::ZoneHandle(Z, Smi::New(modulus - 1)), kUnboxedInt64);
 #endif
   if (modulus == 1) return right_definition;
-  InsertBefore(instr, right_definition, /*env=*/NULL, FlowGraph::kValue);
+  InsertBefore(instr, right_definition, /*env=*/nullptr, FlowGraph::kValue);
   right_value = new (Z) Value(right_definition);
   return new (Z)
       BinaryInt64OpInstr(Token::kBIT_AND, left_value, right_value,
@@ -442,7 +443,7 @@
     return false;
   }
 
-  Definition* replacement = NULL;
+  Definition* replacement = nullptr;
   if (instr->ArgumentCount() == 2) {
     Value* left_value = instr->ArgumentValueAt(0);
     Value* right_value = instr->ArgumentValueAt(1);
@@ -592,7 +593,7 @@
     return false;
   }
 
-  Definition* replacement = NULL;
+  Definition* replacement = nullptr;
 
   if (instr->ArgumentCount() == 2) {
     Value* left_value = instr->ArgumentValueAt(0);
@@ -685,7 +686,7 @@
     }
   }
 
-  if (replacement != NULL && !replacement->ComputeCanDeoptimize()) {
+  if (replacement != nullptr && !replacement->ComputeCanDeoptimize()) {
     if (FLAG_trace_strong_mode_types) {
       THR_Print("[Strong mode] Optimization: replacing %s with %s\n",
                 instr->ToCString(), replacement->ToCString());
@@ -1060,13 +1061,13 @@
 bool AotCallSpecializer::TryReplaceInstanceOfWithRangeCheck(
     InstanceCallInstr* call,
     const AbstractType& type) {
-  if (precompiler_ == NULL) {
+  if (precompiler_ == nullptr) {
     // Loading not complete, can't do CHA yet.
     return false;
   }
 
   HierarchyInfo* hi = thread()->hierarchy_info();
-  if (hi == NULL) {
+  if (hi == nullptr) {
     return false;
   }
 
@@ -1080,7 +1081,7 @@
   // left.instanceof(type) =>
   //     _classRangeCheck(left.cid, lower_limit, upper_limit)
   LoadClassIdInstr* left_cid = new (Z) LoadClassIdInstr(new (Z) Value(left));
-  InsertBefore(call, left_cid, NULL, FlowGraph::kValue);
+  InsertBefore(call, left_cid, nullptr, FlowGraph::kValue);
   ConstantInstr* lower_cid =
       flow_graph()->GetConstant(Smi::Handle(Z, Smi::New(lower_limit)));
 
diff --git a/runtime/vm/compiler/aot/precompiler.cc b/runtime/vm/compiler/aot/precompiler.cc
index cc4bfe9..c791baf 100644
--- a/runtime/vm/compiler/aot/precompiler.cc
+++ b/runtime/vm/compiler/aot/precompiler.cc
@@ -386,7 +386,7 @@
 
 Precompiler::Precompiler(Thread* thread)
     : thread_(thread),
-      zone_(NULL),
+      zone_(nullptr),
       changed_(false),
       retain_root_library_caches_(false),
       function_count_(0),
@@ -426,7 +426,7 @@
       api_uses_(),
       error_(Error::Handle()),
       get_runtime_type_is_unique_(false) {
-  ASSERT(Precompiler::singleton_ == NULL);
+  ASSERT(Precompiler::singleton_ == nullptr);
   Precompiler::singleton_ = this;
 
   if (FLAG_print_precompiler_timings) {
@@ -443,7 +443,7 @@
   functions_to_retain_.Release();
 
   ASSERT(Precompiler::singleton_ == this);
-  Precompiler::singleton_ = NULL;
+  Precompiler::singleton_ = nullptr;
 
   delete thread()->compiler_timings();
   thread()->set_compiler_timings(nullptr);
@@ -673,7 +673,7 @@
       retained_reasons_writer_ = nullptr;
     }
 
-    zone_ = NULL;
+    zone_ = nullptr;
   }
 
   intptr_t symbols_before = -1;
@@ -3555,7 +3555,7 @@
           precompiler_->global_object_pool_builder());
       compiler::Assembler assembler(&object_pool_builder, far_branch_level);
 
-      CodeStatistics* function_stats = NULL;
+      CodeStatistics* function_stats = nullptr;
       if (FLAG_print_instruction_stats) {
         // At the moment we are leaking CodeStatistics objects for
         // simplicity because this is just a development mode flag.
@@ -3760,7 +3760,7 @@
 }
 
 Obfuscator::Obfuscator(Thread* thread, const String& private_key)
-    : state_(NULL) {
+    : state_(nullptr) {
   auto isolate_group = thread->isolate_group();
   if (!isolate_group->obfuscate()) {
     // Nothing to do.
@@ -3791,7 +3791,7 @@
 }
 
 Obfuscator::~Obfuscator() {
-  if (state_ != NULL) {
+  if (state_ != nullptr) {
     state_->SaveState();
   }
 }
@@ -3912,7 +3912,7 @@
 void Obfuscator::PreventRenaming(const char* name) {
   // For constructor names Class.name skip class name (if any) and a dot.
   const char* dot = strchr(name, '.');
-  if (dot != NULL) {
+  if (dot != nullptr) {
     name = dot + 1;
   }
 
@@ -4095,7 +4095,7 @@
       Array::Handle(thread->zone(),
                     thread->isolate_group()->object_store()->obfuscation_map());
   if (obfuscation_state.IsNull()) {
-    return NULL;
+    return nullptr;
   }
 
   const Array& renames = Array::Handle(
@@ -4114,7 +4114,7 @@
     str ^= renames_map.GetPayload(entry, 0);
     result[idx++] = StringToCString(str);
   }
-  result[idx++] = NULL;
+  result[idx++] = nullptr;
   renames_map.Release();
 
   return result;
diff --git a/runtime/vm/compiler/aot/precompiler.h b/runtime/vm/compiler/aot/precompiler.h
index ce64a2c..6aa2742 100644
--- a/runtime/vm/compiler/aot/precompiler.h
+++ b/runtime/vm/compiler/aot/precompiler.h
@@ -483,7 +483,7 @@
   // This method is guaranteed to return the same value for the same
   // input and it always preserves leading '_' even for atomic renames.
   StringPtr Rename(const String& name, bool atomic = false) {
-    if (state_ == NULL) {
+    if (state_ == nullptr) {
       return name.ptr();
     }
 
@@ -610,7 +610,7 @@
     String& renamed_;
   };
 
-  // Current obfuscation state or NULL if obfuscation is not enabled.
+  // Current obfuscation state or nullptr if obfuscation is not enabled.
   ObfuscationState* state_;
 };
 #else
diff --git a/runtime/vm/compiler/aot/precompiler_tracer.cc b/runtime/vm/compiler/aot/precompiler_tracer.cc
index 9588400..d155cb3 100644
--- a/runtime/vm/compiler/aot/precompiler_tracer.cc
+++ b/runtime/vm/compiler/aot/precompiler_tracer.cc
@@ -29,7 +29,7 @@
     return nullptr;
   }
   void* file = Dart::file_open_callback()(filename, /*write=*/true);
-  if (file == NULL) {
+  if (file == nullptr) {
     OS::PrintErr("warning: Failed to write precompiler trace: %s\n", filename);
     return nullptr;
   }
diff --git a/runtime/vm/compiler/assembler/assembler_arm.cc b/runtime/vm/compiler/assembler/assembler_arm.cc
index f38bb31..a8efcb0 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm.cc
@@ -3544,7 +3544,7 @@
                                   JumpDistance distance,
                                   Register instance_reg,
                                   Register temp_reg) {
-  ASSERT(failure != NULL);
+  ASSERT(failure != nullptr);
   ASSERT(instance_reg != kNoRegister);
   ASSERT(instance_reg != temp_reg);
   ASSERT(instance_reg != IP);
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc
index 2c8531f..6195495 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -297,7 +297,7 @@
 // If it can't be encoded, the function returns false, and the operand is
 // undefined.
 bool Operand::IsImmLogical(uint64_t value, uint8_t width, Operand* imm_op) {
-  ASSERT(imm_op != NULL);
+  ASSERT(imm_op != nullptr);
   ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
   if (width == kWRegSizeInBits) {
     value &= 0xffffffffUL;
@@ -2008,7 +2008,7 @@
                                   JumpDistance distance,
                                   Register instance_reg,
                                   Register temp_reg) {
-  ASSERT(failure != NULL);
+  ASSERT(failure != nullptr);
   ASSERT(instance_size != 0);
   ASSERT(instance_reg != temp_reg);
   ASSERT(temp_reg != kNoRegister);
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index a6aca04..a1e9f0f 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -482,7 +482,7 @@
   // used as an operand in either instruction. The encoded operand is written
   // to op.
   static OperandType CanHold(int64_t imm, uint8_t sz, Operand* op) {
-    ASSERT(op != NULL);
+    ASSERT(op != nullptr);
     ASSERT((sz == kXRegSizeInBits) || (sz == kWRegSizeInBits));
     if (Utils::IsUint(12, imm)) {
       op->encoding_ = imm << kImm12Shift;
diff --git a/runtime/vm/compiler/assembler/assembler_arm64_test.cc b/runtime/vm/compiler/assembler/assembler_arm64_test.cc
index 24cdc4f..85eb6d9 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64_test.cc
@@ -843,7 +843,7 @@
 }
 
 ASSEMBLER_TEST_RUN(PushRegisterPair, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*PushRegisterPair)() DART_UNUSED;
   EXPECT_EQ(12, EXECUTE_TEST_CODE_INT64(PushRegisterPair, test->entry()));
   EXPECT_DISASSEMBLY(
@@ -870,7 +870,7 @@
 }
 
 ASSEMBLER_TEST_RUN(PushRegisterPairReversed, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*PushRegisterPairReversed)() DART_UNUSED;
   EXPECT_EQ(12,
             EXECUTE_TEST_CODE_INT64(PushRegisterPairReversed, test->entry()));
@@ -898,7 +898,7 @@
 }
 
 ASSEMBLER_TEST_RUN(PopRegisterPair, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*PopRegisterPair)() DART_UNUSED;
   EXPECT_EQ(12, EXECUTE_TEST_CODE_INT64(PopRegisterPair, test->entry()));
   EXPECT_DISASSEMBLY(
@@ -925,7 +925,7 @@
 }
 
 ASSEMBLER_TEST_RUN(PopRegisterPairReversed, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*PopRegisterPairReversed)() DART_UNUSED;
   EXPECT_EQ(12,
             EXECUTE_TEST_CODE_INT64(PopRegisterPairReversed, test->entry()));
@@ -1018,7 +1018,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Semaphore, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef intptr_t (*Semaphore)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT64(Semaphore, test->entry()));
   EXPECT_DISASSEMBLY(
@@ -1051,7 +1051,7 @@
 }
 
 ASSEMBLER_TEST_RUN(FailedSemaphore, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef intptr_t (*FailedSemaphore)() DART_UNUSED;
   EXPECT_EQ(41, EXECUTE_TEST_CODE_INT64(FailedSemaphore, test->entry()));
   EXPECT_DISASSEMBLY(
@@ -1092,7 +1092,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Semaphore32, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef intptr_t (*Semaphore32)() DART_UNUSED;
   // Lower word has been atomically switched from 40 to 42k, whereas upper word
   // is unchanged at 40.
@@ -1134,7 +1134,7 @@
 }
 
 ASSEMBLER_TEST_RUN(FailedSemaphore32, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef intptr_t (*FailedSemaphore32)() DART_UNUSED;
   // Lower word has had the failure code (1) added to it.  Upper word is
   // unchanged at 40.
@@ -3034,7 +3034,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Udiv, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*Int64Return)() DART_UNUSED;
   EXPECT_EQ(3, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
   EXPECT_DISASSEMBLY(
@@ -3055,7 +3055,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Sdiv, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*Int64Return)() DART_UNUSED;
   EXPECT_EQ(-3, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
   EXPECT_DISASSEMBLY(
@@ -3076,7 +3076,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Udiv_zero, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*Int64Return)() DART_UNUSED;
   EXPECT_EQ(0, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
   EXPECT_DISASSEMBLY(
@@ -3096,7 +3096,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Sdiv_zero, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*Int64Return)() DART_UNUSED;
   EXPECT_EQ(0, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
   EXPECT_DISASSEMBLY(
@@ -3116,7 +3116,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Udiv_corner, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*Int64Return)() DART_UNUSED;
   EXPECT_EQ(0, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
   EXPECT_DISASSEMBLY(
@@ -3136,7 +3136,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Sdiv_corner, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*Int64Return)() DART_UNUSED;
   EXPECT_EQ(static_cast<int64_t>(0x8000000000000000),
             EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
@@ -3689,7 +3689,7 @@
 }
 
 ASSEMBLER_TEST_RUN(LoadHalfWordUnaligned, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef intptr_t (*LoadHalfWordUnaligned)(intptr_t) DART_UNUSED;
   uint8_t buffer[4] = {
       0x89, 0xAB, 0xCD, 0xEF,
@@ -3716,7 +3716,7 @@
 }
 
 ASSEMBLER_TEST_RUN(LoadHalfWordUnsignedUnaligned, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef intptr_t (*LoadHalfWordUnsignedUnaligned)(intptr_t) DART_UNUSED;
   uint8_t buffer[4] = {
       0x89, 0xAB, 0xCD, 0xEF,
@@ -3742,7 +3742,7 @@
 }
 
 ASSEMBLER_TEST_RUN(StoreHalfWordUnaligned, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef intptr_t (*StoreHalfWordUnaligned)(intptr_t) DART_UNUSED;
   uint8_t buffer[4] = {
       0, 0, 0, 0,
@@ -3775,7 +3775,7 @@
 }
 
 ASSEMBLER_TEST_RUN(LoadWordUnaligned, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int32_t (*LoadWordUnaligned)(intptr_t) DART_UNUSED;
   uint8_t buffer[8] = {0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0};
 
@@ -3809,7 +3809,7 @@
 }
 
 ASSEMBLER_TEST_RUN(StoreWordUnaligned, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef intptr_t (*StoreWordUnaligned)(intptr_t) DART_UNUSED;
   uint8_t buffer[8] = {0, 0, 0, 0, 0, 0, 0, 0};
 
@@ -5355,7 +5355,7 @@
 }
 
 ASSEMBLER_TEST_RUN(VinswVmovrs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*Int64Return)() DART_UNUSED;
   EXPECT_EQ(174, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
   EXPECT_DISASSEMBLY(
@@ -5392,7 +5392,7 @@
 }
 
 ASSEMBLER_TEST_RUN(VinsxVmovrd, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*Int64Return)() DART_UNUSED;
   EXPECT_EQ(85, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
   EXPECT_DISASSEMBLY(
@@ -5427,7 +5427,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vnot, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*Int64Return)() DART_UNUSED;
   EXPECT_EQ(2, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
   EXPECT_DISASSEMBLY(
@@ -7148,7 +7148,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vrecpes, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef double (*DoubleReturn)() DART_UNUSED;
   float res = EXECUTE_TEST_CODE_DOUBLE(DoubleReturn, test->entry());
   EXPECT_FLOAT_EQ(arm_recip_estimate(147.0), res, 0.0001);
@@ -7180,7 +7180,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vrecpss, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef double (*DoubleReturn)() DART_UNUSED;
   double res = EXECUTE_TEST_CODE_DOUBLE(DoubleReturn, test->entry());
   EXPECT_FLOAT_EQ(2.0 - 10.0 * 5.0, res, 0.0001);
@@ -7314,7 +7314,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vrsqrtes, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef double (*DoubleReturn)() DART_UNUSED;
   double res = EXECUTE_TEST_CODE_DOUBLE(DoubleReturn, test->entry());
   EXPECT_FLOAT_EQ(arm_reciprocal_sqrt_estimate(147.0), res, 0.0001);
@@ -7342,7 +7342,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vrsqrtss, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef double (*DoubleReturn)() DART_UNUSED;
   double res = EXECUTE_TEST_CODE_DOUBLE(DoubleReturn, test->entry());
   EXPECT_FLOAT_EQ((3.0 - 10.0 * 5.0) / 2.0, res, 0.0001);
@@ -7367,7 +7367,7 @@
 }
 
 ASSEMBLER_TEST_RUN(ReciprocalSqrt, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef double (*DoubleReturn)() DART_UNUSED;
   double res = EXECUTE_TEST_CODE_DOUBLE(DoubleReturn, test->entry());
   EXPECT_FLOAT_EQ(1.0 / sqrt(147000.0), res, 0.0001);
@@ -7428,7 +7428,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Drop, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*Int64Return)() DART_UNUSED;
   EXPECT_EQ(kMaxPushedNumber,
             EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
diff --git a/runtime/vm/compiler/assembler/assembler_arm_test.cc b/runtime/vm/compiler/assembler/assembler_arm_test.cc
index 917bea8..fb5a45d 100644
--- a/runtime/vm/compiler/assembler/assembler_arm_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm_test.cc
@@ -80,7 +80,7 @@
 }
 
 ASSEMBLER_TEST_RUN(MoveNegated, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*MoveNegated)() DART_UNUSED;
   EXPECT_EQ(~42, EXECUTE_TEST_CODE_INT32(MoveNegated, test->entry()));
 }
@@ -95,7 +95,7 @@
 }
 
 ASSEMBLER_TEST_RUN(MoveRotImm, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*MoveRotImm)() DART_UNUSED;
   EXPECT_EQ(0x30550003, EXECUTE_TEST_CODE_INT32(MoveRotImm, test->entry()));
 }
@@ -106,7 +106,7 @@
 }
 
 ASSEMBLER_TEST_RUN(MovImm16, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*MovImm16)() DART_UNUSED;
   EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INT32(MovImm16, test->entry()));
 }
@@ -120,7 +120,7 @@
 }
 
 ASSEMBLER_TEST_RUN(LoadImmediate, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*LoadImmediate)() DART_UNUSED;
   EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INT32(LoadImmediate, test->entry()));
 }
@@ -132,7 +132,7 @@
 }
 
 ASSEMBLER_TEST_RUN(LoadHalfWordUnaligned, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef intptr_t (*LoadHalfWordUnaligned)(intptr_t) DART_UNUSED;
   uint8_t buffer[4] = {
       0x89, 0xAB, 0xCD, 0xEF,
@@ -155,7 +155,7 @@
 }
 
 ASSEMBLER_TEST_RUN(LoadHalfWordUnsignedUnaligned, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef intptr_t (*LoadHalfWordUnsignedUnaligned)(intptr_t) DART_UNUSED;
   uint8_t buffer[4] = {
       0x89, 0xAB, 0xCD, 0xEF,
@@ -177,7 +177,7 @@
 }
 
 ASSEMBLER_TEST_RUN(StoreHalfWordUnaligned, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef intptr_t (*StoreHalfWordUnaligned)(intptr_t) DART_UNUSED;
   uint8_t buffer[4] = {
       0, 0, 0, 0,
@@ -205,7 +205,7 @@
 }
 
 ASSEMBLER_TEST_RUN(LoadWordUnaligned, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef intptr_t (*LoadWordUnaligned)(intptr_t) DART_UNUSED;
   uint8_t buffer[8] = {0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0};
 
@@ -235,7 +235,7 @@
 }
 
 ASSEMBLER_TEST_RUN(StoreWordUnaligned, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef intptr_t (*StoreWordUnaligned)(intptr_t) DART_UNUSED;
   uint8_t buffer[8] = {0, 0, 0, 0, 0, 0, 0, 0};
 
@@ -288,7 +288,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vmov, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Vmov)() DART_UNUSED;
   EXPECT_EQ(2, EXECUTE_TEST_CODE_INT32(Vmov, test->entry()));
 }
@@ -305,7 +305,7 @@
 }
 
 ASSEMBLER_TEST_RUN(SingleVLoadStore, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef float (*SingleVLoadStore)() DART_UNUSED;
   float res = EXECUTE_TEST_CODE_FLOAT(SingleVLoadStore, test->entry());
   EXPECT_FLOAT_EQ(2 * 12.3f, res, 0.001f);
@@ -328,7 +328,7 @@
 }
 
 ASSEMBLER_TEST_RUN(SingleVShiftLoadStore, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef float (*SingleVLoadStore)() DART_UNUSED;
   float res = EXECUTE_TEST_CODE_FLOAT(SingleVLoadStore, test->entry());
   EXPECT_FLOAT_EQ(2 * 12.3f, res, 0.001f);
@@ -350,10 +350,10 @@
 }
 
 ASSEMBLER_TEST_RUN(DoubleVLoadStore, test) {
-  EXPECT(test != NULL);
-  typedef double (*DoubleVLoadStore)() DART_UNUSED;
-  float res = EXECUTE_TEST_CODE_DOUBLE(DoubleVLoadStore, test->entry());
-  EXPECT_FLOAT_EQ(2 * 12.3f, res, 0.001f);
+  EXPECT(test != nullptr);
+  typedef double (*DoubleVLoadStore)() DART_UNUSED;
+  float res = EXECUTE_TEST_CODE_DOUBLE(DoubleVLoadStore, test->entry());
+  EXPECT_FLOAT_EQ(2 * 12.3f, res, 0.001f);
 }
 
 ASSEMBLER_TEST_GENERATE(SingleFPOperations, assembler) {
@@ -370,7 +370,7 @@
 }
 
 ASSEMBLER_TEST_RUN(SingleFPOperations, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef float (*SingleFPOperations)() DART_UNUSED;
   float res = EXECUTE_TEST_CODE_FLOAT(SingleFPOperations, test->entry());
   EXPECT_FLOAT_EQ(3.8340579f, res, 0.001f);
@@ -390,7 +390,7 @@
 }
 
 ASSEMBLER_TEST_RUN(DoubleFPOperations, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef double (*DoubleFPOperations)() DART_UNUSED;
   double res = EXECUTE_TEST_CODE_DOUBLE(DoubleFPOperations, test->entry());
   EXPECT_FLOAT_EQ(3.8340579, res, 0.001);
@@ -408,9 +408,9 @@
 }
 
 ASSEMBLER_TEST_RUN(DoubleSqrtNeg, test) {
-  EXPECT(test != NULL);
-  typedef int (*DoubleSqrtNeg)() DART_UNUSED;
-  EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(DoubleSqrtNeg, test->entry()));
+  EXPECT(test != nullptr);
+  typedef int (*DoubleSqrtNeg)() DART_UNUSED;
+  EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(DoubleSqrtNeg, test->entry()));
 }
 
 ASSEMBLER_TEST_GENERATE(IntToDoubleConversion, assembler) {
@@ -421,7 +421,7 @@
 }
 
 ASSEMBLER_TEST_RUN(IntToDoubleConversion, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef double (*IntToDoubleConversionCode)() DART_UNUSED;
   double res =
       EXECUTE_TEST_CODE_DOUBLE(IntToDoubleConversionCode, test->entry());
@@ -442,7 +442,7 @@
 }
 
 ASSEMBLER_TEST_RUN(LongToDoubleConversion, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef double (*LongToDoubleConversionCode)() DART_UNUSED;
   double res =
       EXECUTE_TEST_CODE_DOUBLE(LongToDoubleConversionCode, test->entry());
@@ -457,7 +457,7 @@
 }
 
 ASSEMBLER_TEST_RUN(IntToFloatConversion, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef float (*IntToFloatConversionCode)() DART_UNUSED;
   float res = EXECUTE_TEST_CODE_FLOAT(IntToFloatConversionCode, test->entry());
   EXPECT_FLOAT_EQ(6.0, res, 0.001);
@@ -470,7 +470,7 @@
 }
 
 ASSEMBLER_TEST_RUN(FloatToIntConversion, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*FloatToIntConversion)(float arg) DART_UNUSED;
   EXPECT_EQ(12, EXECUTE_TEST_CODE_INT32_F(FloatToIntConversion, test->entry(),
                                           12.8f));
@@ -488,7 +488,7 @@
 
 ASSEMBLER_TEST_RUN(DoubleToIntConversion, test) {
   typedef int (*DoubleToIntConversion)(double arg) DART_UNUSED;
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   EXPECT_EQ(12, EXECUTE_TEST_CODE_INT32_D(DoubleToIntConversion, test->entry(),
                                           12.8));
   EXPECT_EQ(INT32_MIN, EXECUTE_TEST_CODE_INT32_D(DoubleToIntConversion,
@@ -505,7 +505,7 @@
 
 ASSEMBLER_TEST_RUN(FloatToDoubleConversion, test) {
   typedef double (*FloatToDoubleConversionCode)() DART_UNUSED;
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   double res =
       EXECUTE_TEST_CODE_DOUBLE(FloatToDoubleConversionCode, test->entry());
   EXPECT_FLOAT_EQ(12.8, res, 0.001);
@@ -518,7 +518,7 @@
 }
 
 ASSEMBLER_TEST_RUN(DoubleToFloatConversion, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef float (*DoubleToFloatConversionCode)() DART_UNUSED;
   float res =
       EXECUTE_TEST_CODE_FLOAT(DoubleToFloatConversionCode, test->entry());
@@ -552,9 +552,9 @@
 }
 
 ASSEMBLER_TEST_RUN(FloatCompare, test) {
-  EXPECT(test != NULL);
-  typedef int (*FloatCompare)() DART_UNUSED;
-  EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(FloatCompare, test->entry()));
+  EXPECT(test != nullptr);
+  typedef int (*FloatCompare)() DART_UNUSED;
+  EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(FloatCompare, test->entry()));
 }
 
 ASSEMBLER_TEST_GENERATE(DoubleCompare, assembler) {
@@ -584,9 +584,9 @@
 }
 
 ASSEMBLER_TEST_RUN(DoubleCompare, test) {
-  EXPECT(test != NULL);
-  typedef int (*DoubleCompare)() DART_UNUSED;
-  EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(DoubleCompare, test->entry()));
+  EXPECT(test != nullptr);
+  typedef int (*DoubleCompare)() DART_UNUSED;
+  EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(DoubleCompare, test->entry()));
 }
 
 ASSEMBLER_TEST_GENERATE(Loop, assembler) {
@@ -601,7 +601,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Loop, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Loop)() DART_UNUSED;
   EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Loop, test->entry()));
 }
@@ -616,7 +616,7 @@
 }
 
 ASSEMBLER_TEST_RUN(ForwardBranch, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*ForwardBranch)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(ForwardBranch, test->entry()));
 }
@@ -634,7 +634,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Loop2, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Loop)() DART_UNUSED;
   EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Loop, test->entry()));
 }
@@ -655,7 +655,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Loop3, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Loop)() DART_UNUSED;
   EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Loop, test->entry()));
 }
@@ -668,7 +668,7 @@
 }
 
 ASSEMBLER_TEST_RUN(LoadStore, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*LoadStore)() DART_UNUSED;
   EXPECT_EQ(123, EXECUTE_TEST_CODE_INT32(LoadStore, test->entry()));
 }
@@ -683,7 +683,7 @@
 }
 
 ASSEMBLER_TEST_RUN(PushRegisterPair, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*PushRegisterPair)() DART_UNUSED;
   EXPECT_EQ(12, EXECUTE_TEST_CODE_INT32(PushRegisterPair, test->entry()));
 }
@@ -698,7 +698,7 @@
 }
 
 ASSEMBLER_TEST_RUN(PushRegisterPairReversed, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*PushRegisterPairReversed)() DART_UNUSED;
   EXPECT_EQ(12,
             EXECUTE_TEST_CODE_INT32(PushRegisterPairReversed, test->entry()));
@@ -714,7 +714,7 @@
 }
 
 ASSEMBLER_TEST_RUN(PopRegisterPair, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*PopRegisterPair)() DART_UNUSED;
   EXPECT_EQ(12, EXECUTE_TEST_CODE_INT32(PopRegisterPair, test->entry()));
 }
@@ -729,7 +729,7 @@
 }
 
 ASSEMBLER_TEST_RUN(PopRegisterPairReversed, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*PopRegisterPairReversed)() DART_UNUSED;
   EXPECT_EQ(12,
             EXECUTE_TEST_CODE_INT32(PopRegisterPairReversed, test->entry()));
@@ -750,7 +750,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Semaphore, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Semaphore)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Semaphore, test->entry()));
 }
@@ -768,7 +768,7 @@
 }
 
 ASSEMBLER_TEST_RUN(FailedSemaphore, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*FailedSemaphore)() DART_UNUSED;
   EXPECT_EQ(41, EXECUTE_TEST_CODE_INT32(FailedSemaphore, test->entry()));
 }
@@ -783,7 +783,7 @@
 }
 
 ASSEMBLER_TEST_RUN(AddSub, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*AddSub)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(AddSub, test->entry()));
 }
@@ -798,7 +798,7 @@
 }
 
 ASSEMBLER_TEST_RUN(AddCarry, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*AddCarry)() DART_UNUSED;
   EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(AddCarry, test->entry()));
 }
@@ -814,7 +814,7 @@
 }
 
 ASSEMBLER_TEST_RUN(AddCarryInOut, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*AddCarryInOut)() DART_UNUSED;
   EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(AddCarryInOut, test->entry()));
 }
@@ -829,7 +829,7 @@
 }
 
 ASSEMBLER_TEST_RUN(SubCarry, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*SubCarry)() DART_UNUSED;
   EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SubCarry, test->entry()));
 }
@@ -844,7 +844,7 @@
 }
 
 ASSEMBLER_TEST_RUN(SubCarryInOut, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*SubCarryInOut)() DART_UNUSED;
   EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SubCarryInOut, test->entry()));
 }
@@ -859,7 +859,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Overflow, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Overflow)() DART_UNUSED;
   EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Overflow, test->entry()));
 }
@@ -874,7 +874,7 @@
 }
 
 ASSEMBLER_TEST_RUN(AndOrr, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*AndOrr)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(AndOrr, test->entry()));
 }
@@ -891,7 +891,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Orrs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Orrs)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Orrs, test->entry()));
 }
@@ -905,7 +905,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Multiply, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Multiply)() DART_UNUSED;
   EXPECT_EQ(800, EXECUTE_TEST_CODE_INT32(Multiply, test->entry()));
 }
@@ -923,7 +923,7 @@
 }
 
 ASSEMBLER_TEST_RUN(QuotientRemainder, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*QuotientRemainder)(int64_t dividend, int64_t divisor)
       DART_UNUSED;
   EXPECT_EQ(0x1000400000da8LL,
@@ -943,7 +943,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Multiply64To64, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*Multiply64To64)(int64_t operand0, int64_t operand1)
       DART_UNUSED;
   EXPECT_EQ(6,
@@ -956,7 +956,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Multiply32To64, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*Multiply32To64)(int64_t operand0, int64_t operand1)
       DART_UNUSED;
   EXPECT_EQ(6,
@@ -969,7 +969,7 @@
 }
 
 ASSEMBLER_TEST_RUN(MultiplyAccumAccum32To64, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*MultiplyAccumAccum32To64)(int64_t operand0,
                                               int64_t operand1) DART_UNUSED;
   EXPECT_EQ(3 + 7 + 5 * 11,
@@ -1004,7 +1004,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Clz, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Clz)() DART_UNUSED;
   EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Clz, test->entry()));
 }
@@ -1016,7 +1016,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Rbit, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Rbit)() DART_UNUSED;
   const int32_t expected = 0xa8000000;
   EXPECT_EQ(expected, EXECUTE_TEST_CODE_INT32(Rbit, test->entry()));
@@ -1035,7 +1035,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Tst, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1051,7 +1051,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Lsl, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1067,7 +1067,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Lsr, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1082,7 +1082,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Lsr1, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1097,7 +1097,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Asr1, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1109,7 +1109,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Rsb, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Rsb)() DART_UNUSED;
   EXPECT_EQ(32, EXECUTE_TEST_CODE_INT32(Rsb, test->entry()));
 }
@@ -1153,7 +1153,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Ldrh, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1168,7 +1168,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Ldrsb, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1183,7 +1183,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Ldrb, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(0xff, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1198,7 +1198,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Ldrsh, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(0xff, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1213,7 +1213,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Ldrh1, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(0xff, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1232,7 +1232,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Ldrd, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int64_t (*Tst)(int64_t r0r1, int64_t r2r3) DART_UNUSED;
   EXPECT_EQ(0x0000444400002222LL,
             EXECUTE_TEST_CODE_INT64_LL(Tst, test->entry(), 0x0000111100000000LL,
@@ -1275,7 +1275,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Ldm_stm_da, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(-52, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1289,7 +1289,7 @@
 }
 
 ASSEMBLER_TEST_RUN(AddressShiftStrLSL1NegOffset, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1303,7 +1303,7 @@
 }
 
 ASSEMBLER_TEST_RUN(AddressShiftLdrLSL5NegOffset, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1317,7 +1317,7 @@
 }
 
 ASSEMBLER_TEST_RUN(AddressShiftStrLRS1NegOffset, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1331,7 +1331,7 @@
 }
 
 ASSEMBLER_TEST_RUN(AddressShiftLdrLRS1NegOffset, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1347,7 +1347,7 @@
 }
 
 ASSEMBLER_TEST_RUN(AddressShiftStrLSLNegPreIndex, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1361,7 +1361,7 @@
 }
 
 ASSEMBLER_TEST_RUN(AddressShiftLdrLSLNegPreIndex, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1411,7 +1411,7 @@
 }
 
 ASSEMBLER_TEST_RUN(VstmdVldmd, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1461,7 +1461,7 @@
 }
 
 ASSEMBLER_TEST_RUN(VstmsVldms, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1509,7 +1509,7 @@
 }
 
 ASSEMBLER_TEST_RUN(VstmdVldmd1, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1557,7 +1557,7 @@
 }
 
 ASSEMBLER_TEST_RUN(VstmsVldms1, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1615,7 +1615,7 @@
 }
 
 ASSEMBLER_TEST_RUN(VstmdVldmd_off, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1667,7 +1667,7 @@
 }
 
 ASSEMBLER_TEST_RUN(VstmsVldms_off, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1741,7 +1741,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Udiv, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::integer_division_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -1759,7 +1759,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Sdiv, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::integer_division_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-3, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -1777,7 +1777,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Udiv_zero, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::integer_division_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -1795,7 +1795,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Sdiv_zero, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::integer_division_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -1813,7 +1813,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Udiv_corner, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::integer_division_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -1831,7 +1831,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Sdiv_corner, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::integer_division_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(static_cast<int32_t>(0x80000000),
@@ -1857,7 +1857,7 @@
 }
 
 ASSEMBLER_TEST_RUN(IntDiv_supported, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
 #if defined(USING_SIMULATOR)
   bool orig = TargetCPUFeatures::integer_division_supported();
   HostCPUFeatures::set_integer_division_supported(true);
@@ -1888,7 +1888,7 @@
 }
 
 ASSEMBLER_TEST_RUN(IntDiv_unsupported, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
 #if defined(USING_SIMULATOR)
   bool orig = TargetCPUFeatures::integer_division_supported();
   HostCPUFeatures::set_integer_division_supported(false);
@@ -1910,7 +1910,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Muls, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   typedef int (*Tst)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
 }
@@ -1949,7 +1949,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vaddqi8, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(36, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -1990,7 +1990,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vaddqi16, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(36, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2031,7 +2031,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vaddqi32, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(36, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2060,7 +2060,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vaddqi64, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2095,7 +2095,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vshlqu64, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2130,7 +2130,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vshlqi64, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2185,7 +2185,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Mint_shl_ok, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2240,7 +2240,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Mint_shl_overflow, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2281,7 +2281,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vsubqi8, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2322,7 +2322,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vsubqi16, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2363,7 +2363,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vsubqi32, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2392,7 +2392,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vsubqi64, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2433,7 +2433,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vmulqi8, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(70, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2474,7 +2474,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vmulqi16, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(70, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2515,7 +2515,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vmulqi32, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(70, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2546,7 +2546,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vaddqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(36, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2577,7 +2577,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vsubqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2608,7 +2608,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vmulqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(70, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2648,7 +2648,7 @@
 }
 
 ASSEMBLER_TEST_RUN(VtblX, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2688,7 +2688,7 @@
 }
 
 ASSEMBLER_TEST_RUN(VtblY, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2728,7 +2728,7 @@
 }
 
 ASSEMBLER_TEST_RUN(VtblZ, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2768,7 +2768,7 @@
 }
 
 ASSEMBLER_TEST_RUN(VtblW, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2807,7 +2807,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Veorq, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-8, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2846,7 +2846,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vornq, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(60, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2885,7 +2885,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vorrq, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-4, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2924,7 +2924,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vandq, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2958,7 +2958,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vmovq, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -2977,7 +2977,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vmvnq, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3007,7 +3007,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vdupb, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-4, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3037,7 +3037,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vduph, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-4, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3067,7 +3067,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vdupw, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-4, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3097,7 +3097,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vzipqw, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef float (*Vzipqw)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(Vzipqw, test->entry());
@@ -3139,7 +3139,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vceqqi32, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3172,7 +3172,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vceqqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3213,7 +3213,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vcgeqi32, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3254,7 +3254,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vcugeqi32, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3287,7 +3287,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vcgeqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3328,7 +3328,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vcgtqi32, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3369,7 +3369,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vcugtqi32, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3402,7 +3402,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vcgtqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3420,7 +3420,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vminqs_zero, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(Tst, test->entry());
@@ -3453,7 +3453,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vminqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(8, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3471,7 +3471,7 @@
 
 // Verify that vmaxqs(-0.0, 0.0) = 0.0
 ASSEMBLER_TEST_RUN(Vmaxqs_zero, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(Tst, test->entry());
@@ -3504,7 +3504,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vmaxqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef int (*Tst)() DART_UNUSED;
     EXPECT_EQ(14, EXECUTE_TEST_CODE_INT32(Tst, test->entry()));
@@ -3523,7 +3523,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vrecpeqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef float (*Vrecpeqs)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(Vrecpeqs, test->entry());
@@ -3549,7 +3549,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vrecpsqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef float (*Vrecpsqs)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(Vrecpsqs, test->entry());
@@ -3576,7 +3576,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Reciprocal, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef float (*Reciprocal)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(Reciprocal, test->entry());
@@ -3597,7 +3597,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vrsqrteqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef float (*Vrsqrteqs)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(Vrsqrteqs, test->entry());
@@ -3623,7 +3623,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vrsqrtsqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef float (*Vrsqrtsqs)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(Vrsqrtsqs, test->entry());
@@ -3654,7 +3654,7 @@
 }
 
 ASSEMBLER_TEST_RUN(ReciprocalSqrt, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef float (*ReciprocalSqrt)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(ReciprocalSqrt, test->entry());
@@ -3695,7 +3695,7 @@
 }
 
 ASSEMBLER_TEST_RUN(SIMDSqrt, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef float (*SIMDSqrt)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(SIMDSqrt, test->entry());
@@ -3740,7 +3740,7 @@
 }
 
 ASSEMBLER_TEST_RUN(SIMDSqrt2, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef float (*SIMDSqrt2)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(SIMDSqrt2, test->entry());
@@ -3777,7 +3777,7 @@
 }
 
 ASSEMBLER_TEST_RUN(SIMDDiv, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef float (*SIMDDiv)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(SIMDDiv, test->entry());
@@ -3802,7 +3802,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vabsqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef float (*Vabsqs)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(Vabsqs, test->entry());
@@ -3827,7 +3827,7 @@
 }
 
 ASSEMBLER_TEST_RUN(Vnegqs, test) {
-  EXPECT(test != NULL);
+  EXPECT(test != nullptr);
   if (TargetCPUFeatures::neon_supported()) {
     typedef float (*Vnegqs)() DART_UNUSED;
     float res = EXECUTE_TEST_CODE_FLOAT(Vnegqs, test->entry());
diff --git a/runtime/vm/compiler/assembler/assembler_base.cc b/runtime/vm/compiler/assembler/assembler_base.cc
index ff1fa89..f3ff9fe 100644
--- a/runtime/vm/compiler/assembler/assembler_base.cc
+++ b/runtime/vm/compiler/assembler/assembler_base.cc
@@ -161,7 +161,7 @@
   contents_ = NewContents(kInitialBufferCapacity);
   cursor_ = contents_;
   limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
-  fixup_ = NULL;
+  fixup_ = nullptr;
 #if defined(DEBUG)
   has_ensured_capacity_ = false;
   fixups_processed_ = false;
@@ -176,7 +176,7 @@
 
 void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) {
   AssemblerFixup* fixup = fixup_;
-  while (fixup != NULL) {
+  while (fixup != nullptr) {
     fixup->Process(region, fixup->position());
     fixup = fixup->previous();
   }
@@ -245,7 +245,7 @@
 intptr_t AssemblerBuffer::CountPointerOffsets() const {
   intptr_t count = 0;
   AssemblerFixup* current = fixup_;
-  while (current != NULL) {
+  while (current != nullptr) {
     if (current->IsPointerOffset()) ++count;
     current = current->previous_;
   }
@@ -266,7 +266,7 @@
 // Shared macros are implemented here.
 void AssemblerBase::Unimplemented(const char* message) {
   const char* format = "Unimplemented: %s";
-  const intptr_t len = Utils::SNPrint(NULL, 0, format, message);
+  const intptr_t len = Utils::SNPrint(nullptr, 0, format, message);
   char* buffer = reinterpret_cast<char*>(malloc(len + 1));
   Utils::SNPrint(buffer, len + 1, format, message);
   Stop(buffer);
@@ -274,7 +274,7 @@
 
 void AssemblerBase::Untested(const char* message) {
   const char* format = "Untested: %s";
-  const intptr_t len = Utils::SNPrint(NULL, 0, format, message);
+  const intptr_t len = Utils::SNPrint(nullptr, 0, format, message);
   char* buffer = reinterpret_cast<char*>(malloc(len + 1));
   Utils::SNPrint(buffer, len + 1, format, message);
   Stop(buffer);
@@ -282,7 +282,7 @@
 
 void AssemblerBase::Unreachable(const char* message) {
   const char* format = "Unreachable: %s";
-  const intptr_t len = Utils::SNPrint(NULL, 0, format, message);
+  const intptr_t len = Utils::SNPrint(nullptr, 0, format, message);
   char* buffer = reinterpret_cast<char*>(malloc(len + 1));
   Utils::SNPrint(buffer, len + 1, format, message);
   Stop(buffer);
@@ -379,15 +379,15 @@
 intptr_t ObjectPoolBuilder::AddObject(ObjectPoolBuilderEntry entry) {
   DEBUG_ASSERT((entry.type() != ObjectPoolBuilderEntry::kTaggedObject) ||
                (IsNotTemporaryScopedHandle(*entry.obj_) &&
-                (entry.equivalence_ == NULL ||
+                (entry.equivalence_ == nullptr ||
                  IsNotTemporaryScopedHandle(*entry.equivalence_))));
 
   if (entry.type() == ObjectPoolBuilderEntry::kTaggedObject) {
     // If the owner of the object pool wrapper specified a specific zone we
     // should use we'll do so.
-    if (zone_ != NULL) {
+    if (zone_ != nullptr) {
       entry.obj_ = &NewZoneHandle(zone_, *entry.obj_);
-      if (entry.equivalence_ != NULL) {
+      if (entry.equivalence_ != nullptr) {
         entry.equivalence_ = &NewZoneHandle(zone_, *entry.equivalence_);
       }
     }
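
Unimplemented, Untested, and Unreachable above all use a measure-then-allocate idiom: the first SNPrint call gets a null buffer and size 0 and only returns the length that would have been written, the buffer is then allocated with room for the terminating NUL, and the second call does the real formatting. Standard snprintf offers the same contract; a self-contained sketch of the idiom (plain C++, not Dart's Utils wrapper):

#include <cstdio>
#include <cstdlib>

int main() {
  const char* format = "Unimplemented: %s";
  const char* message = "some unsupported instruction";
  // First pass: a null buffer and size 0 mean "write nothing, just report
  // how many characters would be needed".
  const int len = std::snprintf(nullptr, 0, format, message);
  char* buffer = static_cast<char*>(std::malloc(len + 1));  // +1 for the NUL.
  std::snprintf(buffer, len + 1, format, message);          // Second pass.
  std::puts(buffer);
  std::free(buffer);
  return 0;
}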
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.cc b/runtime/vm/compiler/assembler/assembler_ia32.cc
index 93618b9..bbfcb65 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.cc
+++ b/runtime/vm/compiler/assembler/assembler_ia32.cc
@@ -2768,7 +2768,7 @@
                                   JumpDistance distance,
                                   Register instance_reg,
                                   Register temp_reg) {
-  ASSERT(failure != NULL);
+  ASSERT(failure != nullptr);
   ASSERT(instance_size != 0);
   ASSERT(Utils::IsAligned(instance_size,
                           target::ObjectAlignment::kObjectAlignment));
@@ -2803,7 +2803,7 @@
                                  Register instance,
                                  Register end_address,
                                  Register temp_reg) {
-  ASSERT(failure != NULL);
+  ASSERT(failure != nullptr);
   ASSERT(temp_reg != kNoRegister);
   if (FLAG_inline_alloc &&
       target::Heap::IsAllocatableInNewSpace(instance_size)) {
diff --git a/runtime/vm/compiler/assembler/assembler_riscv.cc b/runtime/vm/compiler/assembler/assembler_riscv.cc
index 3830963..6c7ffa8 100644
--- a/runtime/vm/compiler/assembler/assembler_riscv.cc
+++ b/runtime/vm/compiler/assembler/assembler_riscv.cc
@@ -4403,7 +4403,7 @@
                                   JumpDistance distance,
                                   Register instance_reg,
                                   Register temp_reg) {
-  ASSERT(failure != NULL);
+  ASSERT(failure != nullptr);
   ASSERT(instance_size != 0);
   ASSERT(instance_reg != temp_reg);
   ASSERT(temp_reg != kNoRegister);
diff --git a/runtime/vm/compiler/assembler/assembler_x64.cc b/runtime/vm/compiler/assembler/assembler_x64.cc
index e1c3120..4d41d9a 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64.cc
@@ -2285,7 +2285,7 @@
                                   JumpDistance distance,
                                   Register instance_reg,
                                   Register temp_reg) {
-  ASSERT(failure != NULL);
+  ASSERT(failure != nullptr);
   ASSERT(instance_size != 0);
   ASSERT(Utils::IsAligned(instance_size,
                           target::ObjectAlignment::kObjectAlignment));
@@ -2320,7 +2320,7 @@
                                  Register instance,
                                  Register end_address,
                                  Register temp) {
-  ASSERT(failure != NULL);
+  ASSERT(failure != nullptr);
   if (FLAG_inline_alloc &&
       target::Heap::IsAllocatableInNewSpace(instance_size)) {
     // If this allocation is traced, program will jump to failure path
diff --git a/runtime/vm/compiler/assembler/disassembler.cc b/runtime/vm/compiler/assembler/disassembler.cc
index 432f3dd..f00c417 100644
--- a/runtime/vm/compiler/assembler/disassembler.cc
+++ b/runtime/vm/compiler/assembler/disassembler.cc
@@ -49,7 +49,7 @@
     }
   }
   THR_Print("%s", human_buffer);
-  if (object != NULL) {
+  if (object != nullptr) {
     THR_Print("   %s", object->ToCString());
   }
   THR_Print("\n");
@@ -118,7 +118,7 @@
   }
   va_list measure_args;
   va_start(measure_args, format);
-  intptr_t len = Utils::VSNPrint(NULL, 0, format, measure_args);
+  intptr_t len = Utils::VSNPrint(nullptr, 0, format, measure_args);
   va_end(measure_args);
   if (remaining_ < len + 100) {
     *buffer_++ = '.';
@@ -149,7 +149,7 @@
   if (comments == nullptr) {
     comments = code.IsNull() ? &Code::Comments::New(0) : &code.comments();
   }
-  ASSERT(formatter != NULL);
+  ASSERT(formatter != nullptr);
   char hex_buffer[kHexadecimalBufferSize];  // Instruction in hexadecimal form.
   char human_buffer[kUserReadableBufferSize];  // Human-readable instruction.
   uword pc = start;
@@ -492,7 +492,7 @@
   jsarr_.AddValue(hex_buffer);
   jsarr_.AddValue(human_buffer);
 
-  if (object != NULL) {
+  if (object != nullptr) {
     jsarr_.AddValue(*object);
   } else {
     jsarr_.AddValueNull();  // Not a reference to null.
@@ -502,7 +502,7 @@
 void DisassembleToJSONStream::Print(const char* format, ...) {
   va_list measure_args;
   va_start(measure_args, format);
-  intptr_t len = Utils::VSNPrint(NULL, 0, format, measure_args);
+  intptr_t len = Utils::VSNPrint(nullptr, 0, format, measure_args);
   va_end(measure_args);
 
   char* p = reinterpret_cast<char*>(malloc(len + 1));
diff --git a/runtime/vm/compiler/assembler/disassembler_arm.cc b/runtime/vm/compiler/assembler/disassembler_arm.cc
index 712416c..95f806c 100644
--- a/runtime/vm/compiler/assembler/disassembler_arm.cc
+++ b/runtime/vm/compiler/assembler/disassembler_arm.cc
@@ -1510,13 +1510,13 @@
     *out_instr_size = Instr::kInstrSize;
   }
 
-  *object = NULL;
+  *object = nullptr;
   // TODO(36839): Make DecodeLoadObjectFromPoolOrThread work on simarm_x64.
 #if !defined(IS_SIMARM_HOST64)
   if (!code.IsNull()) {
     *object = &Object::Handle();
     if (!DecodeLoadObjectFromPoolOrThread(pc, code, *object)) {
-      *object = NULL;
+      *object = nullptr;
     }
   }
 #endif  // !defined(IS_SIMARM_HOST64)
diff --git a/runtime/vm/compiler/assembler/disassembler_arm64.cc b/runtime/vm/compiler/assembler/disassembler_arm64.cc
index 76c6ded..711dc70 100644
--- a/runtime/vm/compiler/assembler/disassembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/disassembler_arm64.cc
@@ -612,7 +612,7 @@
     case 'v': {
       if (format[1] == 's') {
         ASSERT(STRING_STARTS_WITH(format, "vsz"));
-        char const* sz_str = NULL;
+        char const* sz_str = nullptr;
         if (instr->Bits(14, 2) == 3) {
           switch (instr->Bit(22)) {
             case 0:
@@ -1667,11 +1667,11 @@
     *out_instr_size = Instr::kInstrSize;
   }
 
-  *object = NULL;
+  *object = nullptr;
   if (!code.IsNull()) {
     *object = &Object::Handle();
     if (!DecodeLoadObjectFromPoolOrThread(pc, code, *object)) {
-      *object = NULL;
+      *object = nullptr;
     }
   }
 }
diff --git a/runtime/vm/compiler/assembler/disassembler_riscv.cc b/runtime/vm/compiler/assembler/disassembler_riscv.cc
index d217b9b..06e7ef6 100644
--- a/runtime/vm/compiler/assembler/disassembler_riscv.cc
+++ b/runtime/vm/compiler/assembler/disassembler_riscv.cc
@@ -1792,11 +1792,11 @@
     *out_instr_size = instr_size;
   }
 
-  *object = NULL;
+  *object = nullptr;
   if (!code.IsNull()) {
     *object = &Object::Handle();
     if (!DecodeLoadObjectFromPoolOrThread(pc, code, *object)) {
-      *object = NULL;
+      *object = nullptr;
     }
   }
 }
diff --git a/runtime/vm/compiler/assembler/disassembler_x86.cc b/runtime/vm/compiler/assembler/disassembler_x86.cc
index 5b8cf80..11ee3b6 100644
--- a/runtime/vm/compiler/assembler/disassembler_x86.cc
+++ b/runtime/vm/compiler/assembler/disassembler_x86.cc
@@ -202,7 +202,7 @@
   for (uint8_t b = 0x70; b <= 0x7F; b++) {
     InstructionDesc* id = &instructions_[b];
     ASSERT(NO_INSTR == id->type);  // Information not already entered
-    id->mnem = NULL;               // Computed depending on condition code.
+    id->mnem = nullptr;            // Computed depending on condition code.
     id->type = JUMP_CONDITIONAL_SHORT_INSTR;
   }
 }
@@ -592,7 +592,7 @@
   } else {
     Print("+%#x", disp);
   }
-  if (after != NULL) Print("%s", after);
+  if (after != nullptr) Print("%s", after);
 }
 
 // Returns number of bytes used by machine instruction, including *data byte.
@@ -646,8 +646,8 @@
   uint8_t modrm = *(data + 1);
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
-  static const char* const mnemonics[] = {"test", NULL,   "not", "neg",
-                                          "mul",  "imul", "div", "idiv"};
+  static const char* const mnemonics[] = {"test", nullptr, "not", "neg",
+                                          "mul",  "imul",  "div", "idiv"};
   const char* mnem = mnemonics[regop];
   if (mod == 3 && regop != 0) {
     if (regop > 3) {
@@ -689,7 +689,7 @@
   get_modrm(*modrm, &mod, &regop, &rm);
   regop &= 0x7;  // The REX.R bit does not affect the operation.
   int num_bytes = 1;
-  const char* mnem = NULL;
+  const char* mnem = nullptr;
   switch (regop) {
     case 0:
       mnem = "rol";
@@ -716,7 +716,7 @@
       UnimplementedInstruction();
       return num_bytes;
   }
-  ASSERT(NULL != mnem);
+  ASSERT(nullptr != mnem);
   Print("%s%s ", mnem, operand_size_code());
   if (byte_size_operand_) {
     num_bytes += PrintRightByteOperand(modrm);
@@ -801,7 +801,7 @@
   // Try to print as stub name.
   uword addr = reinterpret_cast<uword>(addr_byte_ptr);
   const char* name_of_stub = StubCode::NameOfStub(addr);
-  if (name_of_stub != NULL) {
+  if (name_of_stub != nullptr) {
     Print("  [stub: %s]", name_of_stub);
   }
 }
@@ -1534,10 +1534,11 @@
     current += PrintOperands(idesc.mnem, idesc.op_order_, current);
   } else if (0x10 <= opcode && opcode <= 0x16) {
     // ...ps xmm, xmm/m128
-    static const char* const mnemonics[] = {
-        "movups", NULL, "movhlps", NULL, "unpcklps", "unpckhps", "movlhps"};
+    static const char* const mnemonics[] = {"movups", nullptr,    "movhlps",
+                                            nullptr,  "unpcklps", "unpckhps",
+                                            "movlhps"};
     const char* mnemonic = mnemonics[opcode - 0x10];
-    if (mnemonic == NULL) {
+    if (mnemonic == nullptr) {
       UnimplementedInstruction();
       mnemonic = "???";
     }
@@ -1610,7 +1611,7 @@
 
 // Mnemonics for two-byte opcode instructions starting with 0x0F.
 // The argument is the second byte of the two-byte opcode.
-// Returns NULL if the instruction is not handled here.
+// Returns nullptr if the instruction is not handled here.
 const char* DisassemblerX64::TwoByteMnemonic(uint8_t opcode) {
   if (opcode == 0x5A) {
     return "cvtps2pd";
@@ -1619,11 +1620,11 @@
   }
   if (0xA2 <= opcode && opcode <= 0xBF) {
     static const char* const mnemonics[] = {
-        "cpuid", "bt",   "shld",    "shld",    NULL,     NULL,
-        NULL,    NULL,   NULL,      "bts",     "shrd",   "shrd",
-        NULL,    "imul", "cmpxchg", "cmpxchg", NULL,     NULL,
-        NULL,    NULL,   "movzxb",  "movzxw",  "popcnt", NULL,
-        NULL,    NULL,   "bsf",     "bsr",     "movsxb", "movsxw"};
+        "cpuid", "bt",    "shld",    "shld",    nullptr,  nullptr,
+        nullptr, nullptr, nullptr,   "bts",     "shrd",   "shrd",
+        nullptr, "imul",  "cmpxchg", "cmpxchg", nullptr,  nullptr,
+        nullptr, nullptr, "movzxb",  "movzxw",  "popcnt", nullptr,
+        nullptr, nullptr, "bsf",     "bsr",     "movsxb", "movsxw"};
     return mnemonics[opcode - 0xA2];
   }
   switch (opcode) {
@@ -1638,7 +1639,7 @@
     case 0x31:
       return "rdtsc";
     default:
-      return NULL;
+      return nullptr;
   }
 }
 
@@ -1699,7 +1700,7 @@
         data++;
         int mod, regop, rm;
         get_modrm(*data, &mod, &regop, &rm);
-        const char* mnem = NULL;
+        const char* mnem = nullptr;
         switch (regop) {
           case 0:
             mnem = "inc";
@@ -2013,12 +2014,12 @@
     *out_instr_len = instruction_length;
   }
 
-  *object = NULL;
+  *object = nullptr;
 #if defined(TARGET_ARCH_X64)
   if (!code.IsNull()) {
     *object = &Object::Handle();
     if (!DecodeLoadObjectFromPoolOrThread(pc, code, *object)) {
-      *object = NULL;
+      *object = nullptr;
     }
   }
 #else
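
TwoByteMnemonic and the other mnemonic tables above use nullptr entries to mean "no mnemonic here; fall back to other decoding", and the caller checks for the gap before printing. A tiny sketch of the same table-with-gaps pattern (made-up opcodes and mnemonics, not the real x86 tables):

#include <cstdint>
#include <iostream>

static const char* MnemonicFor(uint8_t opcode) {
  static const char* const kTable[] = {"nop", nullptr, "inc", nullptr, "dec"};
  if (opcode >= sizeof(kTable) / sizeof(kTable[0])) return nullptr;
  return kTable[opcode];  // May be nullptr: the caller must handle the gap.
}

int main() {
  for (uint8_t op = 0; op < 6; op++) {
    const char* mnem = MnemonicFor(op);
    std::cout << static_cast<int>(op) << ": "
              << (mnem != nullptr ? mnem : "<unhandled>") << "\n";
  }
  return 0;
}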
diff --git a/runtime/vm/compiler/assembler/object_pool_builder.h b/runtime/vm/compiler/assembler/object_pool_builder.h
index 5db42e4..77fa9be 100644
--- a/runtime/vm/compiler/assembler/object_pool_builder.h
+++ b/runtime/vm/compiler/assembler/object_pool_builder.h
@@ -254,8 +254,8 @@
   DirectChainedHashMap<ObjIndexPair> object_pool_index_table_;
 
   // The zone used for allocating the handles we keep in the map and array (or
-  // NULL, in which case allocations happen using the zone active at the point
-  // of insertion).
+  // nullptr, in which case allocations happen using the zone active at the
+  // point of insertion).
   Zone* zone_;
 };
 
diff --git a/runtime/vm/compiler/backend/block_scheduler.cc b/runtime/vm/compiler/backend/block_scheduler.cc
index 7fb1408..3ecb67a 100644
--- a/runtime/vm/compiler/backend/block_scheduler.cc
+++ b/runtime/vm/compiler/backend/block_scheduler.cc
@@ -117,7 +117,7 @@
 // a length to support adding a shorter chain's links to a longer chain.
 struct Chain : public ZoneAllocated {
   explicit Chain(BlockEntryInstr* block)
-      : first(new Link(block, NULL)), last(first), length(1) {}
+      : first(new Link(block, nullptr)), last(first), length(1) {}
 
   Link* first;
   Link* last;
@@ -137,7 +137,7 @@
                   Chain* source_chain,
                   Chain* target_chain) {
   if (source_chain->length < target_chain->length) {
-    for (Link* link = source_chain->first; link != NULL; link = link->next) {
+    for (Link* link = source_chain->first; link != nullptr; link = link->next) {
       (*chains)[link->block->postorder_number()] = target_chain;
     }
     // Link the chains.
@@ -146,7 +146,7 @@
     target_chain->first = source_chain->first;
     target_chain->length += source_chain->length;
   } else {
-    for (Link* link = target_chain->first; link != NULL; link = link->next) {
+    for (Link* link = target_chain->first; link != nullptr; link = link->next) {
       (*chains)[link->block->postorder_number()] = source_chain;
     }
     source_chain->last->next = target_chain->first;
@@ -229,7 +229,7 @@
   // sort of the blocks).
   for (intptr_t i = block_count - 1; i >= 0; --i) {
     if (chains[i]->first->block == flow_graph->postorder()[i]) {
-      for (Link* link = chains[i]->first; link != NULL; link = link->next) {
+      for (Link* link = chains[i]->first; link != nullptr; link = link->next) {
         if ((link->block != checked_entry) && (link->block != graph_entry)) {
           flow_graph->CodegenBlockOrder(true)->Add(link->block);
         }
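
The chain-merging hunks above relabel the members of the shorter chain before splicing the two lists together, so the per-block map update cost follows the smaller side. A simplified, self-contained sketch of that idea, with plain ints standing in for blocks and std::list in place of the VM's Chain/Link types:

#include <iostream>
#include <list>
#include <unordered_map>

using Chain = std::list<int>;

// Joins two chains, rewriting the owner map only for the shorter chain's
// members.
static void Union(std::unordered_map<int, Chain*>* owner, Chain* source,
                  Chain* target) {
  if (source->size() < target->size()) {
    for (int block : *source) (*owner)[block] = target;
    target->splice(target->begin(), *source);  // Source links go in front.
  } else {
    for (int block : *target) (*owner)[block] = source;
    source->splice(source->end(), *target);    // Target links go behind.
  }
}

int main() {
  Chain c1 = {0, 1};
  Chain c2 = {2, 3, 4};
  std::unordered_map<int, Chain*> owner = {
      {0, &c1}, {1, &c1}, {2, &c2}, {3, &c2}, {4, &c2}};
  Union(&owner, &c1, &c2);                         // c1 is shorter.
  for (int block : c2) std::cout << block << " ";  // Prints: 0 1 2 3 4
  std::cout << "\n" << (owner[0] == &c2) << "\n";  // 1: block 0 now maps to c2.
  return 0;
}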
diff --git a/runtime/vm/compiler/backend/branch_optimizer.cc b/runtime/vm/compiler/backend/branch_optimizer.cc
index 23511b4..1622206 100644
--- a/runtime/vm/compiler/backend/branch_optimizer.cc
+++ b/runtime/vm/compiler/backend/branch_optimizer.cc
@@ -13,12 +13,12 @@
 // is used in the environments either at the corresponding block entry or
 // at the same instruction where input use is.
 static bool PhiHasSingleUse(PhiInstr* phi, Value* use) {
-  if ((use->next_use() != NULL) || (phi->input_use_list() != use)) {
+  if ((use->next_use() != nullptr) || (phi->input_use_list() != use)) {
     return false;
   }
 
   BlockEntryInstr* block = phi->block();
-  for (Value* env_use = phi->env_use_list(); env_use != NULL;
+  for (Value* env_use = phi->env_use_list(); env_use != nullptr;
        env_use = env_use->next_use()) {
     if ((env_use->instruction() != block) &&
         (env_use->instruction() != use->instruction())) {
@@ -40,7 +40,7 @@
   // has no other phis and no instructions intervening between the phi and
   // branch so the block can simply be eliminated.
   BranchInstr* branch = block->last_instruction()->AsBranch();
-  ASSERT(branch != NULL);
+  ASSERT(branch != nullptr);
   ComparisonInstr* comparison = branch->comparison();
   if (comparison->InputCount() != 2) {
     return false;
@@ -52,10 +52,10 @@
   PhiInstr* phi = left->definition()->AsPhi();
   Value* right = comparison->right();
   ConstantInstr* constant =
-      (right == NULL) ? NULL : right->definition()->AsConstant();
-  return (phi != NULL) && (constant != NULL) && (phi->GetBlock() == block) &&
-         PhiHasSingleUse(phi, left) && (block->next() == branch) &&
-         (block->phis()->length() == 1);
+      (right == nullptr) ? nullptr : right->definition()->AsConstant();
+  return (phi != nullptr) && (constant != nullptr) &&
+         (phi->GetBlock() == block) && PhiHasSingleUse(phi, left) &&
+         (block->next() == branch) && (block->phis()->length() == 1);
 }
 
 JoinEntryInstr* BranchSimplifier::ToJoinEntry(Zone* zone,
@@ -122,7 +122,7 @@
   while (!worklist.is_empty()) {
     // All blocks in the worklist are join blocks (ending with a branch).
     JoinEntryInstr* block = worklist.RemoveLast()->AsJoinEntry();
-    ASSERT(block != NULL);
+    ASSERT(block != nullptr);
 
     if (Match(block)) {
       changed = true;
@@ -136,19 +136,19 @@
       // instance of the pattern.  There is thus no need to add it to the
       // worklist.
       BranchInstr* branch = block->last_instruction()->AsBranch();
-      ASSERT(branch != NULL);
+      ASSERT(branch != nullptr);
       JoinEntryInstr* join_true = ToJoinEntry(zone, branch->true_successor());
       JoinEntryInstr* join_false = ToJoinEntry(zone, branch->false_successor());
 
       ComparisonInstr* comparison = branch->comparison();
       PhiInstr* phi = comparison->left()->definition()->AsPhi();
       ConstantInstr* constant = comparison->right()->definition()->AsConstant();
-      ASSERT(constant != NULL);
+      ASSERT(constant != nullptr);
       // Copy the constant and branch and push it to all the predecessors.
       for (intptr_t i = 0, count = block->PredecessorCount(); i < count; ++i) {
         GotoInstr* old_goto =
             block->PredecessorAt(i)->last_instruction()->AsGoto();
-        ASSERT(old_goto != NULL);
+        ASSERT(old_goto != nullptr);
 
         // Replace the goto in each predecessor with a rewritten branch,
         // rewritten to use the corresponding phi input instead of the phi.
@@ -156,7 +156,7 @@
         Value* new_right = new (zone) Value(constant);
         BranchInstr* new_branch =
             CloneBranch(zone, branch, new_left, new_right);
-        if (branch->env() == NULL) {
+        if (branch->env() == nullptr) {
           new_branch->InheritDeoptTarget(zone, old_goto);
         } else {
           // Take the environment from the branch if it has one.
@@ -172,7 +172,7 @@
         }
 
         new_branch->InsertBefore(old_goto);
-        new_branch->set_next(NULL);  // Detaching the goto from the graph.
+        new_branch->set_next(nullptr);  // Detaching the goto from the graph.
         old_goto->UnuseAllInputs();
 
         // Update the predecessor block.  We may have created another
@@ -268,7 +268,7 @@
     // Ba:
     //   v3 = IfThenElse(COMP ? v1 : v2)
     //
-    if ((join != NULL) && (join->phis() != NULL) &&
+    if ((join != nullptr) && (join->phis() != nullptr) &&
         (join->phis()->length() == 1) && (block->PredecessorCount() == 2)) {
       BlockEntryInstr* pred1 = block->PredecessorAt(0);
       BlockEntryInstr* pred2 = block->PredecessorAt(1);
@@ -305,7 +305,7 @@
           IfThenElseInstr* if_then_else =
               new (zone) IfThenElseInstr(new_comparison, if_true->Copy(zone),
                                          if_false->Copy(zone), DeoptId::kNone);
-          flow_graph->InsertBefore(branch, if_then_else, NULL,
+          flow_graph->InsertBefore(branch, if_then_else, nullptr,
                                    FlowGraph::kValue);
 
           phi->ReplaceUsesWith(if_then_else);
diff --git a/runtime/vm/compiler/backend/code_statistics.cc b/runtime/vm/compiler/backend/code_statistics.cc
index 3a0621b..14d416c 100644
--- a/runtime/vm/compiler/backend/code_statistics.cc
+++ b/runtime/vm/compiler/backend/code_statistics.cc
@@ -171,7 +171,7 @@
   const intptr_t unaligned_bytes = Instructions::HeaderSize() + function_size;
   alignment_bytes_ =
       Utils::RoundUp(unaligned_bytes, kObjectAlignment) - unaligned_bytes;
-  assembler_ = NULL;
+  assembler_ = nullptr;
 }
 
 void CodeStatistics::AppendTo(CombinedCodeStatistics* stat) {
diff --git a/runtime/vm/compiler/backend/constant_propagator.cc b/runtime/vm/compiler/backend/constant_propagator.cc
index d591104..564a23a 100644
--- a/runtime/vm/compiler/backend/constant_propagator.cc
+++ b/runtime/vm/compiler/backend/constant_propagator.cc
@@ -72,7 +72,7 @@
   // heap-allocated and so not necessarily pointer-equal on each iteration).
   if (definition->constant_value().ptr() != value.ptr()) {
     definition->constant_value() = value.ptr();
-    if (definition->input_use_list() != NULL) {
+    if (definition->input_use_list() != nullptr) {
       definition_worklist_.Add(definition);
     }
     return true;
@@ -245,7 +245,7 @@
   // might be analyzing it because the constant value of one of its inputs
   // has changed.)
   if (reachable_->Contains(instr->GetBlock()->preorder_number())) {
-    if (instr->constant_target() != NULL) {
+    if (instr->constant_target() != nullptr) {
       ASSERT((instr->constant_target() == instr->true_successor()) ||
              (instr->constant_target() == instr->false_successor()));
       SetReachable(instr->constant_target());
@@ -305,10 +305,10 @@
   if (defn->IsPhi()) {
     JoinEntryInstr* block = defn->AsPhi()->block();
 
-    Definition* input = NULL;
+    Definition* input = nullptr;
     for (intptr_t i = 0; i < defn->InputCount(); ++i) {
       if (reachable_->Contains(block->PredecessorAt(i)->preorder_number())) {
-        if (input == NULL) {
+        if (input == nullptr) {
           input = defn->InputAt(i)->definition();
         } else {
           return defn;
@@ -1569,8 +1569,8 @@
     BlockEntryInstr* block = b.Current();
     BranchInstr* branch = block->last_instruction()->AsBranch();
     empty_blocks->Clear();
-    if ((branch != NULL) && !branch->HasUnknownSideEffects()) {
-      ASSERT(branch->previous() != NULL);  // Not already eliminated.
+    if ((branch != nullptr) && !branch->HasUnknownSideEffects()) {
+      ASSERT(branch->previous() != nullptr);  // Not already eliminated.
       BlockEntryInstr* if_true =
           FindFirstNonEmptySuccessor(branch->true_successor(), empty_blocks);
       BlockEntryInstr* if_false =
@@ -1584,7 +1584,7 @@
           graph_->CopyDeoptTarget(jump, branch);
 
           Instruction* previous = branch->previous();
-          branch->set_previous(NULL);
+          branch->set_previous(nullptr);
           previous->LinkTo(jump);
 
           // Remove uses from branch and all the empty blocks that
@@ -1633,12 +1633,12 @@
     }
 
     JoinEntryInstr* join = block->AsJoinEntry();
-    if (join != NULL) {
+    if (join != nullptr) {
       // Remove phi inputs corresponding to unreachable predecessor blocks.
       // Predecessors will be recomputed (in block id order) after removing
       // unreachable code so we merely have to keep the phi inputs in order.
       ZoneGrowableArray<PhiInstr*>* phis = join->phis();
-      if ((phis != NULL) && !phis->is_empty()) {
+      if ((phis != nullptr) && !phis->is_empty()) {
         intptr_t pred_count = join->PredecessorCount();
         intptr_t live_count = 0;
         for (intptr_t pred_idx = 0; pred_idx < pred_count; ++pred_idx) {
@@ -1647,7 +1647,7 @@
             if (live_count < pred_idx) {
               for (PhiIterator it(join); !it.Done(); it.Advance()) {
                 PhiInstr* phi = it.Current();
-                ASSERT(phi != NULL);
+                ASSERT(phi != nullptr);
                 phi->SetInputAt(live_count, phi->InputAt(pred_idx));
               }
             }
@@ -1655,7 +1655,7 @@
           } else {
             for (PhiIterator it(join); !it.Done(); it.Advance()) {
               PhiInstr* phi = it.Current();
-              ASSERT(phi != NULL);
+              ASSERT(phi != nullptr);
               phi->InputAt(pred_idx)->RemoveFromUseList();
             }
           }
@@ -1664,7 +1664,7 @@
           intptr_t to_idx = 0;
           for (intptr_t from_idx = 0; from_idx < phis->length(); ++from_idx) {
             PhiInstr* phi = (*phis)[from_idx];
-            ASSERT(phi != NULL);
+            ASSERT(phi != nullptr);
             if (FLAG_remove_redundant_phis && (live_count == 1)) {
               Value* input = phi->InputAt(0);
               phi->ReplaceUsesWith(input->definition());
@@ -1675,7 +1675,7 @@
             }
           }
           if (to_idx == 0) {
-            join->phis_ = NULL;
+            join->phis_ = nullptr;
           } else {
             phis->TruncateTo(to_idx);
           }
@@ -1700,22 +1700,22 @@
 
     // Replace branches where one target is unreachable with jumps.
     BranchInstr* branch = block->last_instruction()->AsBranch();
-    if (branch != NULL) {
+    if (branch != nullptr) {
       TargetEntryInstr* if_true = branch->true_successor();
       TargetEntryInstr* if_false = branch->false_successor();
-      JoinEntryInstr* join = NULL;
-      Instruction* next = NULL;
+      JoinEntryInstr* join = nullptr;
+      Instruction* next = nullptr;
 
       if (!reachable_->Contains(if_true->preorder_number())) {
         ASSERT(reachable_->Contains(if_false->preorder_number()));
-        ASSERT(if_false->parallel_move() == NULL);
+        ASSERT(if_false->parallel_move() == nullptr);
         join = new (Z) JoinEntryInstr(if_false->block_id(),
                                       if_false->try_index(), DeoptId::kNone);
         graph_->CopyDeoptTarget(join, if_false);
         if_false->UnuseAllInputs();
         next = if_false->next();
       } else if (!reachable_->Contains(if_false->preorder_number())) {
-        ASSERT(if_true->parallel_move() == NULL);
+        ASSERT(if_true->parallel_move() == nullptr);
         join = new (Z) JoinEntryInstr(if_true->block_id(), if_true->try_index(),
                                       DeoptId::kNone);
         graph_->CopyDeoptTarget(join, if_true);
@@ -1723,7 +1723,7 @@
         next = if_true->next();
       }
 
-      if (join != NULL) {
+      if (join != nullptr) {
         // Replace the branch with a jump to the reachable successor.
         // Drop the comparison, which does not have side effects as long
         // as it is a strict compare (the only one we can determine is
@@ -1732,7 +1732,7 @@
         graph_->CopyDeoptTarget(jump, branch);
 
         Instruction* previous = branch->previous();
-        branch->set_previous(NULL);
+        branch->set_previous(nullptr);
         previous->LinkTo(jump);
 
         // Replace the false target entry with the new join entry. We will
diff --git a/runtime/vm/compiler/backend/flow_graph.cc b/runtime/vm/compiler/backend/flow_graph.cc
index 7b16a61..2148202 100644
--- a/runtime/vm/compiler/backend/flow_graph.cc
+++ b/runtime/vm/compiler/backend/flow_graph.cc
@@ -139,9 +139,9 @@
                                           Instruction* current,
                                           Instruction* replacement) {
   Definition* current_defn = current->AsDefinition();
-  if ((replacement != NULL) && (current_defn != NULL)) {
+  if ((replacement != nullptr) && (current_defn != nullptr)) {
     Definition* replacement_defn = replacement->AsDefinition();
-    ASSERT(replacement_defn != NULL);
+    ASSERT(replacement_defn != nullptr);
     current_defn->ReplaceUsesWith(replacement_defn);
     EnsureSSATempIndex(current_defn, replacement_defn);
 
@@ -151,7 +151,7 @@
                 replacement_defn->ssa_temp_index());
     }
   } else if (FLAG_trace_optimization) {
-    if (current_defn == NULL) {
+    if (current_defn == nullptr) {
       THR_Print("Removing %s\n", current->DebugName());
     } else {
       ASSERT(!current_defn->HasUses());
@@ -275,8 +275,8 @@
     AllocateSSAIndex(instr->AsDefinition());
   }
   instr->InsertAfter(prev);
-  ASSERT(instr->env() == NULL);
-  if (env != NULL) {
+  ASSERT(instr->env() == nullptr);
+  if (env != nullptr) {
     env->DeepCopyTo(zone(), instr);
   }
 }
@@ -299,8 +299,8 @@
     ASSERT(instr->IsDefinition());
     AllocateSSAIndex(instr->AsDefinition());
   }
-  ASSERT(instr->env() == NULL);
-  if (env != NULL) {
+  ASSERT(instr->env() == nullptr);
+  if (env != nullptr) {
     env->DeepCopyTo(zone(), instr);
   }
   return prev->AppendInstruction(instr);
@@ -351,7 +351,7 @@
   parent_.Clear();
 
   GrowableArray<BlockTraversalState> block_stack;
-  graph_entry_->DiscoverBlock(NULL, &preorder_, &parent_);
+  graph_entry_->DiscoverBlock(nullptr, &preorder_, &parent_);
   block_stack.Add(BlockTraversalState(graph_entry_));
   while (!block_stack.is_empty()) {
     BlockTraversalState& state = block_stack.Last();
@@ -463,7 +463,7 @@
     PhiInstr* phi = unmark.RemoveLast();
     for (Value::Iterator it(phi->input_use_list()); !it.Done(); it.Advance()) {
       PhiInstr* use = it.Current()->instruction()->AsPhi();
-      if ((use != NULL) && (use->is_receiver() == PhiInstr::kReceiver)) {
+      if ((use != nullptr) && (use->is_receiver() == PhiInstr::kReceiver)) {
         use->set_is_receiver(PhiInstr::kNotReceiver);
         unmark.Add(use);
       }
@@ -648,7 +648,7 @@
     for (ForwardInstructionIterator instr_it(block_it.Current());
          !instr_it.Done(); instr_it.Advance()) {
       RedefinitionInstr* redefinition = instr_it.Current()->AsRedefinition();
-      if (redefinition != NULL) {
+      if (redefinition != nullptr) {
         Definition* original = redefinition->value()->definition();
         for (Value::Iterator it(original->input_use_list()); !it.Done();
              it.Advance()) {
@@ -683,10 +683,10 @@
   BitVector* live_out = live_out_[block.postorder_number()];
   bool changed = false;
   Instruction* last = block.last_instruction();
-  ASSERT(last != NULL);
+  ASSERT(last != nullptr);
   for (intptr_t i = 0; i < last->SuccessorCount(); i++) {
     BlockEntryInstr* succ = last->SuccessorAt(i);
-    ASSERT(succ != NULL);
+    ASSERT(succ != nullptr);
     if (live_out->AddAll(live_in_[succ->postorder_number()])) {
       changed = true;
     }
@@ -861,7 +861,7 @@
       Instruction* current = it.Current();
 
       LoadLocalInstr* load = current->AsLoadLocal();
-      if (load != NULL) {
+      if (load != nullptr) {
         const intptr_t index = flow_graph_->EnvIndex(&load->local());
         if (index >= live_in->length()) continue;  // Skip tmp_locals.
         live_in->Add(index);
@@ -873,7 +873,7 @@
       }
 
       StoreLocalInstr* store = current->AsStoreLocal();
-      if (store != NULL) {
+      if (store != nullptr) {
         const intptr_t index = flow_graph_->EnvIndex(&store->local());
         if (index >= live_in->length()) continue;  // Skip tmp_locals.
         if (kill->Contains(index)) {
@@ -1012,7 +1012,7 @@
     block->ClearDominatedBlocks();
     for (intptr_t i = 0, count = block->PredecessorCount(); i < count; ++i) {
       BlockEntryInstr* pred = block->PredecessorAt(i);
-      ASSERT(pred != NULL);
+      ASSERT(pred != nullptr);
 
       // Look for the semidominator by ascending the semidominator path
       // starting from pred.
@@ -1210,7 +1210,7 @@
 
   // Check if inlining_parameters include a type argument vector parameter.
   const intptr_t inlined_type_args_param =
-      ((inlining_parameters != NULL) && function().IsGeneric()) ? 1 : 0;
+      ((inlining_parameters != nullptr) && function().IsGeneric()) ? 1 : 0;
 
   ASSERT(variable_count() == env->length());
   ASSERT(direct_parameter_count <= env->length());
@@ -1249,7 +1249,7 @@
   // inlining arguments, type parameter, args descriptor, context, ...)
   {
     // Replace parameter slots with inlining definitions coming in.
-    if (inlining_parameters != NULL) {
+    if (inlining_parameters != nullptr) {
       for (intptr_t i = 0; i < function().NumParameters(); ++i) {
         Definition* defn = (*inlining_parameters)[inlined_type_args_param + i];
         if (defn->IsConstant()) {
@@ -1268,9 +1268,9 @@
     // Replace the type arguments slot with a special parameter.
     const bool reify_generic_argument = function().IsGeneric();
     if (reify_generic_argument) {
-      ASSERT(parsed_function().function_type_arguments() != NULL);
+      ASSERT(parsed_function().function_type_arguments() != nullptr);
       Definition* defn;
-      if (inlining_parameters == NULL) {
+      if (inlining_parameters == nullptr) {
         // Note: If we are not inlining, then the prologue builder will
         // take care of checking that we got the correct reified type
         // arguments.  This includes checking the argument descriptor in order
@@ -1476,7 +1476,7 @@
 
     // 2b. Handle LoadLocal/StoreLocal/MakeTemp/DropTemps/Constant specially.
     // Other definitions are just pushed to the environment directly.
-    Definition* result = NULL;
+    Definition* result = nullptr;
     switch (current->tag()) {
       case Instruction::kLoadLocal: {
         LoadLocalInstr* load = current->Cast<LoadLocalInstr>();
@@ -1488,7 +1488,7 @@
         result = (*env)[index];
 
         PhiInstr* phi = result->AsPhi();
-        if ((phi != NULL) && !phi->is_alive()) {
+        if ((phi != nullptr) && !phi->is_alive()) {
           phi->mark_alive();
           live_phis->Add(phi);
         }
@@ -1553,10 +1553,10 @@
         for (intptr_t j = 0; j < drop->num_temps(); j++) {
           env->RemoveLast();
         }
-        if (drop->value() != NULL) {
+        if (drop->value() != nullptr) {
           result = drop->value()->definition();
         }
-        ASSERT((drop->value() != NULL) || !drop->HasTemp());
+        ASSERT((drop->value() != nullptr) || !drop->HasTemp());
         break;
       }
 
@@ -1637,7 +1637,7 @@
         block_entry->last_instruction()->SuccessorAt(0)->AsJoinEntry();
     intptr_t pred_index = successor->IndexOfPredecessor(block_entry);
     ASSERT(pred_index >= 0);
-    if (successor->phis() != NULL) {
+    if (successor->phis() != nullptr) {
       for (intptr_t i = 0; i < successor->phis()->length(); ++i) {
         PhiInstr* phi = (*successor->phis())[i];
         if (phi != nullptr) {
@@ -1668,7 +1668,7 @@
         last_instruction->SuccessorAt(0)->IsJoinEntry()) {
       JoinEntryInstr* successor =
           last_instruction->SuccessorAt(0)->AsJoinEntry();
-      if (successor->phis() != NULL) {
+      if (successor->phis() != nullptr) {
         for (intptr_t j = 0; j < successor->phis()->length(); ++j) {
           PhiInstr* phi = (*successor->phis())[j];
           if (phi == nullptr && !IsImmortalVariable(j)) {
@@ -1699,11 +1699,12 @@
   if (!graph_entry()->catch_entries().is_empty()) {
     for (BlockIterator it(postorder_iterator()); !it.Done(); it.Advance()) {
       JoinEntryInstr* join = it.Current()->AsJoinEntry();
-      if (join == NULL) continue;
+      if (join == nullptr) continue;
       for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) {
         PhiInstr* phi = phi_it.Current();
-        if (phi == NULL || phi->is_alive() || (phi->input_use_list() != NULL) ||
-            (phi->env_use_list() == NULL)) {
+        if (phi == nullptr || phi->is_alive() ||
+            (phi->input_use_list() != nullptr) ||
+            (phi->env_use_list() == nullptr)) {
           continue;
         }
         for (Value::Iterator it(phi->env_use_list()); !it.Done();
@@ -1725,7 +1726,7 @@
     for (intptr_t i = 0; i < phi->InputCount(); i++) {
       Value* val = phi->InputAt(i);
       PhiInstr* used_phi = val->definition()->AsPhi();
-      if ((used_phi != NULL) && !used_phi->is_alive()) {
+      if ((used_phi != nullptr) && !used_phi->is_alive()) {
         used_phi->mark_alive();
         live_phis->Add(used_phi);
       }
@@ -1734,7 +1735,7 @@
 
   for (BlockIterator it(postorder_iterator()); !it.Done(); it.Advance()) {
     JoinEntryInstr* join = it.Current()->AsJoinEntry();
-    if (join != NULL) join->RemoveDeadPhis(constant_dead());
+    if (join != nullptr) join->RemoveDeadPhis(constant_dead());
   }
 }
 
@@ -1910,7 +1911,7 @@
   ASSERT(from != to);
   Instruction* insert_before;
   PhiInstr* phi = use->instruction()->AsPhi();
-  if (phi != NULL) {
+  if (phi != nullptr) {
     ASSERT(phi->is_alive());
     // For phis conversions have to be inserted in the predecessor.
     auto predecessor = phi->block()->PredecessorAt(use->use_index());
@@ -1926,7 +1927,7 @@
     deopt_target = insert_before;
   }
 
-  Definition* converted = NULL;
+  Definition* converted = nullptr;
   if (IsUnboxedInteger(from) && IsUnboxedInteger(to)) {
     const intptr_t deopt_id = (to == kUnboxedInt32) && (deopt_target != nullptr)
                                   ? deopt_target->DeoptimizationTarget()
@@ -2244,7 +2245,7 @@
   for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
        block_it.Advance()) {
     JoinEntryInstr* join_entry = block_it.Current()->AsJoinEntry();
-    if (join_entry != NULL) {
+    if (join_entry != nullptr) {
       for (PhiIterator it(join_entry); !it.Done(); it.Advance()) {
         PhiInstr* phi = it.Current();
         phi_unboxing_heuristic.Process(phi);
@@ -2276,14 +2277,14 @@
     if (JoinEntryInstr* join_entry = entry->AsJoinEntry()) {
       for (PhiIterator it(join_entry); !it.Done(); it.Advance()) {
         PhiInstr* phi = it.Current();
-        ASSERT(phi != NULL);
+        ASSERT(phi != nullptr);
         ASSERT(phi->is_alive());
         InsertConversionsFor(phi);
       }
     }
     for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
       Definition* def = it.Current()->AsDefinition();
-      if (def != NULL) {
+      if (def != nullptr) {
         InsertConversionsFor(def);
       }
     }
@@ -2337,7 +2338,7 @@
     for (ForwardInstructionIterator instr_it(block_it.Current());
          !instr_it.Done(); instr_it.Advance()) {
       BinarySmiOpInstr* smi_op = instr_it.Current()->AsBinarySmiOp();
-      if ((smi_op != NULL) && smi_op->HasSSATemp() &&
+      if ((smi_op != nullptr) && smi_op->HasSSATemp() &&
           BenefitsFromWidening(smi_op) && CanBeWidened(smi_op)) {
         candidates.Add(smi_op);
       }
@@ -2427,11 +2428,11 @@
       }
 
       // Process all uses.
-      for (Value* use = defn->input_use_list(); use != NULL;
+      for (Value* use = defn->input_use_list(); use != nullptr;
            use = use->next_use()) {
         Instruction* instr = use->instruction();
         Definition* use_defn = instr->AsDefinition();
-        if (use_defn == NULL) {
+        if (use_defn == nullptr) {
           // We assume that tagging before returning or pushing argument costs
           // very little compared to the cost of the return/call itself.
           ASSERT(!instr->IsMoveArgument());
@@ -2495,7 +2496,7 @@
               smi_op->op_kind(), smi_op->left()->CopyWithType(),
               smi_op->right()->CopyWithType(), smi_op->DeoptimizationTarget());
 
-          smi_op->ReplaceWith(int32_op, NULL);
+          smi_op->ReplaceWith(int32_op, nullptr);
         } else if (defn->IsPhi()) {
           defn->AsPhi()->set_representation(kUnboxedInt32);
           ASSERT(defn->Type()->IsInt());
@@ -2579,7 +2580,7 @@
       Instruction* replacement = current->Canonicalize(this);
 
       if (replacement != current) {
-        // For non-definitions Canonicalize should return either NULL or
+        // For non-definitions Canonicalize should return either nullptr or
         // this.
         if (replacement != nullptr) {
           ASSERT(current->IsDefinition());
@@ -2696,7 +2697,7 @@
   Instruction* instr = use->instruction();
 
   PhiInstr* phi = instr->AsPhi();
-  if (phi != NULL) {
+  if (phi != nullptr) {
     return dom_block->Dominates(phi->block()->PredecessorAt(use->use_index()));
   }
 
@@ -2705,7 +2706,8 @@
     // Fast path for the case of block entry.
     if (dom_block == dom) return true;
 
-    for (Instruction* curr = dom->next(); curr != NULL; curr = curr->next()) {
+    for (Instruction* curr = dom->next(); curr != nullptr;
+         curr = curr->next()) {
       if (curr == instr) return true;
     }
 
@@ -2750,7 +2752,7 @@
 
 static bool IsPositiveOrZeroSmiConst(Definition* d) {
   ConstantInstr* const_instr = d->AsConstant();
-  if ((const_instr != NULL) && (const_instr->value().IsSmi())) {
+  if ((const_instr != nullptr) && (const_instr->value().IsSmi())) {
     return Smi::Cast(const_instr->value()).Value() >= 0;
   }
   return false;
@@ -2758,10 +2760,10 @@
 
 static BinarySmiOpInstr* AsSmiShiftLeftInstruction(Definition* d) {
   BinarySmiOpInstr* instr = d->AsBinarySmiOp();
-  if ((instr != NULL) && (instr->op_kind() == Token::kSHL)) {
+  if ((instr != nullptr) && (instr->op_kind() == Token::kSHL)) {
     return instr;
   }
-  return NULL;
+  return nullptr;
 }
 
 void FlowGraph::OptimizeLeftShiftBitAndSmiOp(
@@ -2769,8 +2771,8 @@
     Definition* bit_and_instr,
     Definition* left_instr,
     Definition* right_instr) {
-  ASSERT(bit_and_instr != NULL);
-  ASSERT((left_instr != NULL) && (right_instr != NULL));
+  ASSERT(bit_and_instr != nullptr);
+  ASSERT((left_instr != nullptr) && (right_instr != nullptr));
 
   // Check for pattern, smi_shift_left must be single-use.
   bool is_positive_or_zero = IsPositiveOrZeroSmiConst(left_instr);
@@ -2779,14 +2781,15 @@
   }
   if (!is_positive_or_zero) return;
 
-  BinarySmiOpInstr* smi_shift_left = NULL;
+  BinarySmiOpInstr* smi_shift_left = nullptr;
   if (bit_and_instr->InputAt(0)->IsSingleUse()) {
     smi_shift_left = AsSmiShiftLeftInstruction(left_instr);
   }
-  if ((smi_shift_left == NULL) && (bit_and_instr->InputAt(1)->IsSingleUse())) {
+  if ((smi_shift_left == nullptr) &&
+      (bit_and_instr->InputAt(1)->IsSingleUse())) {
     smi_shift_left = AsSmiShiftLeftInstruction(right_instr);
   }
-  if (smi_shift_left == NULL) return;
+  if (smi_shift_left == nullptr) return;
 
   // Pattern recognized.
   smi_shift_left->mark_truncating();
@@ -2825,7 +2828,7 @@
   }
   for (intptr_t i = 0; i < merge_candidates->length(); i++) {
     BinarySmiOpInstr* curr_instr = (*merge_candidates)[i];
-    if (curr_instr == NULL) {
+    if (curr_instr == nullptr) {
       // Instruction was merged already.
       continue;
     }
@@ -2839,11 +2842,11 @@
     Definition* right_def = curr_instr->right()->definition();
     for (intptr_t k = i + 1; k < merge_candidates->length(); k++) {
       BinarySmiOpInstr* other_binop = (*merge_candidates)[k];
-      // 'other_binop' can be NULL if it was already merged.
-      if ((other_binop != NULL) && (other_binop->op_kind() == other_kind) &&
+      // 'other_binop' can be nullptr if it was already merged.
+      if ((other_binop != nullptr) && (other_binop->op_kind() == other_kind) &&
           (other_binop->left()->definition() == left_def) &&
           (other_binop->right()->definition() == right_def)) {
-        (*merge_candidates)[k] = NULL;  // Clear it.
+        (*merge_candidates)[k] = nullptr;  // Clear it.
         ASSERT(curr_instr->HasUses());
         AppendExtractNthOutputForMerged(
             curr_instr, TruncDivModInstr::OutputIndexOf(curr_instr->op_kind()),
@@ -2858,7 +2861,7 @@
         TruncDivModInstr* div_mod = new (Z) TruncDivModInstr(
             curr_instr->left()->CopyWithType(),
             curr_instr->right()->CopyWithType(), curr_instr->deopt_id());
-        curr_instr->ReplaceWith(div_mod, NULL);
+        curr_instr->ReplaceWith(div_mod, nullptr);
         other_binop->ReplaceUsesWith(div_mod);
         other_binop->RemoveFromGraph();
         // Only one merge possible. Because canonicalization happens later,
@@ -2877,7 +2880,7 @@
   ExtractNthOutputInstr* extract =
       new (Z) ExtractNthOutputInstr(new (Z) Value(instr), index, rep, cid);
   instr->ReplaceUsesWith(extract);
-  InsertAfter(instr, extract, NULL, FlowGraph::kValue);
+  InsertAfter(instr, extract, nullptr, FlowGraph::kValue);
 }
 
 //
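
The TruncDivMod merging hunks above fold a truncating division and a modulo on the same operands into a single TruncDivModInstr, with AppendExtractNthOutputForMerged reading each result back, because the quotient and remainder come out of one division. As a loose analogy only (not the VM's implementation), the C standard library exposes the same pairing through std::div:

#include <cstdlib>
#include <iostream>

int main() {
  const int a = 17, b = 5;
  // Written separately: conceptually two divisions.
  std::cout << (a / b) << " " << (a % b) << "\n";  // 3 2
  // Merged: one call yields both results, like the merged TruncDivModInstr.
  const std::div_t r = std::div(a, b);
  std::cout << r.quot << " " << r.rem << "\n";     // 3 2
  return 0;
}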
diff --git a/runtime/vm/compiler/backend/flow_graph.h b/runtime/vm/compiler/backend/flow_graph.h
index 131296c..7b8a71e 100644
--- a/runtime/vm/compiler/backend/flow_graph.h
+++ b/runtime/vm/compiler/backend/flow_graph.h
@@ -374,7 +374,8 @@
   // Insert a redefinition of an original definition after prev and rename all
   // dominated uses of the original.  If an equivalent redefinition is already
   // present, nothing is inserted.
-  // Returns the redefinition, if a redefinition was inserted, NULL otherwise.
+  // Returns the redefinition, if a redefinition was inserted, nullptr
+  // otherwise.
   RedefinitionInstr* EnsureRedefinition(Instruction* prev,
                                         Definition* original,
                                         CompileType compile_type);
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.cc b/runtime/vm/compiler/backend/flow_graph_compiler.cc
index 5ad3393..74cc592 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.cc
@@ -100,7 +100,7 @@
 // them immediately before the call instruction and right before
 // register allocation.
 void CompilerDeoptInfo::AllocateOutgoingArguments(Environment* env) {
-  if (env == NULL) return;
+  if (env == nullptr) return;
   for (Environment::ShallowIterator it(env); !it.Done(); it.Advance()) {
     if (it.CurrentLocation().IsInvalid()) {
       if (auto move_arg = it.CurrentValue()->definition()->AsMoveArgument()) {
@@ -116,7 +116,7 @@
     if (it.CurrentLocation().IsInvalid()) {
       MaterializeObjectInstr* mat =
           it.CurrentValue()->definition()->AsMaterializeObject();
-      ASSERT(mat != NULL);
+      ASSERT(mat != nullptr);
       builder->AddMaterialization(mat);
     }
   }
@@ -132,7 +132,7 @@
     const GrowableArray<TokenPosition>& inline_id_to_token_pos,
     const GrowableArray<intptr_t>& caller_inline_id,
     ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data,
-    CodeStatistics* stats /* = NULL */)
+    CodeStatistics* stats /* = nullptr */)
     : thread_(Thread::Current()),
       zone_(Thread::Current()->zone()),
       assembler_(assembler),
@@ -166,7 +166,7 @@
           Class::ZoneHandle(isolate_group()->object_store()->int32x4_class())),
       list_class_(Class::ZoneHandle(Library::Handle(Library::CoreLibrary())
                                         .LookupClass(Symbols::List()))),
-      pending_deoptimization_env_(NULL),
+      pending_deoptimization_env_(nullptr),
       deopt_id_to_ic_data_(deopt_id_to_ic_data),
       edge_counters_array_(Array::ZoneHandle()) {
   ASSERT(flow_graph->parsed_function().function().ptr() ==
@@ -178,7 +178,7 @@
     const intptr_t len = thread()->compiler_state().deopt_id();
     deopt_id_to_ic_data_->EnsureLength(len, nullptr);
   }
-  ASSERT(assembler != NULL);
+  ASSERT(assembler != nullptr);
   ASSERT(!list_class_.IsNull());
 
 #if defined(PRODUCT)
@@ -272,14 +272,14 @@
       return true;
     }
   }
-  if (FLAG_stacktrace_filter != NULL &&
+  if (FLAG_stacktrace_filter != nullptr &&
       strstr(parsed_function().function().ToFullyQualifiedCString(),
-             FLAG_stacktrace_filter) != NULL) {
+             FLAG_stacktrace_filter) != nullptr) {
     return true;
   }
-  if (is_optimizing() && FLAG_deoptimize_filter != NULL &&
+  if (is_optimizing() && FLAG_deoptimize_filter != nullptr &&
       strstr(parsed_function().function().ToFullyQualifiedCString(),
-             FLAG_deoptimize_filter) != NULL) {
+             FLAG_deoptimize_filter) != nullptr) {
     return true;
   }
 #endif  // !defined(PRODUCT)
@@ -318,7 +318,7 @@
   // This algorithm does not garbage collect blocks in place, but merely
   // records forwarding label information.  In this way it avoids having to
   // change join and target entries.
-  compiler::Label* nonempty_label = NULL;
+  compiler::Label* nonempty_label = nullptr;
   for (intptr_t i = block_order().length() - 1; i >= 1; --i) {
     BlockEntryInstr* block = block_order()[i];
 
@@ -701,14 +701,14 @@
     }
 
     BeginCodeSourceRange(entry->source());
-    ASSERT(pending_deoptimization_env_ == NULL);
+    ASSERT(pending_deoptimization_env_ == nullptr);
     pending_deoptimization_env_ = entry->env();
     set_current_instruction(entry);
     StatsBegin(entry);
     entry->EmitNativeCode(this);
     StatsEnd(entry);
     set_current_instruction(nullptr);
-    pending_deoptimization_env_ = NULL;
+    pending_deoptimization_env_ = nullptr;
     EndCodeSourceRange(entry->source());
 
     if (skip_body_compilation()) {
@@ -739,12 +739,12 @@
 
       BeginCodeSourceRange(instr->source());
       EmitInstructionPrologue(instr);
-      ASSERT(pending_deoptimization_env_ == NULL);
+      ASSERT(pending_deoptimization_env_ == nullptr);
       pending_deoptimization_env_ = instr->env();
       DEBUG_ONLY(current_instruction_ = instr);
       instr->EmitNativeCode(this);
       DEBUG_ONLY(current_instruction_ = nullptr);
-      pending_deoptimization_env_ = NULL;
+      pending_deoptimization_env_ = nullptr;
       if (IsPeephole(instr)) {
         ASSERT(top_of_stack_ == nullptr);
         top_of_stack_ = instr->AsDefinition();
@@ -771,7 +771,7 @@
 #endif
   }
 
-  set_current_block(NULL);
+  set_current_block(nullptr);
 }
 
 void FlowGraphCompiler::Bailout(const char* reason) {
@@ -1016,7 +1016,7 @@
         is_optimizing() ? flow_graph_.graph_entry()->spill_slot_count() : 0;
 
     RegisterSet* registers = locs->live_registers();
-    ASSERT(registers != NULL);
+    ASSERT(registers != nullptr);
     const intptr_t kFpuRegisterSpillFactor =
         kFpuRegisterSize / compiler::target::kWordSize;
     const bool using_shared_stub = locs->call_on_shared_slow_path();
@@ -1239,14 +1239,14 @@
 }
 
 void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) {
-  ASSERT(exception_handlers_list_ != NULL);
+  ASSERT(exception_handlers_list_ != nullptr);
   const ExceptionHandlers& handlers = ExceptionHandlers::Handle(
       exception_handlers_list_->FinalizeExceptionHandlers(code.PayloadStart()));
   code.set_exception_handlers(handlers);
 }
 
 void FlowGraphCompiler::FinalizePcDescriptors(const Code& code) {
-  ASSERT(pc_descriptors_list_ != NULL);
+  ASSERT(pc_descriptors_list_ != nullptr);
   const PcDescriptors& descriptors = PcDescriptors::Handle(
       pc_descriptors_list_->FinalizePcDescriptors(code.PayloadStart()));
   if (!is_optimizing_) descriptors.Verify(parsed_function_.function());
@@ -1286,7 +1286,7 @@
 }
 
 void FlowGraphCompiler::FinalizeStackMaps(const Code& code) {
-  ASSERT(compressed_stackmaps_builder_ != NULL);
+  ASSERT(compressed_stackmaps_builder_ != nullptr);
   // Finalize the compressed stack maps and add it to the code object.
   const auto& maps =
       CompressedStackMaps::Handle(compressed_stackmaps_builder_->Finalize());
@@ -1858,8 +1858,8 @@
     intptr_t num_args_tested,
     const AbstractType& receiver_type,
     const Function& binary_smi_target) {
-  if ((deopt_id_to_ic_data_ != NULL) &&
-      ((*deopt_id_to_ic_data_)[deopt_id] != NULL)) {
+  if ((deopt_id_to_ic_data_ != nullptr) &&
+      ((*deopt_id_to_ic_data_)[deopt_id] != nullptr)) {
     const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
     ASSERT(res->deopt_id() == deopt_id);
     ASSERT(res->target_name() == target_name.ptr());
@@ -1888,7 +1888,7 @@
                           ICData::kInstance, receiver_type);
   }
 
-  if (deopt_id_to_ic_data_ != NULL) {
+  if (deopt_id_to_ic_data_ != nullptr) {
     (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
   }
   ASSERT(!ic_data.is_static_call());
@@ -1901,8 +1901,8 @@
     const Array& arguments_descriptor,
     intptr_t num_args_tested,
     ICData::RebindRule rebind_rule) {
-  if ((deopt_id_to_ic_data_ != NULL) &&
-      ((*deopt_id_to_ic_data_)[deopt_id] != NULL)) {
+  if ((deopt_id_to_ic_data_ != nullptr) &&
+      ((*deopt_id_to_ic_data_)[deopt_id] != nullptr)) {
     const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
     ASSERT(res->deopt_id() == deopt_id);
     ASSERT(res->target_name() == target.name());
@@ -1917,7 +1917,7 @@
       zone(), ICData::NewForStaticCall(parsed_function().function(), target,
                                        arguments_descriptor, deopt_id,
                                        num_args_tested, rebind_rule));
-  if (deopt_id_to_ic_data_ != NULL) {
+  if (deopt_id_to_ic_data_ != nullptr) {
     (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
   }
   return &ic_data;
@@ -1996,7 +1996,7 @@
   ArgumentsDescriptor args_desc(args_desc_array);
 
   Function& fn = Function::ZoneHandle(zone);
-  if (!LookupMethodFor(cid, selector, args_desc, &fn)) return NULL;
+  if (!LookupMethodFor(cid, selector, args_desc, &fn)) return nullptr;
 
   CallTargets* targets = new (zone) CallTargets(zone);
   targets->Add(new (zone) TargetInfo(cid, cid, &fn, /* count = */ 1,
@@ -2023,7 +2023,7 @@
   if (!cls.is_finalized()) return false;
   if (Array::Handle(cls.current_functions()).IsNull()) return false;
 
-  if (class_is_abstract_return != NULL) {
+  if (class_is_abstract_return != nullptr) {
     *class_is_abstract_return = cls.is_abstract();
   }
   const bool allow_add = false;
@@ -2060,8 +2060,8 @@
     if (complete) {
       compiler::Label ok;
       EmitTestAndCall(targets, call->function_name(), args_info,
-                      NULL,  // No cid match.
-                      &ok,   // Found cid.
+                      nullptr,  // No cid match.
+                      &ok,      // Found cid.
                       deopt_id, source, locs, true, total_ic_calls,
                       call->entry_kind());
       assembler()->Bind(&ok);
@@ -2155,7 +2155,7 @@
                            UntaggedPcDescriptors::kOther, locs, function,
                            entry_kind);
     EmitDropArguments(args_info.size_with_type_args);
-    if (match_found != NULL) {
+    if (match_found != nullptr) {
       __ Jump(match_found);
     }
     __ Bind(&after_smi_test);
@@ -2221,7 +2221,7 @@
                                                   const Class& type_class,
                                                   compiler::Label* is_subtype) {
   HierarchyInfo* hi = Thread::Current()->hierarchy_info();
-  if (hi != NULL) {
+  if (hi != nullptr) {
     const CidRangeVector& ranges =
         hi->SubtypeRangesForClass(type_class,
                                   /*include_abstract=*/false,
@@ -3037,7 +3037,7 @@
 
   FrameStatePop(instr->ArgumentCount());
   Definition* defn = instr->AsDefinition();
-  if ((defn != NULL) && defn->HasTemp()) {
+  if ((defn != nullptr) && defn->HasTemp()) {
     FrameStatePush(defn);
   }
 }
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.h b/runtime/vm/compiler/backend/flow_graph_compiler.h
index 6be75b0..fcc96fa 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.h
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.h
@@ -76,7 +76,7 @@
         reason_(reason),
         flags_(flags),
         deopt_env_(deopt_env) {
-    ASSERT(deopt_env != NULL);
+    ASSERT(deopt_env != nullptr);
   }
   virtual ~CompilerDeoptInfo() {}
 
@@ -126,7 +126,7 @@
 
   const char* Name() const {
     const char* kFormat = "Deopt stub for id %d, reason: %s";
-    const intptr_t len = Utils::SNPrint(NULL, 0, kFormat, deopt_id(),
+    const intptr_t len = Utils::SNPrint(nullptr, 0, kFormat, deopt_id(),
                                         DeoptReasonToCString(reason())) +
                          1;
     char* chars = Thread::Current()->zone()->Alloc<char>(len);
@@ -327,7 +327,7 @@
     BlockInfo()
         : block_label_(),
           jump_label_(&block_label_),
-          next_nonempty_label_(NULL),
+          next_nonempty_label_(nullptr),
           is_marked_(false) {}
 
     // The label to jump to when control is transferred to this block.  For
@@ -337,7 +337,7 @@
     void set_jump_label(compiler::Label* label) { jump_label_ = label; }
 
     // The label of the first nonempty block after this one in the block
-    // order, or NULL if there is no nonempty block following this one.
+    // order, or nullptr if there is no nonempty block following this one.
     compiler::Label* next_nonempty_label() const {
       return next_nonempty_label_;
     }
@@ -371,7 +371,7 @@
                     const GrowableArray<TokenPosition>& inline_id_to_token_pos,
                     const GrowableArray<intptr_t>& caller_inline_id,
                     ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data,
-                    CodeStatistics* stats = NULL);
+                    CodeStatistics* stats = nullptr);
 
   void ArchSpecificInitialization();
 
@@ -446,19 +446,19 @@
   const GrowableArray<BlockInfo*>& block_info() const { return block_info_; }
 
   void StatsBegin(Instruction* instr) {
-    if (stats_ != NULL) stats_->Begin(instr);
+    if (stats_ != nullptr) stats_->Begin(instr);
   }
 
   void StatsEnd(Instruction* instr) {
-    if (stats_ != NULL) stats_->End(instr);
+    if (stats_ != nullptr) stats_->End(instr);
   }
 
   void SpecialStatsBegin(intptr_t tag) {
-    if (stats_ != NULL) stats_->SpecialBegin(tag);
+    if (stats_ != nullptr) stats_->SpecialBegin(tag);
   }
 
   void SpecialStatsEnd(intptr_t tag) {
-    if (stats_ != NULL) stats_->SpecialEnd(tag);
+    if (stats_ != nullptr) stats_->SpecialEnd(tag);
   }
 
   GrowableArray<const Field*>& used_static_fields() {
@@ -628,12 +628,13 @@
   // the range.
   //
   // Returns whether [class_id_reg] is clobbered by the check.
-  static bool GenerateCidRangesCheck(compiler::Assembler* assembler,
-                                     Register class_id_reg,
-                                     const CidRangeVector& cid_ranges,
-                                     compiler::Label* inside_range_lbl,
-                                     compiler::Label* outside_range_lbl = NULL,
-                                     bool fall_through_if_inside = false);
+  static bool GenerateCidRangesCheck(
+      compiler::Assembler* assembler,
+      Register class_id_reg,
+      const CidRangeVector& cid_ranges,
+      compiler::Label* inside_range_lbl,
+      compiler::Label* outside_range_lbl = nullptr,
+      bool fall_through_if_inside = false);
 
   void EmitOptimizedInstanceCall(
       const Code& stub,
@@ -846,7 +847,7 @@
                                       intptr_t num_slow_path_args);
 
   intptr_t CurrentTryIndex() const {
-    if (current_block_ == NULL) {
+    if (current_block_ == nullptr) {
       return kInvalidTryIndex;
     }
     return current_block_->try_index();
@@ -898,7 +899,7 @@
                               const String& name,
                               const ArgumentsDescriptor& args_desc,
                               Function* fn_return,
-                              bool* class_is_abstract_return = NULL);
+                              bool* class_is_abstract_return = nullptr);
 
   // Returns new class-id bias.
   //
@@ -1097,7 +1098,7 @@
   bool CanPcRelativeCall(const Code& target) const;
   bool CanPcRelativeCall(const AbstractType& target) const;
 
-  // This struct contains either function or code, the other one being NULL.
+  // This struct contains either function or code, the other one being nullptr.
   class StaticCallsStruct : public ZoneAllocated {
    public:
     Code::CallKind call_kind;
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
index eb9f031..9098d8c 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
@@ -94,7 +94,7 @@
 TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                 DeoptInfoBuilder* builder,
                                                 const Array& deopt_table) {
-  if (deopt_env_ == NULL) {
+  if (deopt_env_ == nullptr) {
     ++builder->current_info_number_;
     return TypedData::null();
   }
@@ -131,7 +131,7 @@
 
   Environment* previous = current;
   current = current->outer();
-  while (current != NULL) {
+  while (current != nullptr) {
     builder->AddPp(current->function(), slot_ix++);
     builder->AddPcMarker(previous->function(), slot_ix++);
     builder->AddCallerFp(slot_ix++);
@@ -160,7 +160,7 @@
     current = current->outer();
   }
   // The previous pointer is now the outermost environment.
-  ASSERT(previous != NULL);
+  ASSERT(previous != nullptr);
 
   // Set slots for the outermost environment.
   builder->AddCallerPp(slot_ix++);
@@ -188,7 +188,7 @@
     __ bkpt(0);
   }
 
-  ASSERT(deopt_env() != NULL);
+  ASSERT(deopt_env() != nullptr);
   __ Call(compiler::Address(
       THR, compiler::target::Thread::deoptimize_entry_offset()));
   set_pc_offset(assembler->CodeSize());
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
index d5e954a..ef80ec8 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
@@ -88,7 +88,7 @@
 TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                 DeoptInfoBuilder* builder,
                                                 const Array& deopt_table) {
-  if (deopt_env_ == NULL) {
+  if (deopt_env_ == nullptr) {
     ++builder->current_info_number_;
     return TypedData::null();
   }
@@ -125,7 +125,7 @@
 
   Environment* previous = current;
   current = current->outer();
-  while (current != NULL) {
+  while (current != nullptr) {
     builder->AddPp(current->function(), slot_ix++);
     builder->AddPcMarker(previous->function(), slot_ix++);
     builder->AddCallerFp(slot_ix++);
@@ -154,7 +154,7 @@
     current = current->outer();
   }
   // The previous pointer is now the outermost environment.
-  ASSERT(previous != NULL);
+  ASSERT(previous != nullptr);
 
   // Add slots for the outermost environment.
   builder->AddCallerPp(slot_ix++);
@@ -182,7 +182,7 @@
     __ brk(0);
   }
 
-  ASSERT(deopt_env() != NULL);
+  ASSERT(deopt_env() != nullptr);
   __ Call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
   set_pc_offset(assembler->CodeSize());
 #undef __
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
index 18dced1..6dfb986 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
@@ -66,7 +66,7 @@
 TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                 DeoptInfoBuilder* builder,
                                                 const Array& deopt_table) {
-  if (deopt_env_ == NULL) {
+  if (deopt_env_ == nullptr) {
     ++builder->current_info_number_;
     return TypedData::null();
   }
@@ -105,7 +105,7 @@
 
   Environment* previous = current;
   current = current->outer();
-  while (current != NULL) {
+  while (current != nullptr) {
     // For any outer environment the deopt id is that of the call instruction
     // which is recorded in the outer environment.
     builder->AddReturnAddress(current->function(),
@@ -133,7 +133,7 @@
     current = current->outer();
   }
   // The previous pointer is now the outermost environment.
-  ASSERT(previous != NULL);
+  ASSERT(previous != nullptr);
 
   // For the outermost environment, set caller PC.
   builder->AddCallerPc(slot_ix++);
@@ -158,7 +158,7 @@
     __ int3();
   }
 
-  ASSERT(deopt_env() != NULL);
+  ASSERT(deopt_env() != nullptr);
   __ pushl(CODE_REG);
   __ Call(StubCode::Deoptimize());
   set_pc_offset(assembler->CodeSize());
@@ -244,7 +244,7 @@
 }
 
 // Optimize assignable type check by adding inlined tests for:
-// - NULL -> return NULL.
+// - null -> return null.
 // - Smi -> compile time subtype check (only if dst class is not parameterized).
 // - Class equality (only if class is not parameterized).
 // Inputs:
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_riscv.cc b/runtime/vm/compiler/backend/flow_graph_compiler_riscv.cc
index bea2c36..fa6f225 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_riscv.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_riscv.cc
@@ -75,7 +75,7 @@
 TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                 DeoptInfoBuilder* builder,
                                                 const Array& deopt_table) {
-  if (deopt_env_ == NULL) {
+  if (deopt_env_ == nullptr) {
     ++builder->current_info_number_;
     return TypedData::null();
   }
@@ -112,7 +112,7 @@
 
   Environment* previous = current;
   current = current->outer();
-  while (current != NULL) {
+  while (current != nullptr) {
     builder->AddPp(current->function(), slot_ix++);
     builder->AddPcMarker(previous->function(), slot_ix++);
     builder->AddCallerFp(slot_ix++);
@@ -141,7 +141,7 @@
     current = current->outer();
   }
   // The previous pointer is now the outermost environment.
-  ASSERT(previous != NULL);
+  ASSERT(previous != nullptr);
 
   // Add slots for the outermost environment.
   builder->AddCallerPp(slot_ix++);
@@ -169,7 +169,7 @@
     __ trap();
   }
 
-  ASSERT(deopt_env() != NULL);
+  ASSERT(deopt_env() != nullptr);
   __ Call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
   set_pc_offset(assembler->CodeSize());
 #undef __
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
index 4979645..b628241 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
@@ -89,7 +89,7 @@
 TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                 DeoptInfoBuilder* builder,
                                                 const Array& deopt_table) {
-  if (deopt_env_ == NULL) {
+  if (deopt_env_ == nullptr) {
     ++builder->current_info_number_;
     return TypedData::null();
   }
@@ -126,7 +126,7 @@
 
   Environment* previous = current;
   current = current->outer();
-  while (current != NULL) {
+  while (current != nullptr) {
     builder->AddPp(current->function(), slot_ix++);
     builder->AddPcMarker(previous->function(), slot_ix++);
     builder->AddCallerFp(slot_ix++);
@@ -155,7 +155,7 @@
     current = current->outer();
   }
   // The previous pointer is now the outermost environment.
-  ASSERT(previous != NULL);
+  ASSERT(previous != nullptr);
 
   // Set slots for the outermost environment.
   builder->AddCallerPp(slot_ix++);
@@ -183,7 +183,7 @@
     __ int3();
   }
 
-  ASSERT(deopt_env() != NULL);
+  ASSERT(deopt_env() != nullptr);
   __ call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
   set_pc_offset(assembler->CodeSize());
   __ int3();
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 5da9ce3..70c2899 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -522,7 +522,7 @@
 //    - a constant (any non-sentinel value)
 //    - unknown sentinel
 Object& Definition::constant_value() {
-  if (constant_value_ == NULL) {
+  if (constant_value_ == nullptr) {
     constant_value_ = &Object::ZoneHandle(ConstantPropagator::Unknown());
   }
   return *constant_value_;
@@ -854,7 +854,7 @@
 
 bool CheckClassInstr::AttributesEqual(const Instruction& other) const {
   auto const other_check = other.AsCheckClass();
-  ASSERT(other_check != NULL);
+  ASSERT(other_check != nullptr);
   return cids().Equals(other_check->cids());
 }
 
@@ -1044,7 +1044,7 @@
 
 bool StrictCompareInstr::AttributesEqual(const Instruction& other) const {
   auto const other_op = other.AsStrictCompare();
-  ASSERT(other_op != NULL);
+  ASSERT(other_op != nullptr);
   return ComparisonInstr::AttributesEqual(other) &&
          (needs_number_check() == other_op->needs_number_check());
 }
@@ -1056,7 +1056,7 @@
 
 bool MathMinMaxInstr::AttributesEqual(const Instruction& other) const {
   auto const other_op = other.AsMathMinMax();
-  ASSERT(other_op != NULL);
+  ASSERT(other_op != nullptr);
   return (op_kind() == other_op->op_kind()) &&
          (result_cid() == other_op->result_cid());
 }
@@ -1071,7 +1071,7 @@
 
 bool LoadFieldInstr::AttributesEqual(const Instruction& other) const {
   auto const other_load = other.AsLoadField();
-  ASSERT(other_load != NULL);
+  ASSERT(other_load != nullptr);
   return &this->slot_ == &other_load->slot_;
 }
 
@@ -1114,7 +1114,7 @@
 
 bool ConstantInstr::AttributesEqual(const Instruction& other) const {
   auto const other_constant = other.AsConstant();
-  ASSERT(other_constant != NULL);
+  ASSERT(other_constant != nullptr);
   return (value().ptr() == other_constant->value().ptr() &&
           representation() == other_constant->representation());
 }
@@ -1138,13 +1138,13 @@
 // Returns true if the value represents constant null.
 bool Value::BindsToConstantNull() const {
   ConstantInstr* constant = definition()->OriginalDefinition()->AsConstant();
-  return (constant != NULL) && constant->value().IsNull();
+  return (constant != nullptr) && constant->value().IsNull();
 }
 
 const Object& Value::BoundConstant() const {
   ASSERT(BindsToConstant());
   ConstantInstr* constant = definition()->OriginalDefinition()->AsConstant();
-  ASSERT(constant != NULL);
+  ASSERT(constant != nullptr);
   return constant->value();
 }
 
@@ -1182,10 +1182,10 @@
   ASSERT(initial_definitions()->length() > 0);
   for (intptr_t i = 0; i < initial_definitions()->length(); ++i) {
     ConstantInstr* defn = (*initial_definitions())[i]->AsConstant();
-    if (defn != NULL && defn->value().IsNull()) return defn;
+    if (defn != nullptr && defn->value().IsNull()) return defn;
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 CatchBlockEntryInstr* GraphEntryInstr::GetCatchEntry(intptr_t index) {
@@ -1194,7 +1194,7 @@
   for (intptr_t i = 0; i < catch_entries_.length(); ++i) {
     if (catch_entries_[i]->catch_try_index() == index) return catch_entries_[i];
   }
-  return NULL;
+  return nullptr;
 }
 
 bool GraphEntryInstr::IsCompiledForOsr() const {
@@ -1226,7 +1226,7 @@
   for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) {
     it.CurrentValue()->RemoveFromUseList();
   }
-  env_ = NULL;
+  env_ = nullptr;
 }
 
 void Instruction::ReplaceInEnvironment(Definition* current,
@@ -1248,26 +1248,26 @@
   ASSERT(!IsReturn());
   ASSERT(!IsReThrow());
   ASSERT(!IsGoto());
-  ASSERT(previous() != NULL);
+  ASSERT(previous() != nullptr);
   // We cannot assert that the instruction, if it is a definition, has no
   // uses.  This function is used to remove instructions from the graph and
   // reinsert them elsewhere (e.g., hoisting).
   Instruction* prev_instr = previous();
   Instruction* next_instr = next();
-  ASSERT(next_instr != NULL);
+  ASSERT(next_instr != nullptr);
   ASSERT(!next_instr->IsBlockEntry());
   prev_instr->LinkTo(next_instr);
   UnuseAllInputs();
   // Reset the successor and previous instruction to indicate that the
   // instruction is removed from the graph.
-  set_previous(NULL);
-  set_next(NULL);
+  set_previous(nullptr);
+  set_next(nullptr);
   return return_previous ? prev_instr : next_instr;
 }
 
 void Instruction::InsertAfter(Instruction* prev) {
-  ASSERT(previous_ == NULL);
-  ASSERT(next_ == NULL);
+  ASSERT(previous_ == nullptr);
+  ASSERT(next_ == nullptr);
   previous_ = prev;
   next_ = prev->next_;
   next_->previous_ = this;
@@ -1317,7 +1317,7 @@
 
 // Default implementation of visiting basic blocks.  Can be overridden.
 void FlowGraphVisitor::VisitBlocks() {
-  ASSERT(current_iterator_ == NULL);
+  ASSERT(current_iterator_ == nullptr);
   for (intptr_t i = 0; i < block_order_->length(); ++i) {
     BlockEntryInstr* entry = (*block_order_)[i];
     entry->Accept(this);
@@ -1326,7 +1326,7 @@
     for (; !it.Done(); it.Advance()) {
       it.Current()->Accept(this);
     }
-    current_iterator_ = NULL;
+    current_iterator_ = nullptr;
   }
 }
 
@@ -1386,8 +1386,8 @@
   ASSERT(value != next);
   *list = value;
   value->set_next_use(next);
-  value->set_previous_use(NULL);
-  if (next != NULL) next->set_previous_use(value);
+  value->set_previous_use(nullptr);
+  if (next != nullptr) next->set_previous_use(value);
 }
 
 void Value::RemoveFromUseList() {
@@ -1395,17 +1395,17 @@
   Value* next = next_use();
   if (this == def->input_use_list()) {
     def->set_input_use_list(next);
-    if (next != NULL) next->set_previous_use(NULL);
+    if (next != nullptr) next->set_previous_use(nullptr);
   } else if (this == def->env_use_list()) {
     def->set_env_use_list(next);
-    if (next != NULL) next->set_previous_use(NULL);
+    if (next != nullptr) next->set_previous_use(nullptr);
   } else if (Value* prev = previous_use()) {
     prev->set_next_use(next);
-    if (next != NULL) next->set_previous_use(prev);
+    if (next != nullptr) next->set_previous_use(prev);
   }
 
-  set_previous_use(NULL);
-  set_next_use(NULL);
+  set_previous_use(nullptr);
+  set_next_use(nullptr);
 }
 
 // True if the definition has a single input use and is used only in
@@ -1423,18 +1423,18 @@
 }
 
 bool Definition::HasOnlyInputUse(Value* use) const {
-  return (input_use_list() == use) && (use->next_use() == NULL);
+  return (input_use_list() == use) && (use->next_use() == nullptr);
 }
 
 void Definition::ReplaceUsesWith(Definition* other) {
-  ASSERT(other != NULL);
+  ASSERT(other != nullptr);
   ASSERT(this != other);
 
-  Value* current = NULL;
+  Value* current = nullptr;
   Value* next = input_use_list();
-  if (next != NULL) {
+  if (next != nullptr) {
     // Change all the definitions.
-    while (next != NULL) {
+    while (next != nullptr) {
       current = next;
       current->set_definition(other);
       current->RefineReachingType(other->Type());
@@ -1444,16 +1444,16 @@
     // Concatenate the lists.
     next = other->input_use_list();
     current->set_next_use(next);
-    if (next != NULL) next->set_previous_use(current);
+    if (next != nullptr) next->set_previous_use(current);
     other->set_input_use_list(input_use_list());
-    set_input_use_list(NULL);
+    set_input_use_list(nullptr);
   }
 
   // Repeat for environment uses.
-  current = NULL;
+  current = nullptr;
   next = env_use_list();
-  if (next != NULL) {
-    while (next != NULL) {
+  if (next != nullptr) {
+    while (next != nullptr) {
       current = next;
       current->set_definition(other);
       current->RefineReachingType(other->Type());
@@ -1461,9 +1461,9 @@
     }
     next = other->env_use_list();
     current->set_next_use(next);
-    if (next != NULL) next->set_previous_use(current);
+    if (next != nullptr) next->set_previous_use(current);
     other->set_env_use_list(env_use_list());
-    set_env_use_list(NULL);
+    set_env_use_list(nullptr);
   }
 }
 
@@ -1494,22 +1494,22 @@
 void Instruction::InheritDeoptTargetAfter(FlowGraph* flow_graph,
                                           Definition* call,
                                           Definition* result) {
-  ASSERT(call->env() != NULL);
+  ASSERT(call->env() != nullptr);
   deopt_id_ = DeoptId::ToDeoptAfter(call->deopt_id_);
   call->env()->DeepCopyAfterTo(
       flow_graph->zone(), this, call->ArgumentCount(),
       flow_graph->constant_dead(),
-      result != NULL ? result : flow_graph->constant_dead());
+      result != nullptr ? result : flow_graph->constant_dead());
 }
 
 void Instruction::InheritDeoptTarget(Zone* zone, Instruction* other) {
-  ASSERT(other->env() != NULL);
+  ASSERT(other->env() != nullptr);
   CopyDeoptIdFrom(*other);
   other->env()->DeepCopyTo(zone, this);
 }
 
 void BranchInstr::InheritDeoptTarget(Zone* zone, Instruction* other) {
-  ASSERT(env() == NULL);
+  ASSERT(env() == nullptr);
   Instruction::InheritDeoptTarget(zone, other);
   comparison()->SetDeoptId(*this);
 }
@@ -1531,7 +1531,8 @@
       return false;
     }
 
-    for (Instruction* curr = dom->next(); curr != NULL; curr = curr->next()) {
+    for (Instruction* curr = dom->next(); curr != nullptr;
+         curr = curr->next()) {
       if (curr == this) return true;
     }
 
@@ -1573,7 +1574,7 @@
     input->definition()->AddInputUse(input);
   }
   // Take replacement's environment from this definition.
-  ASSERT(replacement->env() == NULL);
+  ASSERT(replacement->env() == nullptr);
   replacement->SetEnvironment(env());
   ClearEnv();
   // Replace all uses of this definition with replacement_for_uses.
@@ -1581,7 +1582,7 @@
 
   // Finally replace this one with the replacement instruction in the graph.
   previous()->LinkTo(replacement);
-  if ((iterator != NULL) && (this == iterator->Current())) {
+  if ((iterator != nullptr) && (this == iterator->Current())) {
     // Remove through the iterator.
     replacement->LinkTo(this);
     iterator->RemoveCurrentFromGraph();
@@ -1590,8 +1591,8 @@
     // Remove this definition's input uses.
     UnuseAllInputs();
   }
-  set_previous(NULL);
-  set_next(NULL);
+  set_previous(nullptr);
+  set_next(nullptr);
 }
 
 void Definition::ReplaceWith(Definition* other,
@@ -1611,8 +1612,8 @@
     input->set_instruction(this);
   }
   // There should be no need to copy or unuse an environment.
-  ASSERT(comparison()->env() == NULL);
-  ASSERT(new_comparison->env() == NULL);
+  ASSERT(comparison()->env() == nullptr);
+  ASSERT(new_comparison->env() == nullptr);
   // Remove the current comparison's input uses.
   comparison()->UnuseAllInputs();
   ASSERT(!new_comparison->HasUses());
@@ -1636,14 +1637,14 @@
                                     GrowableArray<intptr_t>* parent) {
   // If this block has a predecessor (i.e., is not the graph entry) we can
   // assume the preorder array is non-empty.
-  ASSERT((predecessor == NULL) || !preorder->is_empty());
+  ASSERT((predecessor == nullptr) || !preorder->is_empty());
   // Blocks with a single predecessor cannot have been reached before.
   ASSERT(IsJoinEntry() || !IsMarked(this, preorder));
 
   // 1. If the block has already been reached, add current_block as a
   // basic-block predecessor and we are done.
   if (IsMarked(this, preorder)) {
-    ASSERT(predecessor != NULL);
+    ASSERT(predecessor != nullptr);
     AddPredecessor(predecessor);
     return false;
   }
@@ -1651,12 +1652,12 @@
   // 2. Otherwise, clear the predecessors which might have been computed on
   // some earlier call to DiscoverBlocks and record this predecessor.
   ClearPredecessors();
-  if (predecessor != NULL) AddPredecessor(predecessor);
+  if (predecessor != nullptr) AddPredecessor(predecessor);
 
   // 3. The predecessor is the spanning-tree parent.  The graph entry has no
   // parent, indicated by -1.
   intptr_t parent_number =
-      (predecessor == NULL) ? -1 : predecessor->preorder_number();
+      (predecessor == nullptr) ? -1 : predecessor->preorder_number();
   parent->Add(parent_number);
 
   // 4. Assign the preorder number and add the block entry to the list.
@@ -1684,7 +1685,7 @@
 void GraphEntryInstr::RelinkToOsrEntry(Zone* zone, intptr_t max_block_id) {
   ASSERT(osr_id_ != Compiler::kNoOSRDeoptId);
   BitVector* block_marks = new (zone) BitVector(zone, max_block_id + 1);
-  bool found = FindOsrEntryAndRelink(this, /*parent=*/NULL, block_marks);
+  bool found = FindOsrEntryAndRelink(this, /*parent=*/nullptr, block_marks);
   ASSERT(found);
 }
 
@@ -1750,9 +1751,9 @@
 bool BlockEntryInstr::Dominates(BlockEntryInstr* other) const {
   // TODO(fschneider): Make this faster by e.g. storing dominators for each
   // block while computing the dominator tree.
-  ASSERT(other != NULL);
+  ASSERT(other != nullptr);
   BlockEntryInstr* current = other;
-  while (current != NULL && current != this) {
+  while (current != nullptr && current != this) {
     current = current->dominator();
   }
   return current == this;
@@ -1763,7 +1764,7 @@
   if ((last->SuccessorCount() == 1) && (last->SuccessorAt(0) == this)) {
     return dominator();
   }
-  return NULL;
+  return nullptr;
 }
 
 bool BlockEntryInstr::IsLoopHeader() const {
@@ -1787,13 +1788,13 @@
   for (intptr_t sidx = 0; sidx < last->SuccessorCount(); ++sidx) {
     // If the successor is a target, update its predecessor.
     TargetEntryInstr* target = last->SuccessorAt(sidx)->AsTargetEntry();
-    if (target != NULL) {
+    if (target != nullptr) {
       target->predecessor_ = new_block;
       continue;
     }
     // If the successor is a join, update each predecessor and the phis.
     JoinEntryInstr* join = last->SuccessorAt(sidx)->AsJoinEntry();
-    ASSERT(join != NULL);
+    ASSERT(join != nullptr);
     // Find the old predecessor index.
     intptr_t old_index = join->IndexOfPredecessor(this);
     intptr_t pred_count = join->PredecessorCount();
@@ -1817,11 +1818,11 @@
     }
     join->predecessors_[new_index] = new_block;
     // If the new and old predecessor index match there is nothing to update.
-    if ((join->phis() == NULL) || (old_index == new_index)) return;
+    if ((join->phis() == nullptr) || (old_index == new_index)) return;
     // Otherwise, reorder the predecessor uses in each phi.
     for (PhiIterator it(join); !it.Done(); it.Advance()) {
       PhiInstr* phi = it.Current();
-      ASSERT(phi != NULL);
+      ASSERT(phi != nullptr);
       ASSERT(pred_count == phi->InputCount());
       // Save the predecessor use.
       Value* pred_use = phi->InputAt(old_index);
@@ -1839,7 +1840,7 @@
 
 void BlockEntryInstr::ClearAllInstructions() {
   JoinEntryInstr* join = this->AsJoinEntry();
-  if (join != NULL) {
+  if (join != nullptr) {
     for (PhiIterator it(join); !it.Done(); it.Advance()) {
       it.Current()->UnuseAllInputs();
     }
@@ -1855,26 +1856,26 @@
   // Currently, phis are stored in a sparse array that holds the phi
   // for variable with index i at position i.
   // TODO(fschneider): Store phis in a more compact way.
-  if (phis_ == NULL) {
+  if (phis_ == nullptr) {
     phis_ = new ZoneGrowableArray<PhiInstr*>(var_count);
     for (intptr_t i = 0; i < var_count; i++) {
-      phis_->Add(NULL);
+      phis_->Add(nullptr);
     }
   }
-  ASSERT((*phis_)[var_index] == NULL);
+  ASSERT((*phis_)[var_index] == nullptr);
   return (*phis_)[var_index] = new PhiInstr(this, PredecessorCount());
 }
 
 void JoinEntryInstr::InsertPhi(PhiInstr* phi) {
   // Lazily initialize the array of phis.
-  if (phis_ == NULL) {
+  if (phis_ == nullptr) {
     phis_ = new ZoneGrowableArray<PhiInstr*>(1);
   }
   phis_->Add(phi);
 }
 
 void JoinEntryInstr::RemovePhi(PhiInstr* phi) {
-  ASSERT(phis_ != NULL);
+  ASSERT(phis_ != nullptr);
   for (intptr_t index = 0; index < phis_->length(); ++index) {
     if (phi == (*phis_)[index]) {
       (*phis_)[index] = phis_->Last();
@@ -1885,12 +1886,12 @@
 }
 
 void JoinEntryInstr::RemoveDeadPhis(Definition* replacement) {
-  if (phis_ == NULL) return;
+  if (phis_ == nullptr) return;
 
   intptr_t to_index = 0;
   for (intptr_t from_index = 0; from_index < phis_->length(); ++from_index) {
     PhiInstr* phi = (*phis_)[from_index];
-    if (phi != NULL) {
+    if (phi != nullptr) {
       if (phi->is_alive()) {
         (*phis_)[to_index++] = phi;
         for (intptr_t i = phi->InputCount() - 1; i >= 0; --i) {
@@ -1903,7 +1904,7 @@
     }
   }
   if (to_index == 0) {
-    phis_ = NULL;
+    phis_ = nullptr;
   } else {
     phis_->TruncateTo(to_index);
   }
@@ -1917,7 +1918,7 @@
   // Called only if index is in range.  Only control-transfer instructions
   // can have non-zero successor counts and they override this function.
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 intptr_t GraphEntryInstr::SuccessorCount() const {
@@ -1950,7 +1951,7 @@
   if (index == 0) return true_successor_;
   if (index == 1) return false_successor_;
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 intptr_t GotoInstr::SuccessorCount() const {
@@ -2098,7 +2099,7 @@
                                                            Value* right) {
   int64_t left_value;
   if (!Evaluator::ToIntegerConstant(left, &left_value)) {
-    return NULL;
+    return nullptr;
   }
 
   // Can't apply 0.0 * x -> 0.0 equivalence to double operation because
@@ -2112,7 +2113,7 @@
           // did not run yet. We need it to guarantee that right value is
           // correctly coerced to double. The second canonicalization pass
           // will apply this equivalence.
-          return NULL;
+          return nullptr;
         } else {
           return right->definition();
         }
@@ -2122,7 +2123,7 @@
       break;
   }
 
-  return NULL;
+  return nullptr;
 }
 
 Definition* DoubleToFloatInstr::Canonicalize(FlowGraph* flow_graph) {
@@ -2130,8 +2131,8 @@
   // Must only be used in Float32 StoreIndexedInstr, FloatToDoubleInstr,
   // Phis introduce by load forwarding, or MaterializeObject for
   // eliminated Float32 array.
-  ASSERT(env_use_list() == NULL);
-  for (Value* use = input_use_list(); use != NULL; use = use->next_use()) {
+  ASSERT(env_use_list() == nullptr);
+  for (Value* use = input_use_list(); use != nullptr; use = use->next_use()) {
     ASSERT(use->instruction()->IsPhi() ||
            use->instruction()->IsFloatToDouble() ||
            (use->instruction()->IsStoreIndexed() &&
@@ -2142,7 +2143,7 @@
              kTypedDataFloat32ArrayCid)));
   }
 #endif
-  if (!HasUses()) return NULL;
+  if (!HasUses()) return nullptr;
   if (value()->definition()->IsFloatToDouble()) {
     // F2D(D2F(v)) == v.
     return value()->definition()->AsFloatToDouble()->value()->definition();
@@ -2151,21 +2152,21 @@
 }
 
 Definition* FloatToDoubleInstr::Canonicalize(FlowGraph* flow_graph) {
-  return HasUses() ? this : NULL;
+  return HasUses() ? this : nullptr;
 }
 
 Definition* BinaryDoubleOpInstr::Canonicalize(FlowGraph* flow_graph) {
-  if (!HasUses()) return NULL;
+  if (!HasUses()) return nullptr;
 
-  Definition* result = NULL;
+  Definition* result = nullptr;
 
   result = CanonicalizeCommutativeDoubleArithmetic(op_kind(), left(), right());
-  if (result != NULL) {
+  if (result != nullptr) {
     return result;
   }
 
   result = CanonicalizeCommutativeDoubleArithmetic(op_kind(), right(), left());
-  if (result != NULL) {
+  if (result != nullptr) {
     return result;
   }
 
@@ -2182,7 +2183,7 @@
 }
 
 Definition* DoubleTestOpInstr::Canonicalize(FlowGraph* flow_graph) {
-  return HasUses() ? this : NULL;
+  return HasUses() ? this : nullptr;
 }
 
 static bool IsCommutative(Token::Kind op) {
@@ -2207,13 +2208,13 @@
                                                Value* value,
                                                intptr_t deopt_id,
                                                Range* range) {
-  UnaryIntegerOpInstr* op = NULL;
+  UnaryIntegerOpInstr* op = nullptr;
   switch (representation) {
     case kTagged:
       op = new UnarySmiOpInstr(op_kind, value, deopt_id);
       break;
     case kUnboxedInt32:
-      return NULL;
+      return nullptr;
     case kUnboxedUint32:
       op = new UnaryUint32OpInstr(op_kind, value, deopt_id);
       break;
@@ -2222,10 +2223,10 @@
       break;
     default:
       UNREACHABLE();
-      return NULL;
+      return nullptr;
   }
 
-  if (op == NULL) {
+  if (op == nullptr) {
     return op;
   }
 
@@ -2439,7 +2440,7 @@
         UnaryIntegerOpInstr* bit_not = UnaryIntegerOpInstr::Make(
             representation(), Token::kBIT_NOT, left()->CopyWithType(),
             GetDeoptId(), range());
-        if (bit_not != NULL) {
+        if (bit_not != nullptr) {
           flow_graph->InsertBefore(this, bit_not, env(), FlowGraph::kValue);
           return bit_not;
         }
@@ -2459,7 +2460,7 @@
         UnaryIntegerOpInstr* negation = UnaryIntegerOpInstr::Make(
             representation(), Token::kNEGATE, left()->CopyWithType(),
             GetDeoptId(), range());
-        if (negation != NULL) {
+        if (negation != nullptr) {
           flow_graph->InsertBefore(this, negation, env(), FlowGraph::kValue);
           return negation;
         }
@@ -2549,7 +2550,7 @@
   // uses. RenameUsesDominatedByRedefinitions would normalize the graph and
   // route those uses through this redefinition.
   if (!HasUses() && !flow_graph->is_licm_allowed()) {
-    return NULL;
+    return nullptr;
   }
   if (constrained_type() != nullptr &&
       constrained_type()->IsEqualTo(value()->Type())) {
@@ -2564,7 +2565,7 @@
       return this;
     case kOsrOnly:
       // Don't need OSR entries in the optimized code.
-      return NULL;
+      return nullptr;
   }
 
   // Switch above exhausts all possibilities but some compilers can't figure
@@ -2638,7 +2639,7 @@
 }
 
 Definition* ConstantInstr::Canonicalize(FlowGraph* flow_graph) {
-  return HasUses() ? this : NULL;
+  return HasUses() ? this : nullptr;
 }
 
 // A math unary instruction has a side effect (exception
@@ -3046,7 +3047,7 @@
 }
 
 Definition* InstantiateTypeArgumentsInstr::Canonicalize(FlowGraph* flow_graph) {
-  return HasUses() ? this : NULL;
+  return HasUses() ? this : nullptr;
 }
 
 LocationSummary* DebugStepCheckInstr::MakeLocationSummary(Zone* zone,
@@ -3059,7 +3060,7 @@
 }
 
 Instruction* DebugStepCheckInstr::Canonicalize(FlowGraph* flow_graph) {
-  return NULL;
+  return nullptr;
 }
 
 Instruction* RecordCoverageInstr::Canonicalize(FlowGraph* flow_graph) {
@@ -3082,7 +3083,7 @@
   // Fold away Box<rep>(Unbox<rep>(v)) if value is known to be of the
   // right class.
   UnboxInstr* unbox_defn = value()->definition()->AsUnbox();
-  if ((unbox_defn != NULL) &&
+  if ((unbox_defn != nullptr) &&
       (unbox_defn->representation() == from_representation()) &&
       (unbox_defn->value()->Type()->ToCid() == Type()->ToCid())) {
     return unbox_defn->value()->definition();
@@ -3143,7 +3144,7 @@
         UNREACHABLE();
         break;
     }
-    flow_graph->InsertBefore(this, replacement, NULL, FlowGraph::kValue);
+    flow_graph->InsertBefore(this, replacement, nullptr, FlowGraph::kValue);
     return replacement;
   }
 
@@ -3151,7 +3152,7 @@
 }
 
 Definition* UnboxInstr::Canonicalize(FlowGraph* flow_graph) {
-  if (!HasUses() && !CanDeoptimize()) return NULL;
+  if (!HasUses() && !CanDeoptimize()) return nullptr;
 
   // Fold away Unbox<rep>(v) if v has a target representation already.
   Definition* value_defn = value()->definition();
@@ -3161,7 +3162,7 @@
 
   // Fold away Unbox<rep>(Box<rep>(v)).
   BoxInstr* box_defn = value()->definition()->AsBox();
-  if ((box_defn != NULL) &&
+  if ((box_defn != nullptr) &&
       (box_defn->from_representation() == representation())) {
     return box_defn->value()->definition();
   }
@@ -3182,7 +3183,7 @@
 }
 
 Definition* UnboxIntegerInstr::Canonicalize(FlowGraph* flow_graph) {
-  if (!HasUses() && !CanDeoptimize()) return NULL;
+  if (!HasUses() && !CanDeoptimize()) return nullptr;
 
   // Fold away Unbox<rep>(v) if v has a target representation already.
   Definition* value_defn = value()->definition();
@@ -3198,7 +3199,7 @@
 
   // Fold away UnboxInteger<rep_to>(BoxInteger<rep_from>(v)).
   BoxIntegerInstr* box_defn = value()->definition()->AsBoxInteger();
-  if (box_defn != NULL && !box_defn->HasUnmatchedInputRepresentations()) {
+  if (box_defn != nullptr && !box_defn->HasUnmatchedInputRepresentations()) {
     Representation from_representation =
         box_defn->value()->definition()->representation();
     if (from_representation == representation()) {
@@ -3239,7 +3240,7 @@
   }
 
   ConstantInstr* c = value()->definition()->AsConstant();
-  if ((c != NULL) && c->value().IsInteger()) {
+  if ((c != nullptr) && c->value().IsInteger()) {
     if (!is_truncating()) {
       // Check that constant fits into 32-bit integer.
       const int64_t value = Integer::Cast(c->value()).AsInt64Value();
@@ -3261,7 +3262,7 @@
   }
 
   ConstantInstr* c = value()->definition()->AsConstant();
-  if (c != NULL && c->value().IsInteger()) {
+  if (c != nullptr && c->value().IsInteger()) {
     return flow_graph->GetConstant(c->value(), kUnboxedInt64);
   }
 
@@ -3269,7 +3270,7 @@
 }
 
 Definition* IntConverterInstr::Canonicalize(FlowGraph* flow_graph) {
-  if (!HasUses()) return NULL;
+  if (!HasUses()) return nullptr;
 
   // Fold IntConverter({Unboxed}Constant(...)) to UnboxedConstant.
   if (auto constant = value()->definition()->AsConstant()) {
@@ -3287,7 +3288,7 @@
   }
 
   IntConverterInstr* box_defn = value()->definition()->AsIntConverter();
-  if ((box_defn != NULL) && (box_defn->representation() == from())) {
+  if ((box_defn != nullptr) && (box_defn->representation() == from())) {
     // If the first conversion can erase bits (or deoptimize) we can't
     // canonicalize it away.
     auto src_defn = box_defn->value()->definition();
@@ -3320,7 +3321,7 @@
   }
 
   UnboxInt64Instr* unbox_defn = value()->definition()->AsUnboxInt64();
-  if (unbox_defn != NULL && (from() == kUnboxedInt64) &&
+  if (unbox_defn != nullptr && (from() == kUnboxedInt64) &&
       (to() == kUnboxedInt32) && unbox_defn->HasOnlyInputUse(value())) {
     // TODO(vegorov): there is a duplication of code between UnboxedIntConverter
     // and code path that unboxes Mint into Int32. We should just schedule
@@ -3394,7 +3395,7 @@
   }
   *negated = false;
   PassiveObject& constant = PassiveObject::Handle();
-  Value* other = NULL;
+  Value* other = nullptr;
   if (compare->right()->BindsToConstant()) {
     constant = compare->right()->BoundConstant().ptr();
     other = compare->left();
@@ -3458,7 +3459,7 @@
   }
 
   BinarySmiOpInstr* mask_op = left->definition()->AsBinarySmiOp();
-  if ((mask_op == NULL) || (mask_op->op_kind() != Token::kBIT_AND) ||
+  if ((mask_op == nullptr) || (mask_op->op_kind() != Token::kBIT_AND) ||
       !mask_op->HasOnlyUse(left)) {
     return false;
   }
@@ -3492,7 +3493,7 @@
       return this;
     }
     ComparisonInstr* comp = replacement->AsComparison();
-    if ((comp == NULL) || comp->CanDeoptimize() ||
+    if ((comp == nullptr) || comp->CanDeoptimize() ||
         comp->HasUnmatchedInputRepresentations()) {
       return this;
     }
@@ -3517,13 +3518,13 @@
       }
       // Clear the comparison's temp index and ssa temp index since the
       // value of the comparison is not used outside the branch anymore.
-      ASSERT(comp->input_use_list() == NULL);
+      ASSERT(comp->input_use_list() == nullptr);
       comp->ClearSSATempIndex();
       comp->ClearTempIndex();
     }
   } else if (comparison()->IsEqualityCompare() &&
              comparison()->operation_cid() == kSmiCid) {
-    BinarySmiOpInstr* bit_and = NULL;
+    BinarySmiOpInstr* bit_and = nullptr;
     bool negate = false;
     if (RecognizeTestPattern(comparison()->left(), comparison()->right(),
                              &negate)) {
@@ -3532,7 +3533,7 @@
                                     &negate)) {
       bit_and = comparison()->right()->definition()->AsBinarySmiOp();
     }
-    if (bit_and != NULL) {
+    if (bit_and != nullptr) {
       if (FLAG_trace_optimization) {
         THR_Print("Merging test smi v%" Pd "\n", bit_and->ssa_temp_index());
       }
@@ -3552,7 +3553,7 @@
 }
 
 Definition* StrictCompareInstr::Canonicalize(FlowGraph* flow_graph) {
-  if (!HasUses()) return NULL;
+  if (!HasUses()) return nullptr;
   bool negated = false;
   Definition* replacement = CanonicalizeStrictCompare(this, &negated,
                                                       /* is_branch = */ false);
@@ -3597,7 +3598,7 @@
     return this;
   }
 
-  return cids().HasClassId(value_cid) ? NULL : this;
+  return cids().HasClassId(value_cid) ? nullptr : this;
 }
 
 Definition* LoadClassIdInstr::Canonicalize(FlowGraph* flow_graph) {
@@ -3618,7 +3619,7 @@
     const Object& constant_value = value()->BoundConstant();
     if (constant_value.IsSmi() &&
         cids_.Contains(Smi::Cast(constant_value).Value())) {
-      return NULL;
+      return nullptr;
     }
   }
   return this;
@@ -3674,17 +3675,17 @@
 
 Instruction* GuardFieldClassInstr::Canonicalize(FlowGraph* flow_graph) {
   if (field().guarded_cid() == kDynamicCid) {
-    return NULL;  // Nothing to guard.
+    return nullptr;  // Nothing to guard.
   }
 
   if (field().is_nullable() && value()->Type()->IsNull()) {
-    return NULL;
+    return nullptr;
   }
 
   const intptr_t cid = field().is_nullable() ? value()->Type()->ToNullableCid()
                                              : value()->Type()->ToCid();
   if (field().guarded_cid() == cid) {
-    return NULL;  // Value is guaranteed to have this cid.
+    return nullptr;  // Value is guaranteed to have this cid.
   }
 
   return this;
@@ -3692,7 +3693,7 @@
 
 Instruction* GuardFieldLengthInstr::Canonicalize(FlowGraph* flow_graph) {
   if (!field().needs_length_check()) {
-    return NULL;  // Nothing to guard.
+    return nullptr;  // Nothing to guard.
   }
 
   const intptr_t expected_length = field().guarded_list_length();
@@ -3702,11 +3703,11 @@
 
   // Check if length is statically known.
   StaticCallInstr* call = value()->definition()->AsStaticCall();
-  if (call == NULL) {
+  if (call == nullptr) {
     return this;
   }
 
-  ConstantInstr* length = NULL;
+  ConstantInstr* length = nullptr;
   if (call->is_known_list_constructor() &&
       LoadFieldInstr::IsFixedLengthArrayCid(call->Type()->ToCid())) {
     length = call->ArgumentAt(1)->AsConstant();
@@ -3716,9 +3717,9 @@
   } else if (LoadFieldInstr::IsTypedDataViewFactory(call->function())) {
     length = call->ArgumentAt(3)->AsConstant();
   }
-  if ((length != NULL) && length->value().IsSmi() &&
+  if ((length != nullptr) && length->value().IsSmi() &&
       Smi::Cast(length->value()).Value() == expected_length) {
-    return NULL;  // Expected length matched.
+    return nullptr;  // Expected length matched.
   }
 
   return this;
@@ -3730,13 +3731,13 @@
 }
 
 Instruction* CheckSmiInstr::Canonicalize(FlowGraph* flow_graph) {
-  return (value()->Type()->ToCid() == kSmiCid) ? NULL : this;
+  return (value()->Type()->ToCid() == kSmiCid) ? nullptr : this;
 }
 
 Instruction* CheckEitherNonSmiInstr::Canonicalize(FlowGraph* flow_graph) {
   if ((left()->Type()->ToCid() == kDoubleCid) ||
       (right()->Type()->ToCid() == kDoubleCid)) {
-    return NULL;  // Remove from the graph.
+    return nullptr;  // Remove from the graph.
   }
   return this;
 }
@@ -3782,7 +3783,7 @@
 
     default:
       UNREACHABLE();
-      return NULL;
+      return nullptr;
   }
 }
 
@@ -3815,7 +3816,7 @@
 
     default:
       UNREACHABLE();
-      return NULL;
+      return nullptr;
   }
 }
 
@@ -4020,13 +4021,13 @@
 LocationSummary* GraphEntryInstr::MakeLocationSummary(Zone* zone,
                                                       bool optimizing) const {
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 LocationSummary* JoinEntryInstr::MakeLocationSummary(Zone* zone,
                                                      bool optimizing) const {
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void JoinEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -4043,7 +4044,7 @@
 LocationSummary* TargetEntryInstr::MakeLocationSummary(Zone* zone,
                                                        bool optimizing) const {
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void TargetEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -4074,7 +4075,7 @@
     Zone* zone,
     bool optimizing) const {
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void FunctionEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -4211,7 +4212,7 @@
 LocationSummary* OsrEntryInstr::MakeLocationSummary(Zone* zone,
                                                     bool optimizing) const {
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void OsrEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -4586,7 +4587,7 @@
 LocationSummary* PhiInstr::MakeLocationSummary(Zone* zone,
                                                bool optimizing) const {
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void PhiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -4596,7 +4597,7 @@
 LocationSummary* RedefinitionInstr::MakeLocationSummary(Zone* zone,
                                                         bool optimizing) const {
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void RedefinitionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -4621,7 +4622,7 @@
 LocationSummary* ParameterInstr::MakeLocationSummary(Zone* zone,
                                                      bool optimizing) const {
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void ParameterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -4677,7 +4678,7 @@
 
 LocationSummary* ParallelMoveInstr::MakeLocationSummary(Zone* zone,
                                                         bool optimizing) const {
-  return NULL;
+  return nullptr;
 }
 
 void ParallelMoveInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -4687,7 +4688,7 @@
 LocationSummary* ConstraintInstr::MakeLocationSummary(Zone* zone,
                                                       bool optimizing) const {
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void ConstraintInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -4698,7 +4699,7 @@
     Zone* zone,
     bool optimizing) const {
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void MaterializeObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -4748,7 +4749,7 @@
                                                             bool opt) const {
   // Only appears in initial definitions, never in normal code.
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void SpecialParameterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -5006,9 +5007,9 @@
     }
   }
 
-  const ICData* call_ic_data = NULL;
+  const ICData* call_ic_data = nullptr;
   if (!FLAG_propagate_ic_data || !compiler->is_optimizing() ||
-      (ic_data() == NULL)) {
+      (ic_data() == nullptr)) {
     const Array& arguments_descriptor =
         Array::Handle(zone, GetArgumentsDescriptor());
 
@@ -5373,7 +5374,7 @@
           receiver_cid,
           String::Handle(flow_graph->zone(), ic_data()->target_name()),
           Array::Handle(flow_graph->zone(), ic_data()->arguments_descriptor()));
-  if (new_target == NULL) {
+  if (new_target == nullptr) {
     // No specialization.
     return this;
   }
@@ -5550,7 +5551,7 @@
   }
 
   if (kind == MethodRecognizer::kObjectRuntimeType) {
-    if (input_use_list() == NULL) {
+    if (input_use_list() == nullptr) {
       // This function has only environment uses. In precompiled mode it is
       // fine to remove it - because we will never deoptimize.
       return flow_graph->constant_dead();
@@ -5629,9 +5630,9 @@
 
 void StaticCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Zone* zone = compiler->zone();
-  const ICData* call_ic_data = NULL;
+  const ICData* call_ic_data = nullptr;
   if (!FLAG_propagate_ic_data || !compiler->is_optimizing() ||
-      (ic_data() == NULL)) {
+      (ic_data() == nullptr)) {
     const Array& arguments_descriptor =
         Array::Handle(zone, GetArgumentsDescriptor());
     const int num_args_checked =
@@ -6092,9 +6093,9 @@
                                intptr_t fixed_parameter_count,
                                intptr_t lazy_deopt_pruning_count,
                                const ParsedFunction& parsed_function) {
-  Environment* env = new (zone)
-      Environment(definitions.length(), fixed_parameter_count,
-                  lazy_deopt_pruning_count, parsed_function.function(), NULL);
+  Environment* env = new (zone) Environment(
+      definitions.length(), fixed_parameter_count, lazy_deopt_pruning_count,
+      parsed_function.function(), nullptr);
   for (intptr_t i = 0; i < definitions.length(); ++i) {
     env->values_.Add(new (zone) Value(definitions[i]));
   }
@@ -6107,21 +6108,21 @@
 
 Environment* Environment::DeepCopy(Zone* zone, intptr_t length) const {
   ASSERT(length <= values_.length());
-  Environment* copy = new (zone)
-      Environment(length, fixed_parameter_count_, LazyDeoptPruneCount(),
-                  function_, (outer_ == NULL) ? NULL : outer_->DeepCopy(zone));
+  Environment* copy = new (zone) Environment(
+      length, fixed_parameter_count_, LazyDeoptPruneCount(), function_,
+      (outer_ == nullptr) ? nullptr : outer_->DeepCopy(zone));
   copy->SetDeoptId(DeoptIdBits::decode(bitfield_));
   copy->SetLazyDeoptToBeforeDeoptId(LazyDeoptToBeforeDeoptId());
   if (IsHoisted()) {
     copy->MarkAsHoisted();
   }
-  if (locations_ != NULL) {
+  if (locations_ != nullptr) {
     Location* new_locations = zone->Alloc<Location>(length);
     copy->set_locations(new_locations);
   }
   for (intptr_t i = 0; i < length; ++i) {
     copy->values_.Add(values_[i]->CopyWithType(zone));
-    if (locations_ != NULL) {
+    if (locations_ != nullptr) {
       copy->locations_[i] = locations_[i].Copy();
     }
   }
@@ -6172,8 +6173,7 @@
                                   Instruction* instr,
                                   intptr_t outer_deopt_id) const {
   // Create a deep copy removing caller arguments from the environment.
-  ASSERT(this != NULL);
-  ASSERT(instr->env()->outer() == NULL);
+  ASSERT(instr->env()->outer() == nullptr);
   intptr_t argument_count = instr->env()->fixed_parameter_count();
   Environment* outer =
       DeepCopy(zone, values_.length() - argument_count - LazyDeoptPruneCount());
@@ -6192,7 +6192,7 @@
 ComparisonInstr* DoubleTestOpInstr::CopyWithNewOperands(Value* new_left,
                                                         Value* new_right) {
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 ComparisonInstr* EqualityCompareInstr::CopyWithNewOperands(Value* new_left,
@@ -6772,7 +6772,7 @@
   bool auto_setup_scope = true;
   NativeFunction native_function = NativeEntry::ResolveNative(
       library, native_name(), num_params, &auto_setup_scope);
-  if (native_function == NULL) {
+  if (native_function == nullptr) {
     if (has_inlining_id()) {
       UNIMPLEMENTED();
     }
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index 88ab745..b29ff26 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -80,11 +80,11 @@
    public:
     explicit Iterator(Value* head) : next_(head) { Advance(); }
     Value* Current() const { return current_; }
-    bool Done() const { return current_ == NULL; }
+    bool Done() const { return current_ == nullptr; }
     void Advance() {
       // Pre-fetch next on advance and cache it.
       current_ = next_;
-      if (next_ != NULL) next_ = next_->next_use();
+      if (next_ != nullptr) next_ = next_->next_use();
     }
 
    private:
@@ -94,11 +94,11 @@
 
   explicit Value(Definition* definition)
       : definition_(definition),
-        previous_use_(NULL),
-        next_use_(NULL),
-        instruction_(NULL),
+        previous_use_(nullptr),
+        next_use_(nullptr),
+        instruction_(nullptr),
         use_index_(-1),
-        reaching_type_(NULL) {}
+        reaching_type_(nullptr) {}
 
   Definition* definition() const { return definition_; }
   void set_definition(Definition* definition) {
@@ -115,7 +115,7 @@
   void set_next_use(Value* next) { next_use_ = next; }
 
   bool IsSingleUse() const {
-    return (next_use_ == NULL) && (previous_use_ == NULL);
+    return (next_use_ == nullptr) && (previous_use_ == nullptr);
   }
 
   Instruction* instruction() const { return instruction_; }
@@ -274,7 +274,7 @@
     thread->set_hierarchy_info(this);
   }
 
-  ~HierarchyInfo() { thread()->set_hierarchy_info(NULL); }
+  ~HierarchyInfo() { thread()->set_hierarchy_info(nullptr); }
 
   // Returned from FindBestTAVOffset and SplitOnConsistentTypeArguments
   // to denote a failure to find a compatible concrete, finalized class.
@@ -974,7 +974,7 @@
   virtual intptr_t InputCount() const = 0;
   virtual Value* InputAt(intptr_t i) const = 0;
   void SetInputAt(intptr_t i, Value* value) {
-    ASSERT(value != NULL);
+    ASSERT(value != nullptr);
     value->set_instruction(this);
     value->set_use_index(i);
     RawSetInputAt(i, value);
@@ -1056,9 +1056,9 @@
   void set_next(Instruction* instr) {
     ASSERT(!IsGraphEntry());
     ASSERT(!IsReturn());
-    ASSERT(!IsBranch() || (instr == NULL));
+    ASSERT(!IsBranch() || (instr == nullptr));
     ASSERT(!IsPhi());
-    ASSERT(instr == NULL || !instr->IsBlockEntry());
+    ASSERT(instr == nullptr || !instr->IsBlockEntry());
     // TODO(fschneider): Also add Throw and ReThrow to the list of instructions
     // that do not have a successor. Currently, the graph builder will continue
     // to append instruction in case of a Throw inside an expression. This
@@ -1153,17 +1153,17 @@
   // Returns structure describing location constraints required
   // to emit native code for this instruction.
   LocationSummary* locs() {
-    ASSERT(locs_ != NULL);
+    ASSERT(locs_ != nullptr);
     return locs_;
   }
 
-  bool HasLocs() const { return locs_ != NULL; }
+  bool HasLocs() const { return locs_ != nullptr; }
 
   virtual LocationSummary* MakeLocationSummary(Zone* zone,
                                                bool is_optimizing) const = 0;
 
   void InitializeLocationSummary(Zone* zone, bool optimizing) {
-    ASSERT(locs_ == NULL);
+    ASSERT(locs_ == nullptr);
     locs_ = MakeLocationSummary(zone, optimizing);
   }
 
@@ -1222,7 +1222,7 @@
   // Representation of the value produced by this computation.
   virtual Representation representation() const { return kTagged; }
 
-  bool WasEliminated() const { return next() == NULL; }
+  bool WasEliminated() const { return next() == nullptr; }
 
   // Returns deoptimization id that corresponds to the deoptimization target
   // that input operands conversions inserted for this instruction can jump
@@ -1232,7 +1232,7 @@
     return DeoptId::kNone;
   }
 
-  // Returns a replacement for the instruction or NULL if the instruction can
+  // Returns a replacement for the instruction or nullptr if the instruction can
   // be eliminated.  By default returns the this instruction which means no
   // change.
   virtual Instruction* Canonicalize(FlowGraph* flow_graph);
@@ -1313,7 +1313,7 @@
 
   bool IsDominatedBy(Instruction* dom);
 
-  void ClearEnv() { env_ = NULL; }
+  void ClearEnv() { env_ = nullptr; }
 
   void Unsupported(FlowGraphCompiler* compiler);
 
@@ -1640,14 +1640,14 @@
 
   ParallelMoveInstr* parallel_move() const { return parallel_move_; }
 
-  bool HasParallelMove() const { return parallel_move_ != NULL; }
+  bool HasParallelMove() const { return parallel_move_ != nullptr; }
 
   bool HasNonRedundantParallelMove() const {
     return HasParallelMove() && !parallel_move()->IsRedundant();
   }
 
   ParallelMoveInstr* GetParallelMove() {
-    if (parallel_move_ == NULL) {
+    if (parallel_move_ == nullptr) {
       parallel_move_ = new ParallelMoveInstr();
     }
     return parallel_move_;
@@ -1797,7 +1797,7 @@
     current_ = current_->next();
   }
 
-  bool Done() const { return current_ == NULL; }
+  bool Done() const { return current_ == nullptr; }
 
   // Removes 'current_' from graph and sets 'current_' to previous instruction.
   void RemoveCurrentFromGraph();
@@ -1836,7 +1836,7 @@
  public:
   explicit BackwardInstructionIterator(BlockEntryInstr* block_entry)
       : block_entry_(block_entry), current_(block_entry->last_instruction()) {
-    ASSERT(block_entry_->previous() == NULL);
+    ASSERT(block_entry_->previous() == nullptr);
   }
 
   void Advance() {
@@ -1901,7 +1901,7 @@
   virtual intptr_t PredecessorCount() const { return 0; }
   virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
     UNREACHABLE();
-    return NULL;
+    return nullptr;
   }
   virtual intptr_t SuccessorCount() const;
   virtual BlockEntryInstr* SuccessorAt(intptr_t index) const;
@@ -2061,7 +2061,9 @@
     index_++;
   }
 
-  bool Done() const { return (phis_ == NULL) || (index_ >= phis_->length()); }
+  bool Done() const {
+    return (phis_ == nullptr) || (index_ >= phis_->length());
+  }
 
   PhiInstr* Current() const { return (*phis_)[index_]; }
 
@@ -2089,10 +2091,10 @@
   void adjust_edge_weight(double scale_factor) { edge_weight_ *= scale_factor; }
 
   virtual intptr_t PredecessorCount() const {
-    return (predecessor_ == NULL) ? 0 : 1;
+    return (predecessor_ == nullptr) ? 0 : 1;
   }
   virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
-    ASSERT((index == 0) && (predecessor_ != NULL));
+    ASSERT((index == 0) && (predecessor_ != nullptr));
     return predecessor_;
   }
 
@@ -2107,9 +2109,9 @@
  private:
   friend class BlockEntryInstr;  // Access to predecessor_ when inlining.
 
-  virtual void ClearPredecessors() { predecessor_ = NULL; }
+  virtual void ClearPredecessors() { predecessor_ = nullptr; }
   virtual void AddPredecessor(BlockEntryInstr* predecessor) {
-    ASSERT(predecessor_ == NULL);
+    ASSERT(predecessor_ == nullptr);
     predecessor_ = predecessor;
   }
 
@@ -2281,7 +2283,7 @@
                                   deopt_id,
                                   /*stack_depth=*/0),
         graph_entry_(graph_entry),
-        predecessor_(NULL),
+        predecessor_(nullptr),
         catch_handler_types_(Array::ZoneHandle(handler_types.ptr())),
         catch_try_index_(catch_try_index),
         exception_var_(exception_var),
@@ -2294,10 +2296,10 @@
   DECLARE_INSTRUCTION(CatchBlockEntry)
 
   virtual intptr_t PredecessorCount() const {
-    return (predecessor_ == NULL) ? 0 : 1;
+    return (predecessor_ == nullptr) ? 0 : 1;
   }
   virtual BlockEntryInstr* PredecessorAt(intptr_t index) const {
-    ASSERT((index == 0) && (predecessor_ != NULL));
+    ASSERT((index == 0) && (predecessor_ != nullptr));
     return predecessor_;
   }
 
@@ -2327,9 +2329,9 @@
  private:
   friend class BlockEntryInstr;  // Access to predecessor_ when inlining.
 
-  virtual void ClearPredecessors() { predecessor_ = NULL; }
+  virtual void ClearPredecessors() { predecessor_ = nullptr; }
   virtual void AddPredecessor(BlockEntryInstr* predecessor) {
-    ASSERT(predecessor_ == NULL);
+    ASSERT(predecessor_ == nullptr);
     predecessor_ = predecessor;
   }
 
@@ -2458,7 +2460,7 @@
   // Compile time type of the definition, which may be requested before type
   // propagation during graph building.
   CompileType* Type() {
-    if (type_ == NULL) {
+    if (type_ == nullptr) {
       auto type = new CompileType(ComputeType());
       type->set_owner(this);
       set_type(type);
@@ -2466,7 +2468,7 @@
     return type_;
   }
 
-  bool HasType() const { return (type_ != NULL); }
+  bool HasType() const { return (type_ != nullptr); }
 
   inline bool IsInt64Definition();
 
@@ -2503,7 +2505,7 @@
   }
 
   bool HasUses() const {
-    return (input_use_list_ != NULL) || (env_use_list_ != NULL);
+    return (input_use_list_ != nullptr) || (env_use_list_ != nullptr);
   }
   bool HasOnlyUse(Value* use) const;
   bool HasOnlyInputUse(Value* use) const;
@@ -2529,7 +2531,7 @@
   // Replace this definition with another instruction. Use the provided result
   // definition to replace uses of the original definition. If replacing during
   // iteration, pass the iterator so that the instruction can be replaced
-  // without affecting iteration order, otherwise pass a NULL iterator.
+  // without affecting iteration order, otherwise pass a nullptr iterator.
   void ReplaceWithResult(Instruction* replacement,
                          Definition* replacement_for_uses,
                          ForwardInstructionIterator* iterator);
@@ -2537,7 +2539,7 @@
   // Replace this definition and all uses with another definition.  If
   // replacing during iteration, pass the iterator so that the instruction
   // can be replaced without affecting iteration order, otherwise pass a
-  // NULL iterator.
+  // nullptr iterator.
   void ReplaceWith(Definition* other, ForwardInstructionIterator* iterator);
 
   // A value in the constant propagation lattice.
@@ -3425,7 +3427,7 @@
 class StopInstr : public TemplateInstruction<0, NoThrow> {
  public:
   explicit StopInstr(const char* message) : message_(message) {
-    ASSERT(message != NULL);
+    ASSERT(message != nullptr);
   }
 
   const char* message() const { return message_; }
@@ -3484,14 +3486,14 @@
 
   ParallelMoveInstr* parallel_move() const { return parallel_move_; }
 
-  bool HasParallelMove() const { return parallel_move_ != NULL; }
+  bool HasParallelMove() const { return parallel_move_ != nullptr; }
 
   bool HasNonRedundantParallelMove() const {
     return HasParallelMove() && !parallel_move()->IsRedundant();
   }
 
   ParallelMoveInstr* GetParallelMove() {
-    if (parallel_move_ == NULL) {
+    if (parallel_move_ == nullptr) {
       parallel_move_ = new ParallelMoveInstr();
     }
     return parallel_move_;
@@ -3708,7 +3710,7 @@
  public:
   explicit BranchInstr(ComparisonInstr* comparison, intptr_t deopt_id)
       : Instruction(deopt_id), comparison_(comparison) {
-    ASSERT(comparison->env() == NULL);
+    ASSERT(comparison->env() == nullptr);
     for (intptr_t i = comparison->InputCount() - 1; i >= 0; --i) {
       comparison->InputAt(i)->set_instruction(this);
     }
@@ -3829,7 +3831,7 @@
 
 class RedefinitionInstr : public TemplateDefinition<1, NoThrow> {
  public:
-  explicit RedefinitionInstr(Value* value) : constrained_type_(NULL) {
+  explicit RedefinitionInstr(Value* value) : constrained_type_(nullptr) {
     SetInputAt(0, value);
   }
 
@@ -4005,7 +4007,7 @@
 
   virtual Representation representation() const { return representation_; }
 
-  // Either NULL or the address of the unboxed constant.
+  // Either nullptr or the address of the unboxed constant.
   uword constant_address() const { return constant_address_; }
 
   DECLARE_INSTRUCTION(UnboxedConstant)
@@ -4013,7 +4015,8 @@
 
  private:
   const Representation representation_;
-  uword constant_address_;  // Either NULL or points to the untagged constant.
+  uword
+      constant_address_;  // Either nullptr or points to the untagged constant.
 
   DISALLOW_COPY_AND_ASSIGN(UnboxedConstantInstr);
 };
@@ -5136,7 +5139,7 @@
         if_true_(Smi::Cast(if_true->BoundConstant()).Value()),
         if_false_(Smi::Cast(if_false->BoundConstant()).Value()) {
     // Adjust uses at the comparison.
-    ASSERT(comparison->env() == NULL);
+    ASSERT(comparison->env() == nullptr);
     for (intptr_t i = comparison->InputCount() - 1; i >= 0; --i) {
       comparison->InputAt(i)->set_instruction(this);
     }
@@ -5232,7 +5235,7 @@
         call_count_(0),
         function_(function),
         rebind_rule_(rebind_rule),
-        result_type_(NULL),
+        result_type_(nullptr),
         is_known_list_constructor_(false),
         entry_kind_(Code::EntryKind::kNormal),
         identity_(AliasIdentity::Unknown()) {
@@ -5253,11 +5256,11 @@
                          argument_names,
                          std::move(arguments),
                          source),
-        ic_data_(NULL),
+        ic_data_(nullptr),
         call_count_(call_count),
         function_(function),
         rebind_rule_(rebind_rule),
-        result_type_(NULL),
+        result_type_(nullptr),
         is_known_list_constructor_(false),
         entry_kind_(Code::EntryKind::kNormal),
         identity_(AliasIdentity::Unknown()) {
@@ -5280,7 +5283,7 @@
     StaticCallInstr* new_call = new (zone) StaticCallInstr(
         call->source(), target, call->type_args_len(), call->argument_names(),
         std::move(args), call->deopt_id(), call_count, ICData::kNoRebind);
-    if (call->result_type() != NULL) {
+    if (call->result_type() != nullptr) {
       new_call->result_type_ = call->result_type();
     }
     new_call->set_entry_kind(call->entry_kind());
@@ -5289,7 +5292,9 @@
 
   // ICData for static calls carries call count.
   const ICData* ic_data() const { return ic_data_; }
-  bool HasICData() const { return (ic_data() != NULL) && !ic_data()->IsNull(); }
+  bool HasICData() const {
+    return (ic_data() != nullptr) && !ic_data()->IsNull();
+  }
 
   void set_ic_data(const ICData* value) { ic_data_ = value; }
 
@@ -5306,7 +5311,7 @@
   const Function& function() const { return function_; }
 
   virtual intptr_t CallCount() const {
-    return ic_data() == NULL ? call_count_ : ic_data()->AggregateCount();
+    return ic_data() == nullptr ? call_count_ : ic_data()->AggregateCount();
   }
 
   virtual bool ComputeCanDeoptimize() const {
@@ -5334,7 +5339,7 @@
   CompileType* result_type() const { return result_type_; }
 
   intptr_t result_cid() const {
-    if (result_type_ == NULL) {
+    if (result_type_ == nullptr) {
       return kDynamicCid;
     }
     return result_type_->ToCid();
@@ -6479,7 +6484,7 @@
 class StringToCharCodeInstr : public TemplateDefinition<1, NoThrow, Pure> {
  public:
   StringToCharCodeInstr(Value* str, intptr_t cid) : cid_(cid) {
-    ASSERT(str != NULL);
+    ASSERT(str != nullptr);
     SetInputAt(0, str);
   }
 
@@ -10681,7 +10686,7 @@
 
 class Environment : public ZoneAllocated {
  public:
-  // Iterate the non-NULL values in the innermost level of an environment.
+  // Iterate the non-nullptr values in the innermost level of an environment.
   class ShallowIterator : public ValueObject {
    public:
     explicit ShallowIterator(Environment* environment)
@@ -10706,18 +10711,18 @@
     }
 
     bool Done() const {
-      return (environment_ == NULL) || (index_ >= environment_->Length());
+      return (environment_ == nullptr) || (index_ >= environment_->Length());
     }
 
     Value* CurrentValue() const {
       ASSERT(!Done());
-      ASSERT(environment_->values_[index_] != NULL);
+      ASSERT(environment_->values_[index_] != nullptr);
       return environment_->values_[index_];
     }
 
     void SetCurrentValue(Value* value) {
       ASSERT(!Done());
-      ASSERT(value != NULL);
+      ASSERT(value != nullptr);
       environment_->values_[index_] = value;
     }
 
@@ -10736,7 +10741,7 @@
     intptr_t index_;
   };
 
-  // Iterate all non-NULL values in an environment, including outer
+  // Iterate all non-nullptr values in an environment, including outer
   // environments.  Note that the iterator skips empty environments.
   class DeepIterator : public ValueObject {
    public:
@@ -10750,7 +10755,7 @@
       SkipDone();
     }
 
-    bool Done() const { return iterator_.environment() == NULL; }
+    bool Done() const { return iterator_.environment() == nullptr; }
 
     Value* CurrentValue() const {
       ASSERT(!Done());
@@ -10790,7 +10795,7 @@
                            const ParsedFunction& parsed_function);
 
   void set_locations(Location* locations) {
-    ASSERT(locations_ == NULL);
+    ASSERT(locations_ == nullptr);
     locations_ = locations;
   }
 
@@ -10829,7 +10834,7 @@
 
   Environment* Outermost() {
     Environment* result = this;
-    while (result->outer() != NULL)
+    while (result->outer() != nullptr)
       result = result->outer();
     return result;
   }
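
Outermost() above is a plain nullptr-terminated chain walk: inlined environments link to their caller's environment through outer(), and the chain ends at nullptr. A self-contained sketch of the traversal (Env is an illustrative struct, not the VM's Environment class):

#include <iostream>

// Illustrative inlining environment: each frame points at the frame it was
// inlined into, and the outermost frame has no outer pointer.
struct Env {
  const char* function;
  Env* outer;
};

// Mirrors Environment::Outermost(): follow outer links until they end.
Env* Outermost(Env* env) {
  Env* result = env;
  while (result->outer != nullptr) {
    result = result->outer;
  }
  return result;
}

int main() {
  Env caller{"main", nullptr};
  Env inlined{"helper", &caller};
  Env deeply_inlined{"leaf", &inlined};
  std::cout << Outermost(&deeply_inlined)->function << "\n";  // prints "main"
  return 0;
}
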
@@ -10849,7 +10854,7 @@
   Value* ValueAtUseIndex(intptr_t index) const {
     const Environment* env = this;
     while (index >= env->Length()) {
-      ASSERT(env->outer_ != NULL);
+      ASSERT(env->outer_ != nullptr);
       index -= env->Length();
       env = env->outer_;
     }
@@ -10969,7 +10974,7 @@
 class FlowGraphVisitor : public InstructionVisitor {
  public:
   explicit FlowGraphVisitor(const GrowableArray<BlockEntryInstr*>& block_order)
-      : current_iterator_(NULL), block_order_(&block_order) {}
+      : current_iterator_(nullptr), block_order_(&block_order) {}
   virtual ~FlowGraphVisitor() {}
 
   ForwardInstructionIterator* current_iterator() const {
@@ -10996,9 +11001,11 @@
 #define DEFINE_UNIMPLEMENTED_INSTRUCTION(Name)                                 \
   LocationSummary* Name::MakeLocationSummary(Zone* zone, bool opt) const {     \
     UNIMPLEMENTED();                                                           \
-    return NULL;                                                               \
+    return nullptr;                                                            \
   }                                                                            \
-  void Name::EmitNativeCode(FlowGraphCompiler* compiler) { UNIMPLEMENTED(); }
+  void Name::EmitNativeCode(FlowGraphCompiler* compiler) {                     \
+    UNIMPLEMENTED();                                                           \
+  }
 
 template <intptr_t kExtraInputs>
 StringPtr TemplateDartCall<kExtraInputs>::Selector() {
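
The il.h substitutions are behavior-preserving, but nullptr is still the safer spelling: NULL is an integer constant, while nullptr has its own type (std::nullptr_t) and never competes with integer overloads. A minimal illustration, using hypothetical Record overloads unrelated to the VM:

#include <cstddef>
#include <iostream>

void Record(int* ptr) { std::cout << "pointer overload\n"; }
void Record(long value) { std::cout << "integer overload\n"; }

int main() {
  // Record(NULL) would be brittle: depending on how NULL is defined it
  // either silently picks the integer overload or is rejected as ambiguous.
  Record(nullptr);  // always selects Record(int*)
  Record(0L);       // always selects Record(long)
  return 0;
}
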
diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc
index ed5d945a..2eb6358 100644
--- a/runtime/vm/compiler/backend/il_arm.cc
+++ b/runtime/vm/compiler/backend/il_arm.cc
@@ -608,7 +608,7 @@
   // Emit comparison code. This must not overwrite the result register.
   // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
   // the labels or returning an invalid condition.
-  BranchLabels labels = {NULL, NULL, NULL};
+  BranchLabels labels = {nullptr, nullptr, nullptr};
   Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
   ASSERT(true_condition != kInvalidCondition);
 
@@ -1009,18 +1009,18 @@
     return locs;
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 static void LoadValueCid(FlowGraphCompiler* compiler,
                          Register value_cid_reg,
                          Register value_reg,
-                         compiler::Label* value_is_smi = NULL) {
-  if (value_is_smi == NULL) {
+                         compiler::Label* value_is_smi = nullptr) {
+  if (value_is_smi == nullptr) {
     __ mov(value_cid_reg, compiler::Operand(kSmiCid));
   }
   __ tst(value_reg, compiler::Operand(kSmiTagMask));
-  if (value_is_smi == NULL) {
+  if (value_is_smi == nullptr) {
     __ LoadClassId(value_cid_reg, value_reg, NE);
   } else {
     __ b(value_is_smi, EQ);
@@ -1328,7 +1328,7 @@
   compiler::Label* deopt =
       CanDeoptimize()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
-          : NULL;
+          : nullptr;
 
   const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
   const ZoneGrowableArray<intptr_t>& data = cid_results();
@@ -1346,7 +1346,7 @@
     __ b(result ? labels.true_label : labels.false_label, EQ);
   }
   // No match found, deoptimize or default action.
-  if (deopt == NULL) {
+  if (deopt == nullptr) {
     // If the cid is not in the list, jump to the opposite label from the cids
     // that are in the list.  These must be all the same (see asserts in the
     // constructor).
@@ -1972,7 +1972,7 @@
   }
 
   ConstantInstr* constant = value->definition()->AsConstant();
-  if ((constant == NULL) ||
+  if ((constant == nullptr) ||
       !compiler::Assembler::IsSafeSmi(constant->value())) {
     return false;
   }
@@ -2312,7 +2312,7 @@
       break;
     default:
       UNREACHABLE();
-      return NULL;
+      return nullptr;
   }
 
   return locs;
@@ -2560,9 +2560,9 @@
   compiler::Label* deopt =
       compiler->is_optimizing()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
-          : NULL;
+          : nullptr;
 
-  compiler::Label* fail = (deopt != NULL) ? deopt : &fail_label;
+  compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
 
   if (emit_full_guard) {
     __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
@@ -2613,7 +2613,7 @@
       __ b(&ok);
     }
 
-    if (deopt == NULL) {
+    if (deopt == nullptr) {
       __ Bind(fail);
 
       __ ldr(IP, compiler::FieldAddress(
@@ -2631,7 +2631,7 @@
     }
   } else {
     ASSERT(compiler->is_optimizing());
-    ASSERT(deopt != NULL);
+    ASSERT(deopt != nullptr);
 
     // Field guard class has been initialized and is known.
     if (value_cid == kDynamicCid) {
@@ -2699,7 +2699,7 @@
   compiler::Label* deopt =
       compiler->is_optimizing()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
-          : NULL;
+          : nullptr;
 
   const Register value_reg = locs()->in(0).reg();
 
@@ -2732,7 +2732,7 @@
     __ ldr(IP, compiler::Address(value_reg, offset_reg));
     __ cmp(length_reg, compiler::Operand(IP));
 
-    if (deopt == NULL) {
+    if (deopt == nullptr) {
       __ b(&ok, EQ);
 
       __ Push(field_reg);
@@ -3254,7 +3254,7 @@
     }
     // pending_deoptimization_env_ is needed to generate a runtime call that
     // may throw an exception.
-    ASSERT(compiler->pending_deoptimization_env_ == NULL);
+    ASSERT(compiler->pending_deoptimization_env_ == nullptr);
     Environment* env =
         compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
     compiler->pending_deoptimization_env_ = env;
@@ -3283,7 +3283,7 @@
                                      instruction()->deopt_id(),
                                      InstructionSource());
     }
-    compiler->pending_deoptimization_env_ = NULL;
+    compiler->pending_deoptimization_env_ = nullptr;
     if (!using_shared_stub) {
       compiler->RestoreLiveRegisters(instruction()->locs());
     }
@@ -3365,7 +3365,7 @@
       shift_left->CanDeoptimize()
           ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                    ICData::kDeoptBinarySmiOp)
-          : NULL;
+          : nullptr;
   if (locs.in(1).IsConstant()) {
     const Object& constant = locs.in(1).constant();
     ASSERT(compiler::target::IsSmi(constant));
@@ -3515,7 +3515,7 @@
 
   const Register left = locs()->in(0).reg();
   const Register result = locs()->out(0).reg();
-  compiler::Label* deopt = NULL;
+  compiler::Label* deopt = nullptr;
   if (CanDeoptimize()) {
     deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
   }
@@ -3526,7 +3526,7 @@
     const int32_t imm = compiler::target::ToRawSmi(constant);
     switch (op_kind()) {
       case Token::kADD: {
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ AddImmediate(result, left, imm);
         } else {
           __ AddImmediateSetFlags(result, left, imm);
@@ -3535,7 +3535,7 @@
         break;
       }
       case Token::kSUB: {
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ AddImmediate(result, left, -imm);
         } else {
           // Negating imm and using AddImmediateSetFlags would not detect the
@@ -3548,7 +3548,7 @@
       case Token::kMUL: {
         // Keep left value tagged and untag right value.
         const intptr_t value = compiler::target::SmiValue(constant);
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ LoadImmediate(IP, value);
           __ mul(result, left, IP);
         } else {
@@ -3681,7 +3681,7 @@
   const Register right = locs()->in(1).reg();
   switch (op_kind()) {
     case Token::kADD: {
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ add(result, left, compiler::Operand(right));
       } else {
         __ adds(result, left, compiler::Operand(right));
@@ -3690,7 +3690,7 @@
       break;
     }
     case Token::kSUB: {
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ sub(result, left, compiler::Operand(right));
       } else {
         __ subs(result, left, compiler::Operand(right));
@@ -3700,7 +3700,7 @@
     }
     case Token::kMUL: {
       __ SmiUntag(IP, left);
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ mul(result, IP, right);
       } else {
         __ smull(result, IP, IP, right);
@@ -3889,7 +3889,7 @@
       shift_left->CanDeoptimize()
           ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                    ICData::kDeoptBinarySmiOp)
-          : NULL;
+          : nullptr;
   ASSERT(locs.in(1).IsConstant());
   const Object& constant = locs.in(1).constant();
   ASSERT(compiler::target::IsSmi(constant));
@@ -3938,7 +3938,7 @@
 
   const Register left = locs()->in(0).reg();
   const Register result = locs()->out(0).reg();
-  compiler::Label* deopt = NULL;
+  compiler::Label* deopt = nullptr;
   if (CanDeoptimize()) {
     deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
   }
@@ -3949,7 +3949,7 @@
     const intptr_t value = compiler::target::SmiValue(constant);
     switch (op_kind()) {
       case Token::kADD: {
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ AddImmediate(result, left, value);
         } else {
           __ AddImmediateSetFlags(result, left, value);
@@ -3958,7 +3958,7 @@
         break;
       }
       case Token::kSUB: {
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ AddImmediate(result, left, -value);
         } else {
           // Negating value and using AddImmediateSetFlags would not detect the
@@ -3969,7 +3969,7 @@
         break;
       }
       case Token::kMUL: {
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ LoadImmediate(IP, value);
           __ mul(result, left, IP);
         } else {
@@ -4073,7 +4073,7 @@
   const Register right = locs()->in(1).reg();
   switch (op_kind()) {
     case Token::kADD: {
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ add(result, left, compiler::Operand(right));
       } else {
         __ adds(result, left, compiler::Operand(right));
@@ -4082,7 +4082,7 @@
       break;
     }
     case Token::kSUB: {
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ sub(result, left, compiler::Operand(right));
       } else {
         __ subs(result, left, compiler::Operand(right));
@@ -4091,7 +4091,7 @@
       break;
     }
     case Token::kMUL: {
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ mul(result, left, right);
       } else {
         __ smull(result, IP, left, right);
@@ -4458,7 +4458,7 @@
                               Register temp,
                               compiler::Label* deopt) {
   __ LoadFieldFromOffset(result, mint, compiler::target::Mint::value_offset());
-  if (deopt != NULL) {
+  if (deopt != nullptr) {
     __ LoadFieldFromOffset(
         temp, mint,
         compiler::target::Mint::value_offset() + compiler::target::kWordSize);
@@ -4493,8 +4493,8 @@
   compiler::Label* deopt =
       CanDeoptimize()
           ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
-          : NULL;
-  compiler::Label* out_of_range = !is_truncating() ? deopt : NULL;
+          : nullptr;
+  compiler::Label* out_of_range = !is_truncating() ? deopt : nullptr;
   ASSERT(value != out);
 
   if (value_cid == kSmiCid) {
@@ -4504,7 +4504,7 @@
   } else if (!CanDeoptimize()) {
     compiler::Label done;
     __ SmiUntag(out, value, &done);
-    LoadInt32FromMint(compiler, value, out, kNoRegister, NULL);
+    LoadInt32FromMint(compiler, value, out, kNoRegister, nullptr);
     __ Bind(&done);
   } else {
     compiler::Label done;
@@ -5217,7 +5217,7 @@
       break;
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -5463,7 +5463,7 @@
 LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
                                                          bool opt) const {
   UNIMPLEMENTED();
-  return NULL;
+  return nullptr;
 }
 
 void Int64ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -5538,7 +5538,7 @@
 LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
                                                           bool opt) const {
   UNIMPLEMENTED();
-  return NULL;
+  return nullptr;
 }
 
 void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -6615,7 +6615,7 @@
     Register right_hi = right_pair->At(1).reg();
 
     // Jump to a slow path if shift is larger than 63 or less than 0.
-    ShiftInt64OpSlowPath* slow_path = NULL;
+    ShiftInt64OpSlowPath* slow_path = nullptr;
     if (!IsShiftCountInRange()) {
       slow_path = new (Z) ShiftInt64OpSlowPath(this);
       compiler->AddSlowPathCode(slow_path);
@@ -6628,7 +6628,7 @@
     EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
                              left_hi, right_lo);
 
-    if (slow_path != NULL) {
+    if (slow_path != nullptr) {
       __ Bind(slow_path->exit_label());
     }
   }
@@ -6747,7 +6747,7 @@
     Register right_hi = right_pair->At(1).reg();
 
     // Jump to a slow path if shift count is > 31 or negative.
-    ShiftUint32OpSlowPath* slow_path = NULL;
+    ShiftUint32OpSlowPath* slow_path = nullptr;
     if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
       slow_path = new (Z) ShiftUint32OpSlowPath(this);
       compiler->AddSlowPathCode(slow_path);
@@ -6760,7 +6760,7 @@
 
     EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right_lo);
 
-    if (slow_path != NULL) {
+    if (slow_path != nullptr) {
       __ Bind(slow_path->exit_label());
     }
   }
@@ -7194,14 +7194,14 @@
   // If a constant has more than one use, make sure it is loaded in register
   // so that multiple immediate loads can be avoided.
   ConstantInstr* constant = left()->definition()->AsConstant();
-  if ((constant != NULL) && !left()->IsSingleUse()) {
+  if ((constant != nullptr) && !left()->IsSingleUse()) {
     locs->set_in(0, Location::RequiresRegister());
   } else {
     locs->set_in(0, LocationRegisterOrConstant(left()));
   }
 
   constant = right()->definition()->AsConstant();
-  if ((constant != NULL) && !right()->IsSingleUse()) {
+  if ((constant != nullptr) && !right()->IsSingleUse()) {
     locs->set_in(1, Location::RequiresRegister());
   } else {
     // Only one of the inputs can be a constant. Choose register if the first
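
Most il_arm.cc hunks touch one recurring pattern: a deoptimization label is created only when the instruction can deoptimize, nullptr means "no deopt target", and code emission branches on that pointer. A condensed sketch of the control flow (Label and FakeCompiler are stand-ins for the backend types, and the emitted "instructions" are just printed strings):

#include <iostream>

struct Label {
  const char* name;
};

struct FakeCompiler {
  Label deopt_stub{"deopt_stub"};
  Label* AddDeoptStub() { return &deopt_stub; }
};

// Mirrors the BinarySmiOp emission pattern: with no deopt label the cheap
// instruction is emitted, with one the flag-setting variant plus a branch.
void EmitAdd(FakeCompiler* compiler, bool can_deoptimize) {
  Label* deopt = can_deoptimize ? compiler->AddDeoptStub() : nullptr;
  if (deopt == nullptr) {
    std::cout << "add result, left, right\n";
  } else {
    std::cout << "adds result, left, right\n";
    std::cout << "b " << deopt->name << " (on overflow)\n";
  }
}

int main() {
  FakeCompiler compiler;
  EmitAdd(&compiler, /*can_deoptimize=*/false);
  EmitAdd(&compiler, /*can_deoptimize=*/true);
  return 0;
}
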
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index 237327a..95a1663 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -552,7 +552,7 @@
   // Emit comparison code. This must not overwrite the result register.
   // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
   // the labels or returning an invalid condition.
-  BranchLabels labels = {NULL, NULL, NULL};
+  BranchLabels labels = {nullptr, nullptr, nullptr};
   Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
   ASSERT(true_condition != kInvalidCondition);
 
@@ -1094,7 +1094,7 @@
     return locs;
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 static Condition TokenKindToDoubleCondition(Token::Kind kind) {
@@ -1197,7 +1197,7 @@
   compiler::Label* deopt =
       CanDeoptimize()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
-          : NULL;
+          : nullptr;
 
   const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
   const ZoneGrowableArray<intptr_t>& data = cid_results();
@@ -1214,7 +1214,7 @@
     __ b(result ? labels.true_label : labels.false_label, EQ);
   }
   // No match found, deoptimize or default action.
-  if (deopt == NULL) {
+  if (deopt == nullptr) {
     // If the cid is not in the list, jump to the opposite label from the cids
     // that are in the list.  These must be all the same (see asserts in the
     // constructor).
@@ -1256,7 +1256,7 @@
   }
 
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
@@ -1870,7 +1870,7 @@
 
 static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) {
   ConstantInstr* constant = value->definition()->AsConstant();
-  if ((constant == NULL) || !constant->value().IsSmi()) {
+  if ((constant == nullptr) || !constant->value().IsSmi()) {
     return false;
   }
   const int64_t index = Smi::Cast(constant->value()).AsInt64Value();
@@ -2125,7 +2125,7 @@
       break;
     default:
       UNREACHABLE();
-      return NULL;
+      return nullptr;
   }
   return locs;
 }
@@ -2275,12 +2275,12 @@
 static void LoadValueCid(FlowGraphCompiler* compiler,
                          Register value_cid_reg,
                          Register value_reg,
-                         compiler::Label* value_is_smi = NULL) {
+                         compiler::Label* value_is_smi = nullptr) {
   compiler::Label done;
-  if (value_is_smi == NULL) {
+  if (value_is_smi == nullptr) {
     __ LoadImmediate(value_cid_reg, kSmiCid);
   }
-  __ BranchIfSmi(value_reg, value_is_smi == NULL ? &done : value_is_smi);
+  __ BranchIfSmi(value_reg, value_is_smi == nullptr ? &done : value_is_smi);
   __ LoadClassId(value_cid_reg, value_reg);
   __ Bind(&done);
 }
@@ -2356,9 +2356,9 @@
   compiler::Label* deopt =
       compiler->is_optimizing()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
-          : NULL;
+          : nullptr;
 
-  compiler::Label* fail = (deopt != NULL) ? deopt : &fail_label;
+  compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
 
   if (emit_full_guard) {
     __ LoadObject(field_reg, Field::ZoneHandle((field().Original())));
@@ -2413,7 +2413,7 @@
       __ b(&ok);
     }
 
-    if (deopt == NULL) {
+    if (deopt == nullptr) {
       __ Bind(fail);
 
       __ LoadFieldFromOffset(TMP, field_reg, Field::guarded_cid_offset(),
@@ -2430,7 +2430,7 @@
     }
   } else {
     ASSERT(compiler->is_optimizing());
-    ASSERT(deopt != NULL);
+    ASSERT(deopt != nullptr);
 
     // Field guard class has been initialized and is known.
     if (value_cid == kDynamicCid) {
@@ -2491,7 +2491,7 @@
   compiler::Label* deopt =
       compiler->is_optimizing()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
-          : NULL;
+          : nullptr;
 
   const Register value_reg = locs()->in(0).reg();
 
@@ -2523,7 +2523,7 @@
     __ LoadCompressedSmi(TMP, compiler::Address(value_reg, offset_reg));
     __ CompareObjectRegisters(length_reg, TMP);
 
-    if (deopt == NULL) {
+    if (deopt == nullptr) {
       __ b(&ok, EQ);
 
       __ PushPair(value_reg, field_reg);
@@ -2912,7 +2912,7 @@
     }
     // pending_deoptimization_env_ is needed to generate a runtime call that
     // may throw an exception.
-    ASSERT(compiler->pending_deoptimization_env_ == NULL);
+    ASSERT(compiler->pending_deoptimization_env_ == nullptr);
     Environment* env =
         compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
     compiler->pending_deoptimization_env_ = env;
@@ -2954,7 +2954,7 @@
                                      instruction()->deopt_id(),
                                      InstructionSource());
     }
-    compiler->pending_deoptimization_env_ = NULL;
+    compiler->pending_deoptimization_env_ = nullptr;
     if (!using_shared_stub) {
       compiler->RestoreLiveRegisters(locs);
     }
@@ -3011,7 +3011,7 @@
       shift_left->CanDeoptimize()
           ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                    ICData::kDeoptBinarySmiOp)
-          : NULL;
+          : nullptr;
   if (locs.in(1).IsConstant()) {
     const Object& constant = locs.in(1).constant();
     ASSERT(constant.IsSmi());
@@ -3149,7 +3149,7 @@
 
   const Register left = locs()->in(0).reg();
   const Register result = locs()->out(0).reg();
-  compiler::Label* deopt = NULL;
+  compiler::Label* deopt = nullptr;
   if (CanDeoptimize()) {
     deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
   }
@@ -3160,7 +3160,7 @@
     const int64_t imm = Smi::RawValue(Smi::Cast(constant).Value());
     switch (op_kind()) {
       case Token::kADD: {
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ AddImmediate(result, left, imm, compiler::kObjectBytes);
         } else {
           __ AddImmediateSetFlags(result, left, imm, compiler::kObjectBytes);
@@ -3169,7 +3169,7 @@
         break;
       }
       case Token::kSUB: {
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ AddImmediate(result, left, -imm);
         } else {
           // Negating imm and using AddImmediateSetFlags would not detect the
@@ -3188,7 +3188,7 @@
 #else
         __ smull(result, left, TMP);
 #endif
-        if (deopt != NULL) {
+        if (deopt != nullptr) {
 #if !defined(DART_COMPRESSED_POINTERS)
           __ smulh(TMP, left, TMP);
           // TMP: result bits 64..127.
@@ -3281,7 +3281,7 @@
   const Register right = locs()->in(1).reg();
   switch (op_kind()) {
     case Token::kADD: {
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ add(result, left, compiler::Operand(right), compiler::kObjectBytes);
       } else {
         __ adds(result, left, compiler::Operand(right), compiler::kObjectBytes);
@@ -3290,7 +3290,7 @@
       break;
     }
     case Token::kSUB: {
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ sub(result, left, compiler::Operand(right), compiler::kObjectBytes);
       } else {
         __ subs(result, left, compiler::Operand(right), compiler::kObjectBytes);
@@ -3305,7 +3305,7 @@
 #else
       __ smull(result, TMP, right);
 #endif
-      if (deopt != NULL) {
+      if (deopt != nullptr) {
 #if !defined(DART_COMPRESSED_POINTERS)
         __ smulh(TMP, TMP, right);
         // TMP: result bits 64..127.
@@ -3808,7 +3808,7 @@
   compiler::Label* deopt =
       CanDeoptimize()
           ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
-          : NULL;
+          : nullptr;
 
   if (value_cid == kSmiCid) {
     __ SmiUntag(out, value);
@@ -3836,7 +3836,7 @@
 
   // TODO(vegorov): as it is implemented right now truncating unboxing would
   // leave "garbage" in the higher word.
-  if (!is_truncating() && (deopt != NULL)) {
+  if (!is_truncating() && (deopt != nullptr)) {
     ASSERT(representation() == kUnboxedInt32);
     __ cmp(out, compiler::Operand(out, SXTW, 0));
     __ b(deopt, NE);
@@ -4358,7 +4358,7 @@
       break;
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -4711,7 +4711,7 @@
 LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
                                                           bool opt) const {
   UNIMPLEMENTED();
-  return NULL;
+  return nullptr;
 }
 
 void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -5763,7 +5763,7 @@
     Register shift = locs()->in(1).reg();
 
     // Jump to a slow path if shift is larger than 63 or less than 0.
-    ShiftInt64OpSlowPath* slow_path = NULL;
+    ShiftInt64OpSlowPath* slow_path = nullptr;
     if (!IsShiftCountInRange()) {
       slow_path = new (Z) ShiftInt64OpSlowPath(this);
       compiler->AddSlowPathCode(slow_path);
@@ -5773,7 +5773,7 @@
 
     EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
 
-    if (slow_path != NULL) {
+    if (slow_path != nullptr) {
       __ Bind(slow_path->exit_label());
     }
   }
@@ -6068,7 +6068,7 @@
   const Register out = locs()->out(0).reg();
   compiler::Label* deopt =
       !CanDeoptimize()
-          ? NULL
+          ? nullptr
           : compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
   if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
     if (CanDeoptimize()) {
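
LoadValueCid in each backend takes an optional label defaulting to nullptr: with no label the helper classifies Smi values itself, with a label the Smi case branches out to the caller. A standalone sketch of that optional-out-label shape (all names here are illustrative, including the fake tag check):

#include <iostream>

struct Label {
  const char* name;
};

// With small_int_case == nullptr the helper fully classifies the value;
// with a label supplied, the small-integer case is delegated to the caller,
// mirroring LoadValueCid's value_is_smi parameter.
int ClassifyValue(long value, Label* small_int_case = nullptr) {
  const bool is_small_int = (value & 1) == 0;  // fake "Smi tag" check
  if (is_small_int) {
    if (small_int_case != nullptr) {
      std::cout << "branch to " << small_int_case->name << "\n";
      return -1;  // the caller's label handles this case
    }
    return 0;   // pretend 0 is the small-integer class id
  }
  return 42;    // pretend 42 is some heap-object class id
}

int main() {
  Label smi_label{"is_smi"};
  std::cout << ClassifyValue(4) << "\n";              // handled inline -> 0
  std::cout << ClassifyValue(4, &smi_label) << "\n";  // delegated -> -1
  std::cout << ClassifyValue(5) << "\n";              // heap object -> 42
  return 0;
}
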
diff --git a/runtime/vm/compiler/backend/il_ia32.cc b/runtime/vm/compiler/backend/il_ia32.cc
index e639555..320e152 100644
--- a/runtime/vm/compiler/backend/il_ia32.cc
+++ b/runtime/vm/compiler/backend/il_ia32.cc
@@ -652,19 +652,19 @@
     return locs;
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 static void LoadValueCid(FlowGraphCompiler* compiler,
                          Register value_cid_reg,
                          Register value_reg,
-                         compiler::Label* value_is_smi = NULL) {
+                         compiler::Label* value_is_smi = nullptr) {
   compiler::Label done;
-  if (value_is_smi == NULL) {
+  if (value_is_smi == nullptr) {
     __ movl(value_cid_reg, compiler::Immediate(kSmiCid));
   }
   __ testl(value_reg, compiler::Immediate(kSmiTagMask));
-  if (value_is_smi == NULL) {
+  if (value_is_smi == nullptr) {
     __ j(ZERO, &done, compiler::Assembler::kNearJump);
   } else {
     __ j(ZERO, value_is_smi);
@@ -938,7 +938,7 @@
   compiler::Label* deopt =
       CanDeoptimize()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
-          : NULL;
+          : nullptr;
 
   const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
   const ZoneGrowableArray<intptr_t>& data = cid_results();
@@ -955,7 +955,7 @@
     __ j(EQUAL, result ? labels.true_label : labels.false_label);
   }
   // No match found, deoptimize or default action.
-  if (deopt == NULL) {
+  if (deopt == nullptr) {
     // If the cid is not in the list, jump to the opposite label from the cids
     // that are in the list.  These must be all the same (see asserts in the
     // constructor).
@@ -1345,7 +1345,7 @@
 
 static bool CanBeImmediateIndex(Value* value, intptr_t cid) {
   ConstantInstr* constant = value->definition()->AsConstant();
-  if ((constant == NULL) ||
+  if ((constant == nullptr) ||
       !compiler::Assembler::IsSafeSmi(constant->value())) {
     return false;
   }
@@ -1796,7 +1796,7 @@
       break;
     default:
       UNREACHABLE();
-      return NULL;
+      return nullptr;
   }
   return locs;
 }
@@ -1994,7 +1994,7 @@
     deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField);
   }
 
-  compiler::Label* fail = (deopt != NULL) ? deopt : &fail_label;
+  compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
 
   if (emit_full_guard) {
     __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
@@ -2047,7 +2047,7 @@
       __ jmp(&ok);
     }
 
-    if (deopt == NULL) {
+    if (deopt == nullptr) {
       __ Bind(fail);
 
       __ cmpl(compiler::FieldAddress(field_reg, Field::guarded_cid_offset()),
@@ -2064,7 +2064,7 @@
     }
   } else {
     ASSERT(compiler->is_optimizing());
-    ASSERT(deopt != NULL);
+    ASSERT(deopt != nullptr);
     ASSERT(fail == deopt);
 
     // Field guard class has been initialized and is known.
@@ -2131,7 +2131,7 @@
   compiler::Label* deopt =
       compiler->is_optimizing()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
-          : NULL;
+          : nullptr;
 
   const Register value_reg = locs()->in(0).reg();
 
@@ -2161,7 +2161,7 @@
     // why we use Address instead of FieldAddress.
     __ cmpl(length_reg, compiler::Address(value_reg, offset_reg, TIMES_1, 0));
 
-    if (deopt == NULL) {
+    if (deopt == nullptr) {
       __ j(EQUAL, &ok);
 
       __ pushl(field_reg);
@@ -2534,7 +2534,7 @@
     compiler->SaveLiveRegisters(instruction()->locs());
     // pending_deoptimization_env_ is needed to generate a runtime call that
     // may throw an exception.
-    ASSERT(compiler->pending_deoptimization_env_ == NULL);
+    ASSERT(compiler->pending_deoptimization_env_ == nullptr);
     Environment* env = compiler->SlowPathEnvironmentFor(
         instruction(), /*num_slow_path_args=*/0);
     compiler->pending_deoptimization_env_ = env;
@@ -2551,7 +2551,7 @@
                                      instruction()->deopt_id(),
                                      InstructionSource());
     }
-    compiler->pending_deoptimization_env_ = NULL;
+    compiler->pending_deoptimization_env_ = nullptr;
     compiler->RestoreLiveRegisters(instruction()->locs());
     __ jmp(exit_label());
   }
@@ -2603,7 +2603,7 @@
       shift_left->CanDeoptimize()
           ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                    ICData::kDeoptBinarySmiOp)
-          : NULL;
+          : nullptr;
   if (locs.in(1).IsConstant()) {
     const Object& constant = locs.in(1).constant();
     ASSERT(constant.IsSmi());
@@ -2755,7 +2755,7 @@
     ConstantInstr* right_constant = right()->definition()->AsConstant();
     // Shift-by-1 overflow checking can use flags, otherwise we need a temp.
     const bool shiftBy1 =
-        (right_constant != NULL) && IsSmiValue(right_constant->value(), 1);
+        (right_constant != nullptr) && IsSmiValue(right_constant->value(), 1);
     const intptr_t kNumTemps = (can_overflow() && !shiftBy1) ? 1 : 0;
     LocationSummary* summary = new (zone)
         LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
@@ -2772,7 +2772,7 @@
         LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
     summary->set_in(0, Location::RequiresRegister());
     ConstantInstr* constant = right()->definition()->AsConstant();
-    if (constant != NULL) {
+    if (constant != nullptr) {
       summary->set_in(1, LocationRegisterOrSmiConstant(right()));
     } else {
       summary->set_in(1, Location::PrefersRegister());
@@ -2810,7 +2810,7 @@
     default:
       UNREACHABLE();
   }
-  if (deopt != NULL) __ j(OVERFLOW, deopt);
+  if (deopt != nullptr) __ j(OVERFLOW, deopt);
 }
 
 void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -2822,7 +2822,7 @@
   Register left = locs()->in(0).reg();
   Register result = locs()->out(0).reg();
   ASSERT(left == result);
-  compiler::Label* deopt = NULL;
+  compiler::Label* deopt = nullptr;
   if (CanDeoptimize()) {
     deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
   }
@@ -3143,10 +3143,10 @@
   const intptr_t kNumInputs = 2;
   if (op_kind() == Token::kTRUNCDIV) {
     UNREACHABLE();
-    return NULL;
+    return nullptr;
   } else if (op_kind() == Token::kMOD) {
     UNREACHABLE();
-    return NULL;
+    return nullptr;
   } else if ((op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
     const intptr_t kNumTemps = 0;
     LocationSummary* summary = new (zone)
@@ -3172,7 +3172,7 @@
         LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
     summary->set_in(0, Location::RequiresRegister());
     ConstantInstr* constant = right()->definition()->AsConstant();
-    if (constant != NULL) {
+    if (constant != nullptr) {
       summary->set_in(1, LocationRegisterOrSmiConstant(right()));
     } else {
       summary->set_in(1, Location::PrefersRegister());
@@ -3192,7 +3192,7 @@
       shift_left->CanDeoptimize()
           ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                    ICData::kDeoptBinarySmiOp)
-          : NULL;
+          : nullptr;
   ASSERT(locs.in(1).IsConstant());
 
   const Object& constant = locs.in(1).constant();
@@ -3223,7 +3223,7 @@
   Register left = locs()->in(0).reg();
   Register result = locs()->out(0).reg();
   ASSERT(left == result);
-  compiler::Label* deopt = NULL;
+  compiler::Label* deopt = nullptr;
   if (CanDeoptimize()) {
     deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
   }
@@ -3354,7 +3354,7 @@
     case Token::kBIT_XOR:
     case Token::kADD:
     case Token::kSUB:
-      EmitIntegerArithmetic(compiler, op_kind(), left, right, NULL);
+      EmitIntegerArithmetic(compiler, op_kind(), left, right, nullptr);
       return;
 
     case Token::kMUL:
@@ -3730,7 +3730,7 @@
                               Register temp,
                               compiler::Label* deopt) {
   __ movl(result, lo);
-  if (deopt != NULL) {
+  if (deopt != nullptr) {
     ASSERT(temp != result);
     __ movl(temp, result);
     __ sarl(temp, compiler::Immediate(31));
@@ -3748,7 +3748,7 @@
   if (CanDeoptimize()) {
     deopt = compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger);
   }
-  compiler::Label* out_of_range = !is_truncating() ? deopt : NULL;
+  compiler::Label* out_of_range = !is_truncating() ? deopt : nullptr;
 
   const intptr_t lo_offset = Mint::value_offset();
   const intptr_t hi_offset = Mint::value_offset() + kWordSize;
@@ -3757,7 +3757,7 @@
     ASSERT(value == result);
     __ SmiUntag(value);
   } else if (value_cid == kMintCid) {
-    ASSERT((value != result) || (out_of_range == NULL));
+    ASSERT((value != result) || (out_of_range == nullptr));
     LoadInt32FromMint(
         compiler, result, compiler::FieldAddress(value, lo_offset),
         compiler::FieldAddress(value, hi_offset), temp, out_of_range);
@@ -3773,7 +3773,7 @@
     compiler::Label done;
     __ SmiUntagOrCheckClass(value, kMintCid, temp, &done);
     __ j(NOT_EQUAL, deopt);
-    if (out_of_range != NULL) {
+    if (out_of_range != nullptr) {
       Register value_temp = locs()->temp(1).reg();
       __ movl(value_temp, value);
       value = value_temp;
@@ -4400,7 +4400,7 @@
       break;
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -5543,7 +5543,7 @@
     }
     default:
       UNREACHABLE();
-      return NULL;
+      return nullptr;
   }
 }
 
@@ -5864,7 +5864,7 @@
     Register right_hi = locs()->in(1).AsPairLocation()->At(1).reg();
 
     // Jump to a slow path if shift count is > 63 or negative.
-    ShiftInt64OpSlowPath* slow_path = NULL;
+    ShiftInt64OpSlowPath* slow_path = nullptr;
     if (!IsShiftCountInRange()) {
       slow_path = new (Z) ShiftInt64OpSlowPath(this);
       compiler->AddSlowPathCode(slow_path);
@@ -5876,7 +5876,7 @@
 
     EmitShiftInt64ByECX(compiler, op_kind(), left_lo, left_hi);
 
-    if (slow_path != NULL) {
+    if (slow_path != nullptr) {
       __ Bind(slow_path->exit_label());
     }
   }
@@ -5999,7 +5999,7 @@
     Register right_hi = locs()->in(1).AsPairLocation()->At(1).reg();
 
     // Jump to a slow path if shift count is > 31 or negative.
-    ShiftUint32OpSlowPath* slow_path = NULL;
+    ShiftUint32OpSlowPath* slow_path = nullptr;
     if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
       slow_path = new (Z) ShiftUint32OpSlowPath(this);
       compiler->AddSlowPathCode(slow_path);
@@ -6012,7 +6012,7 @@
 
     EmitShiftUint32ByECX(compiler, op_kind(), left);
 
-    if (slow_path != NULL) {
+    if (slow_path != nullptr) {
       __ Bind(slow_path->exit_label());
     }
   }
@@ -6375,7 +6375,7 @@
   // Emit comparison code. This must not overwrite the result register.
   // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
   // the labels or returning an invalid condition.
-  BranchLabels labels = {NULL, NULL, NULL};
+  BranchLabels labels = {nullptr, nullptr, nullptr};
   Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
   ASSERT(true_condition != kInvalidCondition);
 
diff --git a/runtime/vm/compiler/backend/il_printer.cc b/runtime/vm/compiler/backend/il_printer.cc
index fbc3a7e..2bfd89c 100644
--- a/runtime/vm/compiler/backend/il_printer.cc
+++ b/runtime/vm/compiler/backend/il_printer.cc
@@ -320,7 +320,7 @@
   char str[4000];
   BufferFormatter f(str, sizeof(str));
   instr->PrintTo(&f);
-  if (FLAG_print_environments && (instr->env() != NULL)) {
+  if (FLAG_print_environments && (instr->env() != nullptr)) {
     instr->env()->PrintTo(&f);
   }
   if (print_locations && (instr->HasLocs())) {
@@ -343,7 +343,7 @@
                                       const String& dst_name,
                                       bool eliminated) {
   const char* compile_type_name = "unknown";
-  if (value != NULL && value->reaching_type_ != NULL) {
+  if (value != nullptr && value->reaching_type_ != nullptr) {
     compile_type_name = value->reaching_type_->ToCString();
   }
   THR_Print(
@@ -527,7 +527,7 @@
 void Instruction::PrintOperandsTo(BaseTextBuffer* f) const {
   for (int i = 0; i < InputCount(); ++i) {
     if (i > 0) f->AddString(", ");
-    if (InputAt(i) != NULL) InputAt(i)->PrintTo(f);
+    if (InputAt(i) != nullptr) InputAt(i)->PrintTo(f);
   }
 }
 
@@ -541,12 +541,12 @@
   }
   PrintOperandsTo(f);
   f->AddString(")");
-  if (range_ != NULL) {
+  if (range_ != nullptr) {
     f->AddString(" ");
     range_->PrintTo(f);
   }
 
-  if (type_ != NULL) {
+  if (type_ != nullptr) {
     f->AddString(" ");
     type_->PrintTo(f);
   }
@@ -570,7 +570,7 @@
 void Definition::PrintOperandsTo(BaseTextBuffer* f) const {
   for (int i = 0; i < InputCount(); ++i) {
     if (i > 0) f->AddString(", ");
-    if (InputAt(i) != NULL) {
+    if (InputAt(i) != nullptr) {
       InputAt(i)->PrintTo(f);
     }
   }
@@ -597,7 +597,7 @@
 void Value::PrintTo(BaseTextBuffer* f) const {
   PrintUse(f, *definition());
 
-  if ((reaching_type_ != NULL) && (reaching_type_ != definition()->type_)) {
+  if ((reaching_type_ != nullptr) && (reaching_type_ != definition()->type_)) {
     f->AddString(" ");
     reaching_type_->PrintTo(f);
   }
@@ -606,7 +606,7 @@
 void ConstantInstr::PrintOperandsTo(BaseTextBuffer* f) const {
   const char* cstr = value().ToCString();
   const char* new_line = strchr(cstr, '\n');
-  if (new_line == NULL) {
+  if (new_line == nullptr) {
     f->Printf("#%s", cstr);
   } else {
     const intptr_t pos = new_line - cstr;
@@ -636,7 +636,7 @@
 }
 
 const char* Range::ToCString(const Range* range) {
-  if (range == NULL) return "[_|_, _|_]";
+  if (range == nullptr) return "[_|_, _|_]";
 
   char buffer[256];
   BufferFormatter f(buffer, sizeof(buffer));
@@ -677,7 +677,7 @@
 
 void DropTempsInstr::PrintOperandsTo(BaseTextBuffer* f) const {
   f->Printf("%" Pd "", num_temps());
-  if (value() != NULL) {
+  if (value() != nullptr) {
     f->AddString(", ");
     value()->PrintTo(f);
   }
@@ -1134,10 +1134,10 @@
     f->Printf("B%" Pd, predecessors_[i]->block_id());
   }
   f->AddString(")");
-  if (phis_ != NULL) {
+  if (phis_ != nullptr) {
     f->AddString(" {");
     for (intptr_t i = 0; i < phis_->length(); ++i) {
-      if ((*phis_)[i] == NULL) continue;
+      if ((*phis_)[i] == nullptr) continue;
       f->AddString("\n      ");
       (*phis_)[i]->PrintTo(f);
     }
@@ -1160,10 +1160,10 @@
     f->Printf("B%" Pd, predecessors_[i]->block_id());
   }
   f->AddString(")");
-  if (phis_ != NULL) {
+  if (phis_ != nullptr) {
     f->AddString(" {");
     for (intptr_t i = 0; i < phis_->length(); ++i) {
-      if ((*phis_)[i] == NULL) continue;
+      if ((*phis_)[i] == nullptr) continue;
       f->AddString("\n      ");
       (*phis_)[i]->PrintTo(f);
     }
@@ -1178,12 +1178,12 @@
 void PhiInstr::PrintTo(BaseTextBuffer* f) const {
   f->Printf("v%" Pd " <- phi(", ssa_temp_index());
   for (intptr_t i = 0; i < inputs_.length(); ++i) {
-    if (inputs_[i] != NULL) inputs_[i]->PrintTo(f);
+    if (inputs_[i] != nullptr) inputs_[i]->PrintTo(f);
     if (i < inputs_.length() - 1) f->AddString(", ");
   }
   f->AddString(")");
   f->AddString(is_alive() ? " alive" : " dead");
-  if (range_ != NULL) {
+  if (range_ != nullptr) {
     f->AddString(" ");
     range_->PrintTo(f);
   }
@@ -1492,14 +1492,14 @@
     } else {
       values_[i]->PrintTo(f);
     }
-    if ((locations_ != NULL) && !locations_[i].IsInvalid()) {
+    if ((locations_ != nullptr) && !locations_[i].IsInvalid()) {
       f->AddString(" [");
       locations_[i].PrintTo(f);
       f->AddString("]");
     }
   }
   f->AddString(" }");
-  if (outer_ != NULL) outer_->PrintTo(f);
+  if (outer_ != nullptr) outer_->PrintTo(f);
 }
 
 const char* Environment::ToCString() const {
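
The il_printer.cc hunks all guard optional annotations (range_, type_, reaching_type_) with a pointer check before printing. The same shape in a self-contained form, with Range and the type string as placeholders for the printer's real annotation types:

#include <iostream>
#include <string>

struct Range {
  int lo, hi;
};

// Optional annotations stay nullptr until an analysis pass fills them in;
// the printer only mentions what is actually present.
void PrintDefinition(std::ostream& out,
                     const std::string& name,
                     const Range* range,
                     const std::string* type) {
  out << name;
  if (range != nullptr) {
    out << " [" << range->lo << ", " << range->hi << "]";
  }
  if (type != nullptr) {
    out << " " << *type;
  }
  out << "\n";
}

int main() {
  Range r{0, 255};
  std::string t = "T{int}";
  PrintDefinition(std::cout, "v1 <- LoadField", nullptr, nullptr);
  PrintDefinition(std::cout, "v2 <- BinarySmiOp", &r, &t);
  return 0;
}
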
diff --git a/runtime/vm/compiler/backend/il_riscv.cc b/runtime/vm/compiler/backend/il_riscv.cc
index c1dab4c..8cf6e50 100644
--- a/runtime/vm/compiler/backend/il_riscv.cc
+++ b/runtime/vm/compiler/backend/il_riscv.cc
@@ -554,7 +554,7 @@
   // Emit comparison code. This must not overwrite the result register.
   // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
   // the labels or returning an invalid condition.
-  BranchLabels labels = {NULL, NULL, NULL};
+  BranchLabels labels = {nullptr, nullptr, nullptr};
   Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
   ASSERT(true_condition != kInvalidCondition);
 
@@ -1168,7 +1168,7 @@
     return locs;
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
@@ -1277,7 +1277,7 @@
   compiler::Label* deopt =
       CanDeoptimize()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
-          : NULL;
+          : nullptr;
 
   const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
   const ZoneGrowableArray<intptr_t>& data = cid_results();
@@ -1294,7 +1294,7 @@
     __ BranchIf(EQ, result ? labels.true_label : labels.false_label);
   }
   // No match found, deoptimize or default action.
-  if (deopt == NULL) {
+  if (deopt == nullptr) {
     // If the cid is not in the list, jump to the opposite label from the cids
     // that are in the list.  These must be all the same (see asserts in the
     // constructor).
@@ -1348,7 +1348,7 @@
   }
 
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
@@ -2034,7 +2034,7 @@
 
 static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) {
   ConstantInstr* constant = value->definition()->AsConstant();
-  if ((constant == NULL) || !constant->value().IsSmi()) {
+  if ((constant == nullptr) || !constant->value().IsSmi()) {
     return false;
   }
   const int64_t index = Smi::Cast(constant->value()).AsInt64Value();
@@ -2383,7 +2383,7 @@
       break;
     default:
       UNREACHABLE();
-      return NULL;
+      return nullptr;
   }
   return locs;
 }
@@ -2561,12 +2561,12 @@
 static void LoadValueCid(FlowGraphCompiler* compiler,
                          Register value_cid_reg,
                          Register value_reg,
-                         compiler::Label* value_is_smi = NULL) {
+                         compiler::Label* value_is_smi = nullptr) {
   compiler::Label done;
-  if (value_is_smi == NULL) {
+  if (value_is_smi == nullptr) {
     __ LoadImmediate(value_cid_reg, kSmiCid);
   }
-  __ BranchIfSmi(value_reg, value_is_smi == NULL ? &done : value_is_smi,
+  __ BranchIfSmi(value_reg, value_is_smi == nullptr ? &done : value_is_smi,
                  compiler::Assembler::kNearJump);
   __ LoadClassId(value_cid_reg, value_reg);
   __ Bind(&done);
@@ -2643,9 +2643,9 @@
   compiler::Label* deopt =
       compiler->is_optimizing()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
-          : NULL;
+          : nullptr;
 
-  compiler::Label* fail = (deopt != NULL) ? deopt : &fail_label;
+  compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
 
   if (emit_full_guard) {
     __ LoadObject(field_reg, Field::ZoneHandle((field().Original())));
@@ -2698,7 +2698,7 @@
       __ j(&ok, compiler::Assembler::kNearJump);
     }
 
-    if (deopt == NULL) {
+    if (deopt == nullptr) {
       __ Bind(fail);
 
       __ LoadFieldFromOffset(TMP, field_reg, Field::guarded_cid_offset(),
@@ -2715,7 +2715,7 @@
     }
   } else {
     ASSERT(compiler->is_optimizing());
-    ASSERT(deopt != NULL);
+    ASSERT(deopt != nullptr);
 
     // Field guard class has been initialized and is known.
     if (value_cid == kDynamicCid) {
@@ -2776,7 +2776,7 @@
   compiler::Label* deopt =
       compiler->is_optimizing()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
-          : NULL;
+          : nullptr;
 
   const Register value_reg = locs()->in(0).reg();
 
@@ -2807,7 +2807,7 @@
     __ lx(TMP, compiler::Address(TMP, 0));
     __ CompareObjectRegisters(length_reg, TMP);
 
-    if (deopt == NULL) {
+    if (deopt == nullptr) {
       __ BranchIf(EQ, &ok, compiler::Assembler::kNearJump);
 
       __ PushRegisterPair(value_reg, field_reg);
@@ -3193,7 +3193,7 @@
     }
     // pending_deoptimization_env_ is needed to generate a runtime call that
     // may throw an exception.
-    ASSERT(compiler->pending_deoptimization_env_ == NULL);
+    ASSERT(compiler->pending_deoptimization_env_ == nullptr);
     Environment* env =
         compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
     compiler->pending_deoptimization_env_ = env;
@@ -3235,7 +3235,7 @@
                                      instruction()->deopt_id(),
                                      InstructionSource());
     }
-    compiler->pending_deoptimization_env_ = NULL;
+    compiler->pending_deoptimization_env_ = nullptr;
     if (!using_shared_stub) {
       compiler->RestoreLiveRegisters(locs);
     }
@@ -3291,7 +3291,7 @@
       shift_left->CanDeoptimize()
           ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                    ICData::kDeoptBinarySmiOp)
-          : NULL;
+          : nullptr;
   if (locs.in(1).IsConstant()) {
     const Object& constant = locs.in(1).constant();
     ASSERT(constant.IsSmi());
@@ -3415,7 +3415,7 @@
 
   const Register left = locs()->in(0).reg();
   const Register result = locs()->out(0).reg();
-  compiler::Label* deopt = NULL;
+  compiler::Label* deopt = nullptr;
   if (CanDeoptimize()) {
     deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
   }
@@ -3426,7 +3426,7 @@
     const intx_t imm = static_cast<intx_t>(constant.ptr());
     switch (op_kind()) {
       case Token::kADD: {
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ AddImmediate(result, left, imm);
         } else {
           __ AddImmediateBranchOverflow(result, left, imm, deopt);
@@ -3434,7 +3434,7 @@
         break;
       }
       case Token::kSUB: {
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ AddImmediate(result, left, -imm);
         } else {
           // Negating imm and using AddImmediateSetFlags would not detect the
@@ -3446,7 +3446,7 @@
       case Token::kMUL: {
         // Keep left value tagged and untag right value.
         const intptr_t value = Smi::Cast(constant).Value();
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ LoadImmediate(TMP, value);
           __ mul(result, left, TMP);
         } else {
@@ -3565,7 +3565,7 @@
   const Register right = locs()->in(1).reg();
   switch (op_kind()) {
     case Token::kADD: {
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ add(result, left, right);
       } else if (RangeUtils::IsPositive(right_range())) {
         ASSERT(result != left);
@@ -3581,7 +3581,7 @@
       break;
     }
     case Token::kSUB: {
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ sub(result, left, right);
       } else if (RangeUtils::IsPositive(right_range())) {
         ASSERT(result != left);
@@ -3599,7 +3599,7 @@
     case Token::kMUL: {
       const Register temp = locs()->temp(0).reg();
       __ SmiUntag(temp, left);
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ mul(result, temp, right);
       } else {
         __ MultiplyBranchOverflow(result, temp, right, deopt);
@@ -4228,7 +4228,7 @@
                               Register result,
                               compiler::Label* deopt) {
   __ LoadFieldFromOffset(result, mint, compiler::target::Mint::value_offset());
-  if (deopt != NULL) {
+  if (deopt != nullptr) {
     __ LoadFieldFromOffset(
         TMP, mint,
         compiler::target::Mint::value_offset() + compiler::target::kWordSize);
@@ -4257,8 +4257,8 @@
   compiler::Label* deopt =
       CanDeoptimize()
           ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
-          : NULL;
-  compiler::Label* out_of_range = !is_truncating() ? deopt : NULL;
+          : nullptr;
+  compiler::Label* out_of_range = !is_truncating() ? deopt : nullptr;
   ASSERT(value != out);
 
   if (value_cid == kSmiCid) {
@@ -4269,7 +4269,7 @@
     compiler::Label done;
     __ SmiUntag(out, value);
     __ BranchIfSmi(value, &done, compiler::Assembler::kNearJump);
-    LoadInt32FromMint(compiler, value, out, NULL);
+    LoadInt32FromMint(compiler, value, out, nullptr);
     __ Bind(&done);
   } else {
     compiler::Label done;
@@ -4287,7 +4287,7 @@
   compiler::Label* deopt =
       CanDeoptimize()
           ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
-          : NULL;
+          : nullptr;
 
   if (value_cid == kSmiCid) {
     __ SmiUntag(out, value);
@@ -4315,7 +4315,7 @@
 
   // TODO(vegorov): as it is implemented right now truncating unboxing would
   // leave "garbage" in the higher word.
-  if (!is_truncating() && (deopt != NULL)) {
+  if (!is_truncating() && (deopt != nullptr)) {
     ASSERT(representation() == kUnboxedInt32);
     __ sextw(TMP, out);
     __ bne(TMP, out, deopt);
@@ -4592,7 +4592,7 @@
       break;
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -4826,7 +4826,7 @@
                                                          bool opt) const {
 #if XLEN == 32
   UNIMPLEMENTED();
-  return NULL;
+  return nullptr;
 #else
   const intptr_t kNumInputs = 1;
   const intptr_t kNumTemps = 0;
@@ -4931,7 +4931,7 @@
 LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
                                                           bool opt) const {
   UNIMPLEMENTED();
-  return NULL;
+  return nullptr;
 }
 
 void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -6259,7 +6259,7 @@
     Register right_hi = right_pair->At(1).reg();
 
     // Jump to a slow path if shift is larger than 63 or less than 0.
-    ShiftInt64OpSlowPath* slow_path = NULL;
+    ShiftInt64OpSlowPath* slow_path = nullptr;
     if (!IsShiftCountInRange()) {
       slow_path = new (Z) ShiftInt64OpSlowPath(this);
       compiler->AddSlowPathCode(slow_path);
@@ -6272,7 +6272,7 @@
     EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
                              left_hi, right_lo);
 
-    if (slow_path != NULL) {
+    if (slow_path != nullptr) {
       __ Bind(slow_path->exit_label());
     }
   }
@@ -6289,7 +6289,7 @@
     Register shift = locs()->in(1).reg();
 
     // Jump to a slow path if shift is larger than 63 or less than 0.
-    ShiftInt64OpSlowPath* slow_path = NULL;
+    ShiftInt64OpSlowPath* slow_path = nullptr;
     if (!IsShiftCountInRange()) {
       slow_path = new (Z) ShiftInt64OpSlowPath(this);
       compiler->AddSlowPathCode(slow_path);
@@ -6299,7 +6299,7 @@
 
     EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
 
-    if (slow_path != NULL) {
+    if (slow_path != nullptr) {
       __ Bind(slow_path->exit_label());
     }
   }
@@ -6475,7 +6475,7 @@
     Register right_hi = right_pair->At(1).reg();
 
     // Jump to a slow path if shift count is > 31 or negative.
-    ShiftUint32OpSlowPath* slow_path = NULL;
+    ShiftUint32OpSlowPath* slow_path = nullptr;
     if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
       slow_path = new (Z) ShiftUint32OpSlowPath(this);
       compiler->AddSlowPathCode(slow_path);
@@ -6488,7 +6488,7 @@
 
     EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right_lo);
 
-    if (slow_path != NULL) {
+    if (slow_path != nullptr) {
       __ Bind(slow_path->exit_label());
     }
   }
@@ -6719,7 +6719,7 @@
       shift_left->CanDeoptimize()
           ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                    ICData::kDeoptBinarySmiOp)
-          : NULL;
+          : nullptr;
   ASSERT(locs.in(1).IsConstant());
   const Object& constant = locs.in(1).constant();
   ASSERT(compiler::target::IsSmi(constant));
@@ -6765,7 +6765,7 @@
 
   const Register left = locs()->in(0).reg();
   const Register result = locs()->out(0).reg();
-  compiler::Label* deopt = NULL;
+  compiler::Label* deopt = nullptr;
   if (CanDeoptimize()) {
     deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
   }
@@ -6776,7 +6776,7 @@
     const intptr_t value = compiler::target::SmiValue(constant);
     switch (op_kind()) {
       case Token::kADD: {
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ AddImmediate(result, left, value);
         } else {
           __ AddImmediateBranchOverflow(result, left, value, deopt);
@@ -6784,7 +6784,7 @@
         break;
       }
       case Token::kSUB: {
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ AddImmediate(result, left, -value);
         } else {
           // Negating value and using AddImmediateSetFlags would not detect the
@@ -6796,7 +6796,7 @@
       case Token::kMUL: {
         const Register right = locs()->temp(0).reg();
         __ LoadImmediate(right, value);
-        if (deopt == NULL) {
+        if (deopt == nullptr) {
           __ mul(result, left, right);
         } else {
           __ MultiplyBranchOverflow(result, left, right, deopt);
@@ -6839,7 +6839,7 @@
   const Register right = locs()->in(1).reg();
   switch (op_kind()) {
     case Token::kADD: {
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ add(result, left, right);
       } else {
         __ AddBranchOverflow(result, left, right, deopt);
@@ -6847,7 +6847,7 @@
       break;
     }
     case Token::kSUB: {
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ sub(result, left, right);
       } else {
         __ SubtractBranchOverflow(result, left, right, deopt);
@@ -6855,7 +6855,7 @@
       break;
     }
     case Token::kMUL: {
-      if (deopt == NULL) {
+      if (deopt == nullptr) {
         __ mul(result, left, right);
       } else {
         __ MultiplyBranchOverflow(result, left, right, deopt);
@@ -7017,7 +7017,7 @@
   const Register out = locs()->out(0).reg();
   compiler::Label* deopt =
       !CanDeoptimize()
-          ? NULL
+          ? nullptr
           : compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
   if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
     if (CanDeoptimize()) {
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index a3e9da8..d636993 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -532,7 +532,7 @@
   // Emit comparison code. This must not overwrite the result register.
   // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
   // the labels or returning an invalid condition.
-  BranchLabels labels = {NULL, NULL, NULL};
+  BranchLabels labels = {nullptr, nullptr, nullptr};
   Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
   ASSERT(true_condition != kInvalidCondition);
 
@@ -831,19 +831,19 @@
     return locs;
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 static void LoadValueCid(FlowGraphCompiler* compiler,
                          Register value_cid_reg,
                          Register value_reg,
-                         compiler::Label* value_is_smi = NULL) {
+                         compiler::Label* value_is_smi = nullptr) {
   compiler::Label done;
-  if (value_is_smi == NULL) {
+  if (value_is_smi == nullptr) {
     __ LoadImmediate(value_cid_reg, compiler::Immediate(kSmiCid));
   }
   __ testq(value_reg, compiler::Immediate(kSmiTagMask));
-  if (value_is_smi == NULL) {
+  if (value_is_smi == nullptr) {
     __ j(ZERO, &done, compiler::Assembler::kNearJump);
   } else {
     __ j(ZERO, value_is_smi);
@@ -911,7 +911,7 @@
   Condition true_condition = TokenKindToIntCondition(kind);
   if (left.IsConstant() || right.IsConstant()) {
     // Ensure constant is on the right.
-    ConstantInstr* constant = NULL;
+    ConstantInstr* constant = nullptr;
     if (left.IsConstant()) {
       constant = left.constant_instruction();
       Location tmp = right;
@@ -949,7 +949,7 @@
   Condition true_condition = TokenKindToIntCondition(kind);
   if (left.IsConstant() || right.IsConstant()) {
     // Ensure constant is on the right.
-    ConstantInstr* constant = NULL;
+    ConstantInstr* constant = nullptr;
     if (left.IsConstant()) {
       constant = left.constant_instruction();
       Location tmp = right;
@@ -1137,7 +1137,7 @@
   compiler::Label* deopt =
       CanDeoptimize()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
-          : NULL;
+          : nullptr;
 
   const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
   const ZoneGrowableArray<intptr_t>& data = cid_results();
@@ -1154,7 +1154,7 @@
     __ j(EQUAL, result ? labels.true_label : labels.false_label);
   }
   // No match found, deoptimize or default action.
-  if (deopt == NULL) {
+  if (deopt == nullptr) {
     // If the cid is not in the list, jump to the opposite label from the cids
     // that are in the list.  These must be all the same (see asserts in the
     // constructor).
@@ -1195,7 +1195,7 @@
     return summary;
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
@@ -2120,7 +2120,7 @@
       break;
     default:
       UNREACHABLE();
-      return NULL;
+      return nullptr;
   }
   return locs;
 }
@@ -2318,12 +2318,12 @@
 
   compiler::Label ok, fail_label;
 
-  compiler::Label* deopt = NULL;
+  compiler::Label* deopt = nullptr;
   if (compiler->is_optimizing()) {
     deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField);
   }
 
-  compiler::Label* fail = (deopt != NULL) ? deopt : &fail_label;
+  compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
 
   if (emit_full_guard) {
     __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
@@ -2372,7 +2372,7 @@
       __ jmp(&ok);
     }
 
-    if (deopt == NULL) {
+    if (deopt == nullptr) {
       __ Bind(fail);
 
       __ cmpl(compiler::FieldAddress(field_reg, Field::guarded_cid_offset()),
@@ -2389,7 +2389,7 @@
     }
   } else {
     ASSERT(compiler->is_optimizing());
-    ASSERT(deopt != NULL);
+    ASSERT(deopt != nullptr);
 
     // Field guard class has been initialized and is known.
     if (value_cid == kDynamicCid) {
@@ -2450,7 +2450,7 @@
   compiler::Label* deopt =
       compiler->is_optimizing()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
-          : NULL;
+          : nullptr;
 
   const Register value_reg = locs()->in(0).reg();
 
@@ -2482,7 +2482,7 @@
     __ OBJ(cmp)(length_reg,
                 compiler::Address(value_reg, offset_reg, TIMES_1, 0));
 
-    if (deopt == NULL) {
+    if (deopt == nullptr) {
       __ j(EQUAL, &ok);
 
       __ pushq(field_reg);
@@ -2533,7 +2533,7 @@
   compiler::Label* deopt =
       compiler->is_optimizing()
           ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
-          : NULL;
+          : nullptr;
 
   compiler::Label ok;
 
@@ -2959,7 +2959,7 @@
     }
     // pending_deoptimization_env_ is needed to generate a runtime call that
     // may throw an exception.
-    ASSERT(compiler->pending_deoptimization_env_ == NULL);
+    ASSERT(compiler->pending_deoptimization_env_ == nullptr);
     Environment* env =
         compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
     compiler->pending_deoptimization_env_ = env;
@@ -2988,7 +2988,7 @@
                                      instruction()->deopt_id(),
                                      InstructionSource());
     }
-    compiler->pending_deoptimization_env_ = NULL;
+    compiler->pending_deoptimization_env_ = nullptr;
     if (!using_shared_stub) {
       compiler->RestoreLiveRegisters(instruction()->locs());
     }
@@ -3042,7 +3042,7 @@
       shift_left->CanDeoptimize()
           ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                    ICData::kDeoptBinarySmiOp)
-          : NULL;
+          : nullptr;
   if (locs.in(1).IsConstant()) {
     const Object& constant = locs.in(1).constant();
     ASSERT(constant.IsSmi());
@@ -3108,7 +3108,7 @@
   if (!shift_left->can_overflow()) {
     if (right_needs_check) {
       const bool right_may_be_negative =
-          (right_range == NULL) || !right_range->IsPositive();
+          (right_range == nullptr) || !right_range->IsPositive();
       if (right_may_be_negative) {
         ASSERT(shift_left->CanDeoptimize());
         __ CompareImmediate(right, compiler::Immediate(0),
@@ -3165,7 +3165,7 @@
   const intptr_t kNumInputs = 2;
 
   ConstantInstr* right_constant = right()->definition()->AsConstant();
-  if ((right_constant != NULL) && (op_kind() != Token::kTRUNCDIV) &&
+  if ((right_constant != nullptr) && (op_kind() != Token::kTRUNCDIV) &&
       (op_kind() != Token::kSHL) &&
 #if defined(DART_COMPRESSED_POINTERS)
       (op_kind() != Token::kUSHR) &&
@@ -3242,7 +3242,7 @@
   } else if (op_kind() == Token::kSHL) {
     // Shift-by-1 overflow checking can use flags, otherwise we need a temp.
     const bool shiftBy1 =
-        (right_constant != NULL) && IsSmiValue(right_constant->value(), 1);
+        (right_constant != nullptr) && IsSmiValue(right_constant->value(), 1);
     const intptr_t kNumTemps = (can_overflow() && !shiftBy1) ? 1 : 0;
     LocationSummary* summary = new (zone)
         LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
@@ -3259,7 +3259,7 @@
         LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
     summary->set_in(0, Location::RequiresRegister());
     ConstantInstr* constant = right()->definition()->AsConstant();
-    if (constant != NULL) {
+    if (constant != nullptr) {
       summary->set_in(1, LocationRegisterOrSmiConstant(right()));
     } else {
       summary->set_in(1, Location::PrefersRegister());
@@ -3278,7 +3278,7 @@
   Register left = locs()->in(0).reg();
   Register result = locs()->out(0).reg();
   ASSERT(left == result);
-  compiler::Label* deopt = NULL;
+  compiler::Label* deopt = nullptr;
   if (CanDeoptimize()) {
     deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
   }
@@ -3290,12 +3290,12 @@
     switch (op_kind()) {
       case Token::kADD: {
         __ AddImmediate(left, compiler::Immediate(imm), compiler::kObjectBytes);
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt != nullptr) __ j(OVERFLOW, deopt);
         break;
       }
       case Token::kSUB: {
         __ SubImmediate(left, compiler::Immediate(imm), compiler::kObjectBytes);
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt != nullptr) __ j(OVERFLOW, deopt);
         break;
       }
       case Token::kMUL: {
@@ -3303,7 +3303,7 @@
         const intptr_t value = Smi::Cast(constant).Value();
         __ MulImmediate(left, compiler::Immediate(value),
                         compiler::kObjectBytes);
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt != nullptr) __ j(OVERFLOW, deopt);
         break;
       }
       case Token::kTRUNCDIV: {
@@ -3399,18 +3399,18 @@
     switch (op_kind()) {
       case Token::kADD: {
         __ OBJ(add)(left, right);
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt != nullptr) __ j(OVERFLOW, deopt);
         break;
       }
       case Token::kSUB: {
         __ OBJ(sub)(left, right);
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt != nullptr) __ j(OVERFLOW, deopt);
         break;
       }
       case Token::kMUL: {
         __ SmiUntag(left);
         __ OBJ(imul)(left, right);
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt != nullptr) __ j(OVERFLOW, deopt);
         break;
       }
       case Token::kBIT_AND: {
@@ -3440,18 +3440,18 @@
   switch (op_kind()) {
     case Token::kADD: {
       __ OBJ(add)(left, right);
-      if (deopt != NULL) __ j(OVERFLOW, deopt);
+      if (deopt != nullptr) __ j(OVERFLOW, deopt);
       break;
     }
     case Token::kSUB: {
       __ OBJ(sub)(left, right);
-      if (deopt != NULL) __ j(OVERFLOW, deopt);
+      if (deopt != nullptr) __ j(OVERFLOW, deopt);
       break;
     }
     case Token::kMUL: {
       __ SmiUntag(left);
       __ OBJ(imul)(left, right);
-      if (deopt != NULL) __ j(OVERFLOW, deopt);
+      if (deopt != nullptr) __ j(OVERFLOW, deopt);
       break;
     }
     case Token::kBIT_AND: {
@@ -3925,7 +3925,7 @@
   compiler::Label* deopt =
       CanDeoptimize()
           ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
-          : NULL;
+          : nullptr;
   ASSERT(value == locs()->out(0).reg());
 
   if (value_cid == kSmiCid) {
@@ -3985,7 +3985,7 @@
 
   // TODO(vegorov): as it is implemented right now truncating unboxing would
   // leave "garbage" in the higher word.
-  if (!is_truncating() && (deopt != NULL)) {
+  if (!is_truncating() && (deopt != nullptr)) {
     ASSERT(representation() == kUnboxedInt32);
     Register temp = locs()->temp(0).reg();
     __ movsxd(temp, value);
@@ -4635,7 +4635,7 @@
       break;
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@@ -5361,7 +5361,7 @@
   __ cmpq(RDX, compiler::Immediate(0));
   __ j(GREATER_EQUAL, &all_done, compiler::Assembler::kNearJump);
   // Result is negative, adjust it.
-  if ((divisor_range() == NULL) || divisor_range()->Overlaps(-1, 1)) {
+  if ((divisor_range() == nullptr) || divisor_range()->Overlaps(-1, 1)) {
     compiler::Label subtract;
     __ cmpq(right, compiler::Immediate(0));
     __ j(LESS, &subtract, compiler::Assembler::kNearJump);
@@ -6209,7 +6209,7 @@
     ASSERT(locs()->in(1).reg() == RCX);
 
     // Jump to a slow path if shift count is > 63 or negative.
-    ShiftInt64OpSlowPath* slow_path = NULL;
+    ShiftInt64OpSlowPath* slow_path = nullptr;
     if (!IsShiftCountInRange()) {
       slow_path = new (Z) ShiftInt64OpSlowPath(this);
       compiler->AddSlowPathCode(slow_path);
@@ -6220,7 +6220,7 @@
 
     EmitShiftInt64ByRCX(compiler, op_kind(), left);
 
-    if (slow_path != NULL) {
+    if (slow_path != nullptr) {
       __ Bind(slow_path->exit_label());
     }
   }
@@ -6325,7 +6325,7 @@
     ASSERT(locs()->in(1).reg() == RCX);
 
     // Jump to a slow path if shift count is > 31 or negative.
-    ShiftUint32OpSlowPath* slow_path = NULL;
+    ShiftUint32OpSlowPath* slow_path = nullptr;
     if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
       slow_path = new (Z) ShiftUint32OpSlowPath(this);
       compiler->AddSlowPathCode(slow_path);
@@ -6336,7 +6336,7 @@
 
     EmitShiftUint32ByRCX(compiler, op_kind(), left);
 
-    if (slow_path != NULL) {
+    if (slow_path != nullptr) {
       __ Bind(slow_path->exit_label());
     }
   }
diff --git a/runtime/vm/compiler/backend/inliner.cc b/runtime/vm/compiler/backend/inliner.cc
index e0d0e0b3..e10e029 100644
--- a/runtime/vm/compiler/backend/inliner.cc
+++ b/runtime/vm/compiler/backend/inliner.cc
@@ -33,7 +33,7 @@
             12,
             "How many times we allow deoptimization before we stop inlining.");
 DEFINE_FLAG(bool, trace_inlining, false, "Trace inlining");
-DEFINE_FLAG(charp, inlining_filter, NULL, "Inline only in named function");
+DEFINE_FLAG(charp, inlining_filter, nullptr, "Inline only in named function");
 
 // Flags for inlining heuristics.
 DEFINE_FLAG(int,
@@ -112,7 +112,7 @@
 // Test if a call is recursive by looking in the deoptimization environment.
 static bool IsCallRecursive(const Function& function, Definition* call) {
   Environment* env = call->env();
-  while (env != NULL) {
+  while (env != nullptr) {
     if (function.ptr() == env->function().ptr()) {
       return true;
     }
@@ -402,7 +402,7 @@
       for (ForwardInstructionIterator it(block_it.Current()); !it.Done();
            it.Advance()) {
         Instruction* current = it.Current();
-        Definition* call = NULL;
+        Definition* call = nullptr;
         if (current->IsPolymorphicInstanceCall()) {
           PolymorphicInstanceCallInstr* instance_call =
               current->AsPolymorphicInstanceCall();
@@ -415,7 +415,7 @@
         } else if (current->IsClosureCall()) {
           // TODO(srdjan): Add data for closure calls.
         }
-        if (call != NULL) {
+        if (call != nullptr) {
           inlined_info->Add(
               InlinedInfo(caller, &target, depth + 1, call, "Too deep"));
         }
@@ -563,7 +563,7 @@
                      intptr_t depth,
                      GrowableArray<InlinedInfo>* inlined_info) {
     COMPILER_TIMINGS_TIMER_SCOPE(graph->thread(), FindCallSites);
-    ASSERT(graph != NULL);
+    ASSERT(graph != nullptr);
     if (depth > inlining_depth_threshold_) {
       if (FLAG_print_inlining_tree) {
         RecordAllNotInlinedFunction(graph, depth, inlined_info);
@@ -737,9 +737,9 @@
         arguments_descriptor(arguments_descriptor),
         first_arg_index(first_arg_index),
         arguments(arguments),
-        callee_graph(NULL),
-        parameter_stubs(NULL),
-        exit_collector(NULL),
+        callee_graph(nullptr),
+        parameter_stubs(nullptr),
+        exit_collector(nullptr),
         caller(caller) {}
 
   Definition* call;
@@ -864,7 +864,7 @@
   const bool is_polymorphic = call_data->call->IsPolymorphicInstanceCall();
   const bool no_checks =
       IsAThisCallThroughAnUncheckedEntryPoint(call_data->call);
-  ASSERT(is_polymorphic == (target_info != NULL));
+  ASSERT(is_polymorphic == (target_info != nullptr));
   FlowGraph* callee_graph = call_data->callee_graph;
   auto callee_entry = callee_graph->graph_entry()->normal_entry();
   const Function& callee = callee_graph->function();
@@ -1031,8 +1031,8 @@
         inlining_depth_(1),
         inlining_recursion_depth_(0),
         inlining_depth_threshold_(threshold),
-        collected_call_sites_(NULL),
-        inlining_call_sites_(NULL),
+        collected_call_sites_(nullptr),
+        inlining_call_sites_(nullptr),
         function_cache_(),
         inlined_info_() {}
 
@@ -1102,7 +1102,7 @@
     GrowableArray<CallSites::CallInfo<InstanceCallInstr>> calls;
     CallSites sites1(inlining_depth_threshold_, &calls);
     CallSites sites2(inlining_depth_threshold_, &calls);
-    CallSites* call_sites_temp = NULL;
+    CallSites* call_sites_temp = nullptr;
     collected_call_sites_ = &sites1;
     inlining_call_sites_ = &sites2;
     // Collect initial call sites.
@@ -1141,8 +1141,8 @@
       }
     }
 
-    collected_call_sites_ = NULL;
-    inlining_call_sites_ = NULL;
+    collected_call_sites_ = nullptr;
+    inlining_call_sites_ = nullptr;
   }
 
   bool inlined() const { return inlined_; }
@@ -1157,7 +1157,7 @@
                                   Value* argument,
                                   FlowGraph* graph) {
     ConstantInstr* constant = argument->definition()->AsConstant();
-    if (constant != NULL) {
+    if (constant != nullptr) {
       return graph->GetConstant(constant->value());
     } else {
       ParameterInstr* param =
@@ -1362,7 +1362,7 @@
           // Closure functions only have one entry point.
         }
         kernel::FlowGraphBuilder builder(
-            parsed_function, ic_data_array, /* not building var desc */ NULL,
+            parsed_function, ic_data_array, /* not building var desc */ nullptr,
             exit_collector,
             /* optimized = */ true, Compiler::kNoOSRDeoptId,
             caller_graph_->max_block_id() + 1,
@@ -1635,7 +1635,7 @@
         TRACE_INLINING(THR_Print(
             "       with reason %s, code size %" Pd ", call sites: %" Pd "\n",
             decision.reason, instruction_count, call_site_count));
-        PRINT_INLINING_TREE(NULL, &call_data->caller, &function, call);
+        PRINT_INLINING_TREE(nullptr, &call_data->caller, &function, call);
         return true;
       } else {
         error = thread()->StealStickyError();
@@ -1696,7 +1696,7 @@
     // Print those that were inlined.
     for (intptr_t i = 0; i < inlined_info_.length(); i++) {
       const InlinedInfo& info = inlined_info_[i];
-      if (info.bailout_reason != NULL) {
+      if (info.bailout_reason != nullptr) {
         continue;
       }
       if ((info.inlined_depth == depth) &&
@@ -1715,7 +1715,7 @@
     // Print those that were not inlined.
     for (intptr_t i = 0; i < inlined_info_.length(); i++) {
       const InlinedInfo& info = inlined_info_[i];
-      if (info.bailout_reason == NULL) {
+      if (info.bailout_reason == nullptr) {
         continue;
       }
       if ((info.inlined_depth == depth) &&
@@ -1739,7 +1739,7 @@
     // Plug result in the caller graph.
     InlineExitCollector* exit_collector = call_data->exit_collector;
     exit_collector->PrepareGraphs(callee_graph);
-    ReplaceParameterStubs(zone(), caller_graph_, call_data, NULL);
+    ReplaceParameterStubs(zone(), caller_graph_, call_data, nullptr);
     exit_collector->ReplaceCall(callee_function_entry);
 
     ASSERT(!call_data->call->HasMoveArguments());
@@ -1777,7 +1777,7 @@
       StaticCallInstr* call = call_info[call_idx].call;
 
       if (FlowGraphInliner::TryReplaceStaticCallWithInline(
-              inliner_->flow_graph(), NULL, call,
+              inliner_->flow_graph(), nullptr, call,
               inliner_->speculative_policy_)) {
         inlined = true;
         continue;
@@ -1930,7 +1930,7 @@
         const Instance& object =
             parsed_function.DefaultParameterValueAt(i - fixed_param_count);
         ConstantInstr* constant = callee_graph->GetConstant(object);
-        arguments->Add(NULL);
+        arguments->Add(nullptr);
         param_stubs->Add(constant);
       }
       return true;
@@ -1948,7 +1948,7 @@
       for (intptr_t i = 0; i < param_count - fixed_param_count; ++i) {
         const Instance& object = parsed_function.DefaultParameterValueAt(i);
         ConstantInstr* constant = callee_graph->GetConstant(object);
-        arguments->Add(NULL);
+        arguments->Add(nullptr);
         param_stubs->Add(constant);
       }
       return true;
@@ -1972,7 +1972,7 @@
     for (intptr_t i = fixed_param_count; i < param_count; ++i) {
       String& param_name = String::Handle(function.ParameterNameAt(i));
       // Search for and add the named argument.
-      Value* arg = NULL;
+      Value* arg = nullptr;
       for (intptr_t j = 0; j < named_args.length(); ++j) {
         if (param_name.Equals(*named_args[j].name)) {
           arg = named_args[j].value;
@@ -1982,7 +1982,7 @@
       }
       arguments->Add(arg);
       // Create a stub for the argument or use the parameter's default value.
-      if (arg != NULL) {
+      if (arg != nullptr) {
         param_stubs->Add(CreateParameterStub(i, arg, callee_graph));
       } else {
         const Instance& object =
@@ -2276,7 +2276,7 @@
         // already constructed a join and set its dominator.  Add a jump to
         // the join.
         JoinEntryInstr* join = callee_entry->AsJoinEntry();
-        ASSERT(join->dominator() != NULL);
+        ASSERT(join->dominator() != nullptr);
         GotoInstr* goto_join = new GotoInstr(join, DeoptId::kNone);
         goto_join->InheritDeoptTarget(zone(), join);
         cursor->LinkTo(goto_join);
@@ -2286,7 +2286,7 @@
         // shared inlined body) because this is the last inlined entry.
         UNREACHABLE();
       }
-      cursor = NULL;
+      cursor = nullptr;
     } else {
       // For all variants except the last, use a branch on the loaded class
       // id.
@@ -2296,7 +2296,7 @@
       const Smi& cid = Smi::ZoneHandle(Smi::New(variant.cid_start));
       ConstantInstr* cid_constant = owner_->caller_graph()->GetConstant(cid);
       BranchInstr* branch;
-      BranchInstr* upper_limit_branch = NULL;
+      BranchInstr* upper_limit_branch = nullptr;
       BlockEntryInstr* cid_test_entry_block = current_block;
       if (test_is_range) {
         // Double branch for testing a range of Cids.
@@ -2343,7 +2343,7 @@
       // cases (unshared, shared first predecessor, and shared subsequent
       // predecessors).
       BlockEntryInstr* callee_entry = inlined_entries_[i];
-      TargetEntryInstr* true_target = NULL;
+      TargetEntryInstr* true_target = nullptr;
       if (callee_entry->IsGraphEntry()) {
         // Unshared.
         auto graph_entry = callee_entry->AsGraphEntry();
@@ -2371,8 +2371,8 @@
         // already constructed a join.  We need a fresh target that jumps to
         // the join.
         JoinEntryInstr* join = callee_entry->AsJoinEntry();
-        ASSERT(join != NULL);
-        ASSERT(join->dominator() != NULL);
+        ASSERT(join != nullptr);
+        ASSERT(join->dominator() != nullptr);
         true_target =
             new TargetEntryInstr(AllocateBlockId(), try_idx, DeoptId::kNone);
         true_target->InheritDeoptTarget(zone(), join);
@@ -2700,8 +2700,9 @@
                                      &call_site_count);
 
   const Function& top = flow_graph_->function();
-  if ((FLAG_inlining_filter != NULL) &&
-      (strstr(top.ToFullyQualifiedCString(), FLAG_inlining_filter) == NULL)) {
+  if ((FLAG_inlining_filter != nullptr) &&
+      (strstr(top.ToFullyQualifiedCString(), FLAG_inlining_filter) ==
+       nullptr)) {
     return 0;
   }
 
@@ -2782,7 +2783,7 @@
   LoadFieldInstr* length = new (Z) LoadFieldInstr(
       new (Z) Value(*array), Slot::GetLengthFieldForArrayCid(array_cid),
       call->source());
-  *cursor = flow_graph->AppendTo(*cursor, length, NULL, FlowGraph::kValue);
+  *cursor = flow_graph->AppendTo(*cursor, length, nullptr, FlowGraph::kValue);
   *index = flow_graph->CreateCheckBound(length, *index, call->deopt_id());
   *cursor =
       flow_graph->AppendTo(*cursor, *index, call->env(), FlowGraph::kValue);
@@ -2792,14 +2793,16 @@
     LoadFieldInstr* elements = new (Z)
         LoadFieldInstr(new (Z) Value(*array), Slot::GrowableObjectArray_data(),
                        call->source());
-    *cursor = flow_graph->AppendTo(*cursor, elements, NULL, FlowGraph::kValue);
+    *cursor =
+        flow_graph->AppendTo(*cursor, elements, nullptr, FlowGraph::kValue);
     // Load from the data from backing store which is a fixed-length array.
     *array = elements;
     array_cid = kArrayCid;
   } else if (IsExternalTypedDataClassId(array_cid)) {
     LoadUntaggedInstr* elements = new (Z) LoadUntaggedInstr(
         new (Z) Value(*array), compiler::target::PointerBase::data_offset());
-    *cursor = flow_graph->AppendTo(*cursor, elements, NULL, FlowGraph::kValue);
+    *cursor =
+        flow_graph->AppendTo(*cursor, elements, nullptr, FlowGraph::kValue);
     *array = elements;
   }
   return array_cid;
@@ -2849,9 +2852,9 @@
       ResultType(call));
 
   *last = load;
-  cursor = flow_graph->AppendTo(cursor, load,
-                                deopt_id != DeoptId::kNone ? call->env() : NULL,
-                                FlowGraph::kValue);
+  cursor = flow_graph->AppendTo(
+      cursor, load, deopt_id != DeoptId::kNone ? call->env() : nullptr,
+      FlowGraph::kValue);
 
   const bool value_needs_boxing =
       array_cid == kTypedDataInt8ArrayCid ||
@@ -2865,7 +2868,7 @@
   if (array_cid == kTypedDataFloat32ArrayCid) {
     *last = new (Z) FloatToDoubleInstr(new (Z) Value(load), deopt_id);
     flow_graph->AppendTo(cursor, *last,
-                         deopt_id != DeoptId::kNone ? call->env() : NULL,
+                         deopt_id != DeoptId::kNone ? call->env() : nullptr,
                          FlowGraph::kValue);
   } else if (value_needs_boxing) {
     *last = BoxInstr::Create(kUnboxedIntPtr, new Value(load));
@@ -2920,7 +2923,7 @@
     // the index is not a smi.
     const AbstractType& value_type =
         AbstractType::ZoneHandle(Z, target.ParameterTypeAt(2));
-    Definition* type_args = NULL;
+    Definition* type_args = nullptr;
     switch (array_cid) {
       case kArrayCid:
       case kGrowableObjectArrayCid: {
@@ -2930,7 +2933,7 @@
                            Slot::GetTypeArgumentsSlotFor(flow_graph->thread(),
                                                          instantiator_class),
                            call->source());
-        cursor = flow_graph->AppendTo(cursor, load_type_args, NULL,
+        cursor = flow_graph->AppendTo(cursor, load_type_args, nullptr,
                                       FlowGraph::kValue);
         type_args = load_type_args;
         break;
@@ -3020,7 +3023,7 @@
       array_cid == kExternalTypedDataUint8ArrayCid ||
       array_cid == kExternalTypedDataUint8ClampedArrayCid;
 
-  if (value_check != NULL) {
+  if (value_check != nullptr) {
     // No store barrier needed because checked value is a smi, an unboxed mint,
     // an unboxed double, an unboxed Float32x4, or unboxed Int32x4.
     needs_store_barrier = kNoStoreBarrier;
@@ -3034,7 +3037,7 @@
     stored_value = new (Z)
         DoubleToFloatInstr(new (Z) Value(stored_value), call->deopt_id());
     cursor =
-        flow_graph->AppendTo(cursor, stored_value, NULL, FlowGraph::kValue);
+        flow_graph->AppendTo(cursor, stored_value, nullptr, FlowGraph::kValue);
   } else if (value_needs_unboxing) {
     Representation representation = kNoRepresentation;
     switch (array_cid) {
@@ -3195,7 +3198,8 @@
     // Internal or External typed data: load untagged.
     auto elements = new (Z) LoadUntaggedInstr(
         new (Z) Value(*array), compiler::target::PointerBase::data_offset());
-    *cursor = flow_graph->AppendTo(*cursor, elements, NULL, FlowGraph::kValue);
+    *cursor =
+        flow_graph->AppendTo(*cursor, elements, nullptr, FlowGraph::kValue);
     *array = elements;
   } else {
     // Internal typed data: no action.
@@ -3487,7 +3491,7 @@
                                               Instruction* cursor) {
   LoadFieldInstr* length = new (Z) LoadFieldInstr(
       new (Z) Value(str), Slot::GetLengthFieldForArrayCid(cid), str->source());
-  cursor = flow_graph->AppendTo(cursor, length, NULL, FlowGraph::kValue);
+  cursor = flow_graph->AppendTo(cursor, length, nullptr, FlowGraph::kValue);
 
   // Bounds check.
   index = flow_graph->CreateCheckBound(length, index, call->deopt_id());
@@ -3498,19 +3502,20 @@
     str = new LoadUntaggedInstr(
         new Value(str),
         compiler::target::ExternalOneByteString::external_data_offset());
-    cursor = flow_graph->AppendTo(cursor, str, NULL, FlowGraph::kValue);
+    cursor = flow_graph->AppendTo(cursor, str, nullptr, FlowGraph::kValue);
   } else if (cid == kExternalTwoByteStringCid) {
     str = new LoadUntaggedInstr(
         new Value(str),
         compiler::target::ExternalTwoByteString::external_data_offset());
-    cursor = flow_graph->AppendTo(cursor, str, NULL, FlowGraph::kValue);
+    cursor = flow_graph->AppendTo(cursor, str, nullptr, FlowGraph::kValue);
   }
 
   LoadIndexedInstr* load_indexed = new (Z) LoadIndexedInstr(
       new (Z) Value(str), new (Z) Value(index), /*index_unboxed=*/false,
       compiler::target::Instance::ElementSizeFor(cid), cid, kAlignedAccess,
       DeoptId::kNone, call->source());
-  cursor = flow_graph->AppendTo(cursor, load_indexed, NULL, FlowGraph::kValue);
+  cursor =
+      flow_graph->AppendTo(cursor, load_indexed, nullptr, FlowGraph::kValue);
 
   auto box = BoxInstr::Create(kUnboxedIntPtr, new Value(load_indexed));
   cursor = flow_graph->AppendTo(cursor, box, nullptr, FlowGraph::kValue);
@@ -3543,7 +3548,7 @@
   OneByteStringFromCharCodeInstr* char_at = new (Z)
       OneByteStringFromCharCodeInstr(new (Z) Value((*last)->AsDefinition()));
 
-  flow_graph->AppendTo(*last, char_at, NULL, FlowGraph::kValue);
+  flow_graph->AppendTo(*last, char_at, nullptr, FlowGraph::kValue);
   *last = char_at;
   *result = char_at->AsDefinition();
 
@@ -3648,19 +3653,19 @@
     }
     // Finally insert the sequence other definition in place of this one in the
     // graph.
-    if (entry->next() != NULL) {
+    if (entry->next() != nullptr) {
       call->previous()->LinkTo(entry->next());
     }
     entry->UnuseAllInputs();  // Entry block is not in the graph.
-    if (last != NULL) {
+    if (last != nullptr) {
       ASSERT(call->GetBlock() == last->GetBlock());
       last->LinkTo(call);
     }
     // Remove through the iterator.
     ASSERT(iterator->Current() == call);
     iterator->RemoveCurrentFromGraph();
-    call->set_previous(NULL);
-    call->set_next(NULL);
+    call->set_previous(nullptr);
+    call->set_next(nullptr);
     return true;
   }
   return false;
@@ -3702,7 +3707,7 @@
         call->previous()->LinkTo(entry->next());
       }
       entry->UnuseAllInputs();  // Entry block is not in the graph.
-      if (last != NULL) {
+      if (last != nullptr) {
         BlockEntryInstr* link = call->GetBlock();
         BlockEntryInstr* exit = last->GetBlock();
         if (link != exit) {
@@ -3730,7 +3735,7 @@
       }
     }
     // Remove through the iterator.
-    if (iterator != NULL) {
+    if (iterator != nullptr) {
       ASSERT(iterator->Current() == call);
       iterator->RemoveCurrentFromGraph();
     } else {
@@ -3860,9 +3865,9 @@
   // env_use_list()), so InheritDeoptTarget should be done only after decided
   // to inline.
   (*entry)->InheritDeoptTarget(Z, call);
-  flow_graph->AppendTo(cursor, *last,
-                       call->deopt_id() != DeoptId::kNone ? call->env() : NULL,
-                       FlowGraph::kValue);
+  flow_graph->AppendTo(
+      cursor, *last, call->deopt_id() != DeoptId::kNone ? call->env() : nullptr,
+      FlowGraph::kValue);
   *result = (*last)->AsDefinition();
   return true;
 }
@@ -4025,8 +4030,8 @@
     case MethodRecognizer::kObjectArraySetIndexedUnchecked:
     case MethodRecognizer::kGrowableArraySetIndexedUnchecked:
       return InlineSetIndexed(flow_graph, kind, target, call, receiver, source,
-                              /* value_check = */ NULL, exactness, graph_entry,
-                              entry, last, result);
+                              /* value_check = */ nullptr, exactness,
+                              graph_entry, entry, last, result);
     case MethodRecognizer::kInt8ArraySetIndexed:
     case MethodRecognizer::kUint8ArraySetIndexed:
     case MethodRecognizer::kUint8ClampedArraySetIndexed:
@@ -4035,7 +4040,8 @@
     case MethodRecognizer::kInt16ArraySetIndexed:
     case MethodRecognizer::kUint16ArraySetIndexed: {
       // Optimistically assume Smi.
-      if (ic_data != NULL && ic_data->HasDeoptReason(ICData::kDeoptCheckSmi)) {
+      if (ic_data != nullptr &&
+          ic_data->HasDeoptReason(ICData::kDeoptCheckSmi)) {
         // Optimistic assumption failed at least once.
         return false;
       }
@@ -4049,14 +4055,14 @@
       // Value check not needed for Int32 and Uint32 arrays because they
       // implicitly contain unboxing instructions which check for right type.
       return InlineSetIndexed(flow_graph, kind, target, call, receiver, source,
-                              /* value_check = */ NULL, exactness, graph_entry,
-                              entry, last, result);
+                              /* value_check = */ nullptr, exactness,
+                              graph_entry, entry, last, result);
     }
     case MethodRecognizer::kInt64ArraySetIndexed:
     case MethodRecognizer::kUint64ArraySetIndexed:
       return InlineSetIndexed(flow_graph, kind, target, call, receiver, source,
-                              /* value_check = */ NULL, exactness, graph_entry,
-                              entry, last, result);
+                              /* value_check = */ nullptr, exactness,
+                              graph_entry, entry, last, result);
     case MethodRecognizer::kFloat32ArraySetIndexed:
     case MethodRecognizer::kFloat64ArraySetIndexed: {
       if (!CanUnboxDouble()) {
@@ -4340,8 +4346,9 @@
                              call->GetBlock()->try_index(), DeoptId::kNone);
       (*entry)->InheritDeoptTarget(Z, call);
       ASSERT(!call->HasUses());
-      *last = NULL;    // Empty body.
-      *result = NULL;  // Since no uses of original call, result will be unused.
+      *last = nullptr;  // Empty body.
+      *result =
+          nullptr;  // Since no uses of original call, result will be unused.
       return true;
     }
 
@@ -4359,7 +4366,7 @@
                                            call->deopt_id());
           flow_graph->AppendTo(
               *entry, *last,
-              call->deopt_id() != DeoptId::kNone ? call->env() : NULL,
+              call->deopt_id() != DeoptId::kNone ? call->env() : nullptr,
               FlowGraph::kValue);
           *result = (*last)->AsDefinition();
           return true;
@@ -4401,7 +4408,7 @@
             new (Z) RedefinitionInstr(new (Z) Value(ctype));
         flow_graph->AppendTo(
             *entry, redef,
-            call->deopt_id() != DeoptId::kNone ? call->env() : NULL,
+            call->deopt_id() != DeoptId::kNone ? call->env() : nullptr,
             FlowGraph::kValue);
         *last = *result = redef;
         return true;
diff --git a/runtime/vm/compiler/backend/linearscan.cc b/runtime/vm/compiler/backend/linearscan.cc
index f32e0de..ac48a79 100644
--- a/runtime/vm/compiler/backend/linearscan.cc
+++ b/runtime/vm/compiler/backend/linearscan.cc
@@ -109,7 +109,7 @@
       cpu_spill_slot_count_(0),
       intrinsic_mode_(intrinsic_mode) {
   for (intptr_t i = 0; i < vreg_count_; i++) {
-    live_ranges_.Add(NULL);
+    live_ranges_.Add(nullptr);
   }
   for (intptr_t i = 0; i < vreg_count_; i++) {
     value_representations_.Add(kNoRepresentation);
@@ -150,7 +150,7 @@
     if (!mat->InputAt(i)->BindsToConstant()) {
       Definition* defn = mat->InputAt(i)->definition();
       MaterializeObjectInstr* inner_mat = defn->AsMaterializeObject();
-      if (inner_mat != NULL) {
+      if (inner_mat != nullptr) {
         DeepLiveness(inner_mat, live_in);
       } else {
         intptr_t idx = defn->vreg(0);
@@ -182,7 +182,7 @@
 
       // Handle definitions.
       Definition* current_def = current->AsDefinition();
-      if ((current_def != NULL) && current_def->HasSSATemp()) {
+      if ((current_def != nullptr) && current_def->HasSSATemp()) {
         kill->Add(current_def->vreg(0));
         live_in->Remove(current_def->vreg(0));
         if (current_def->HasPairRepresentation()) {
@@ -207,7 +207,7 @@
 
       // Add non-argument uses from the deoptimization environment (pushed
       // arguments are not allocated by the register allocator).
-      if (current->env() != NULL) {
+      if (current->env() != nullptr) {
         for (Environment::DeepIterator env_it(current->env()); !env_it.Done();
              env_it.Advance()) {
           Definition* defn = env_it.CurrentValue()->definition();
@@ -230,7 +230,7 @@
       JoinEntryInstr* join = block->AsJoinEntry();
       for (PhiIterator it(join); !it.Done(); it.Advance()) {
         PhiInstr* phi = it.Current();
-        ASSERT(phi != NULL);
+        ASSERT(phi != nullptr);
         kill->Add(phi->vreg(0));
         live_in->Remove(phi->vreg(0));
         if (phi->HasPairRepresentation()) {
@@ -279,10 +279,10 @@
 }
 
 UsePosition* LiveRange::AddUse(intptr_t pos, Location* location_slot) {
-  ASSERT(location_slot != NULL);
+  ASSERT(location_slot != nullptr);
   ASSERT((first_use_interval_->start_ <= pos) &&
          (pos <= first_use_interval_->end_));
-  if (uses_ != NULL) {
+  if (uses_ != nullptr) {
     if ((uses_->pos() == pos) && (uses_->location_slot() == location_slot)) {
       return uses_;
     } else if (uses_->pos() < pos) {
@@ -291,13 +291,13 @@
       // add uses both at position P-1 and *then* P which will make
       // uses_ unsorted unless we account for it here.
       UsePosition* insert_after = uses_;
-      while ((insert_after->next() != NULL) &&
+      while ((insert_after->next() != nullptr) &&
              (insert_after->next()->pos() < pos)) {
         insert_after = insert_after->next();
       }
 
       UsePosition* insert_before = insert_after->next();
-      while (insert_before != NULL && (insert_before->pos() == pos)) {
+      while (insert_before != nullptr && (insert_before->pos() == pos)) {
         if (insert_before->location_slot() == location_slot) {
           return insert_before;
         }
@@ -328,11 +328,11 @@
   SafepointPosition* safepoint =
       new SafepointPosition(ToInstructionEnd(pos), locs);
 
-  if (first_safepoint_ == NULL) {
-    ASSERT(last_safepoint_ == NULL);
+  if (first_safepoint_ == nullptr) {
+    ASSERT(last_safepoint_ == nullptr);
     first_safepoint_ = last_safepoint_ = safepoint;
   } else {
-    ASSERT(last_safepoint_ != NULL);
+    ASSERT(last_safepoint_ != nullptr);
     // We assume that safepoints list is sorted by position and that
     // safepoints are added in this order.
     ASSERT(last_safepoint_->pos() < pos);
@@ -344,7 +344,7 @@
 void LiveRange::AddHintedUse(intptr_t pos,
                              Location* location_slot,
                              Location* hint) {
-  ASSERT(hint != NULL);
+  ASSERT(hint != nullptr);
   AddUse(pos, location_slot)->set_hint(hint);
 }
 
@@ -354,7 +354,7 @@
   // Live ranges are being build by visiting instructions in post-order.
   // This implies that use intervals will be prepended in a monotonically
   // decreasing order.
-  if (first_use_interval() != NULL) {
+  if (first_use_interval() != nullptr) {
     // If the first use interval and the use interval we are adding
     // touch then we can just extend the first interval to cover their
     // union.
@@ -378,8 +378,8 @@
   }
 
   first_use_interval_ = new UseInterval(start, end, first_use_interval_);
-  if (last_use_interval_ == NULL) {
-    ASSERT(first_use_interval_->next() == NULL);
+  if (last_use_interval_ == nullptr) {
+    ASSERT(first_use_interval_->next() == nullptr);
     last_use_interval_ = first_use_interval_;
   }
 }
@@ -392,9 +392,9 @@
   // expand the first use interval to cover the block from the start
   // to the last use in the block and then we shrink it if we encounter
   // definition of the value inside the same block.
-  if (first_use_interval_ == NULL) {
+  if (first_use_interval_ == nullptr) {
     // Definition without a use.
-    first_use_interval_ = new UseInterval(pos, pos + 1, NULL);
+    first_use_interval_ = new UseInterval(pos, pos + 1, nullptr);
     last_use_interval_ = first_use_interval_;
   } else {
     // Shrink the first use interval. It was optimistically expanded to
@@ -405,7 +405,7 @@
 }
 
 LiveRange* FlowGraphAllocator::GetLiveRange(intptr_t vreg) {
-  if (live_ranges_[vreg] == NULL) {
+  if (live_ranges_[vreg] == nullptr) {
     Representation rep = value_representations_[vreg];
     ASSERT(rep != kNoRepresentation);
     live_ranges_[vreg] = new LiveRange(vreg, rep);
@@ -432,7 +432,7 @@
     return;
   }
 
-  if (blocking_ranges[loc.register_code()] == NULL) {
+  if (blocking_ranges[loc.register_code()] == nullptr) {
     Representation ignored = kNoRepresentation;
     LiveRange* range = new LiveRange(kNoVirtualRegister, ignored);
     blocking_ranges[loc.register_code()] = range;
@@ -481,7 +481,7 @@
 }
 
 void LiveRange::Print() {
-  if (first_use_interval() == NULL) {
+  if (first_use_interval() == nullptr) {
     return;
   }
 
@@ -494,7 +494,7 @@
   THR_Print("\n");
 
   SafepointPosition* safepoint = first_safepoint();
-  while (safepoint != NULL) {
+  while (safepoint != nullptr) {
     THR_Print("    Safepoint [%" Pd "]: ", safepoint->pos());
     safepoint->locs()->stack_bitmap().Print();
     THR_Print("\n");
@@ -502,13 +502,13 @@
   }
 
   UsePosition* use_pos = uses_;
-  for (UseInterval* interval = first_use_interval_; interval != NULL;
+  for (UseInterval* interval = first_use_interval_; interval != nullptr;
        interval = interval->next()) {
     THR_Print("    use interval [%" Pd ", %" Pd ")\n", interval->start(),
               interval->end());
-    while ((use_pos != NULL) && (use_pos->pos() <= interval->end())) {
+    while ((use_pos != nullptr) && (use_pos->pos() <= interval->end())) {
       THR_Print("      use at %" Pd "", use_pos->pos());
-      if (use_pos->location_slot() != NULL) {
+      if (use_pos->location_slot() != nullptr) {
         THR_Print(" as ");
         use_pos->location_slot()->Print();
       }
@@ -517,7 +517,7 @@
     }
   }
 
-  if (next_sibling() != NULL) {
+  if (next_sibling() != nullptr) {
     next_sibling()->Print();
   }
 }
@@ -530,7 +530,7 @@
 #endif
 
   for (intptr_t i = 0; i < live_ranges_.length(); i++) {
-    if (live_ranges_[i] != NULL) {
+    if (live_ranges_[i] != nullptr) {
       live_ranges_[i]->Print();
     }
   }
@@ -541,7 +541,7 @@
 static bool HasOnlyUnconstrainedUsesInLoop(LiveRange* range,
                                            intptr_t boundary) {
   UsePosition* use = range->first_use();
-  while ((use != NULL) && (use->pos() < boundary)) {
+  while ((use != nullptr) && (use->pos() < boundary)) {
     if (!use->location_slot()->Equals(Location::Any())) {
       return false;
     }
@@ -553,7 +553,7 @@
 // Returns true if all uses of the given range have Any allocation policy.
 static bool HasOnlyUnconstrainedUses(LiveRange* range) {
   UsePosition* use = range->first_use();
-  while (use != NULL) {
+  while (use != nullptr) {
     if (!use->location_slot()->Equals(Location::Any())) {
       return false;
     }
@@ -565,7 +565,7 @@
 void FlowGraphAllocator::BuildLiveRanges() {
   const intptr_t block_count = postorder_.length();
   ASSERT(postorder_.Last()->IsGraphEntry());
-  BitVector* current_interference_set = NULL;
+  BitVector* current_interference_set = nullptr;
   Zone* zone = flow_graph_.zone();
   for (intptr_t i = 0; i < (block_count - 1); i++) {
     BlockEntryInstr* block = postorder_[i];
@@ -793,7 +793,7 @@
     }
   } else {
     ConstantInstr* constant = defn->AsConstant();
-    ASSERT(constant != NULL);
+    ASSERT(constant != nullptr);
     const intptr_t pair_index = second_location_for_definition ? 1 : 0;
     range->set_assigned_location(Location::Constant(constant, pair_index));
     range->set_spill_slot(Location::Constant(constant, pair_index));
@@ -802,7 +802,7 @@
   range->finger()->Initialize(range);
   UsePosition* use =
       range->finger()->FirstRegisterBeneficialUse(block->start_pos());
-  if (use != NULL) {
+  if (use != nullptr) {
     LiveRange* tail = SplitBetween(range, block->start_pos(), use->pos());
     CompleteRange(tail, defn->RegisterKindForResult());
   }
@@ -875,7 +875,7 @@
   Instruction* last = block->last_instruction();
 
   GotoInstr* goto_instr = last->AsGoto();
-  if (goto_instr == NULL) return last;
+  if (goto_instr == nullptr) return last;
 
   // If we have a parallel move here then the successor block must be a
   // join with phis.  The phi inputs contribute uses to each predecessor
@@ -888,7 +888,7 @@
   const intptr_t pos = GetLifetimePosition(goto_instr);
 
   JoinEntryInstr* join = goto_instr->successor();
-  ASSERT(join != NULL);
+  ASSERT(join != nullptr);
 
   // Search for the index of the current block in the predecessors of
   // the join.
@@ -902,7 +902,7 @@
     MoveOperands* move = parallel_move->MoveOperandsAt(move_index++);
 
     ConstantInstr* constant = val->definition()->AsConstant();
-    if (constant != NULL) {
+    if (constant != nullptr) {
       move->set_src(Location::Constant(constant, /*pair_index*/ 0));
       if (val->definition()->HasPairRepresentation()) {
         move = parallel_move->MoveOperandsAt(move_index++);
@@ -918,7 +918,7 @@
     //
     intptr_t vreg = val->definition()->vreg(0);
     LiveRange* range = GetLiveRange(vreg);
-    if (interfere_at_backedge != NULL) interfere_at_backedge->Add(vreg);
+    if (interfere_at_backedge != nullptr) interfere_at_backedge->Add(vreg);
 
     range->AddUseInterval(block->start_pos(), pos);
     range->AddHintedUse(pos, move->src_slot(),
@@ -929,7 +929,7 @@
       move = parallel_move->MoveOperandsAt(move_index++);
       vreg = val->definition()->vreg(1);
       range = GetLiveRange(vreg);
-      if (interfere_at_backedge != NULL) {
+      if (interfere_at_backedge != nullptr) {
         interfere_at_backedge->Add(vreg);
       }
       range->AddUseInterval(block->start_pos(), pos);
@@ -955,7 +955,7 @@
   intptr_t move_idx = 0;
   for (PhiIterator it(join); !it.Done(); it.Advance()) {
     PhiInstr* phi = it.Current();
-    ASSERT(phi != NULL);
+    ASSERT(phi != nullptr);
     const intptr_t vreg = phi->vreg(0);
     ASSERT(vreg >= 0);
     const bool is_pair_phi = phi->HasPairRepresentation();
@@ -978,7 +978,7 @@
     for (intptr_t pred_idx = 0; pred_idx < phi->InputCount(); pred_idx++) {
       BlockEntryInstr* pred = join->PredecessorAt(pred_idx);
       GotoInstr* goto_instr = pred->last_instruction()->AsGoto();
-      ASSERT((goto_instr != NULL) && (goto_instr->HasParallelMove()));
+      ASSERT((goto_instr != nullptr) && (goto_instr->HasParallelMove()));
       MoveOperands* move =
           goto_instr->parallel_move()->MoveOperandsAt(move_idx);
       move->set_dest(Location::PrefersRegister());
@@ -1008,9 +1008,9 @@
 
 void FlowGraphAllocator::ProcessEnvironmentUses(BlockEntryInstr* block,
                                                 Instruction* current) {
-  ASSERT(current->env() != NULL);
+  ASSERT(current->env() != nullptr);
   Environment* env = current->env();
-  while (env != NULL) {
+  while (env != nullptr) {
     // Any value mentioned in the deoptimization environment should survive
     // until the end of instruction but it does not need to be in the register.
     // Expected shape of live range:
@@ -1112,7 +1112,7 @@
     MaterializeObjectInstr* mat) {
   // Materialization can occur several times in the same environment.
   // Check if we already processed this one.
-  if (mat->locations() != NULL) {
+  if (mat->locations() != nullptr) {
     return;  // Already processed.
   }
 
@@ -1124,7 +1124,7 @@
     Definition* def = mat->InputAt(i)->definition();
 
     ConstantInstr* constant = def->AsConstant();
-    if (constant != NULL) {
+    if (constant != nullptr) {
       locations[i] = Location::Constant(constant);
       continue;
     }
@@ -1163,10 +1163,10 @@
                                          Value* input,
                                          intptr_t vreg,
                                          RegisterSet* live_registers) {
-  ASSERT(in_ref != NULL);
+  ASSERT(in_ref != nullptr);
   ASSERT(!in_ref->IsPairLocation());
-  ASSERT(input != NULL);
-  ASSERT(block != NULL);
+  ASSERT(input != nullptr);
+  ASSERT(block != nullptr);
   LiveRange* range = GetLiveRange(vreg);
   if (in_ref->IsMachineRegister()) {
     // Input is expected in a fixed register. Expected shape of
@@ -1176,7 +1176,7 @@
     //      value    --*
     //      register   [-----)
     //
-    if (live_registers != NULL) {
+    if (live_registers != nullptr) {
       live_registers->Add(*in_ref, range->representation());
     }
     MoveOperands* move = AddMoveAt(pos - 1, *in_ref, Location::Any());
@@ -1232,10 +1232,10 @@
                                           Definition* input,
                                           intptr_t input_vreg,
                                           BitVector* interference_set) {
-  ASSERT(out != NULL);
+  ASSERT(out != nullptr);
   ASSERT(!out->IsPairLocation());
-  ASSERT(def != NULL);
-  ASSERT(block != NULL);
+  ASSERT(def != nullptr);
+  ASSERT(block != nullptr);
 
   LiveRange* range =
       vreg >= 0 ? GetLiveRange(vreg) : MakeLiveRangeForTemporary();
@@ -1260,7 +1260,7 @@
     UsePosition* use = range->first_use();
 
     // If the value has no uses we don't need to allocate it.
-    if (use == NULL) return;
+    if (use == nullptr) return;
 
     // Connect fixed output to all inputs that immediately follow to avoid
     // allocating an intermediary register.
@@ -1285,8 +1285,8 @@
     MoveOperands* move = AddMoveAt(pos + 1, Location::Any(), *out);
     range->AddHintedUse(pos + 1, move->dest_slot(), out);
   } else if (output_same_as_first_input) {
-    ASSERT(in_ref != NULL);
-    ASSERT(input != NULL);
+    ASSERT(in_ref != nullptr);
+    ASSERT(input != nullptr);
     // Output register will contain a value of the first input at instruction's
     // start. Expected shape of live ranges:
     //
@@ -1313,7 +1313,7 @@
     range->AddUse(pos, move->dest_slot());
     range->AddUse(pos, in_ref);
 
-    if ((interference_set != NULL) && (range->vreg() >= 0) &&
+    if ((interference_set != nullptr) && (range->vreg() >= 0) &&
         interference_set->Contains(range->vreg())) {
       interference_set->Add(input->vreg(0));
     }
@@ -1345,12 +1345,13 @@
   LocationSummary* locs = current->locs();
 
   Definition* def = current->AsDefinition();
-  if ((def != NULL) && (def->AsConstant() != NULL)) {
+  if ((def != nullptr) && (def->AsConstant() != nullptr)) {
     ASSERT(!def->HasPairRepresentation());
-    LiveRange* range = (def->vreg(0) != -1) ? GetLiveRange(def->vreg(0)) : NULL;
+    LiveRange* range =
+        (def->vreg(0) != -1) ? GetLiveRange(def->vreg(0)) : nullptr;
 
     // Drop definitions of constants that have no uses.
-    if ((range == NULL) || (range->first_use() == NULL)) {
+    if ((range == nullptr) || (range->first_use() == nullptr)) {
       locs->set_out(0, Location::NoLocation());
       return;
     }
@@ -1406,7 +1407,7 @@
                                     Location::RequiresRegister()));
   }
   // Add uses from the deoptimization environment.
-  if (current->env() != NULL) ProcessEnvironmentUses(block, current);
+  if (current->env() != nullptr) ProcessEnvironmentUses(block, current);
 
   // Process inputs.
   // Skip the first input if output is specified with kSameAsFirstInput policy,
@@ -1418,7 +1419,7 @@
       // the location is the first register or second register.
       Value* input = current->InputAt(j);
       Location* in_ref = locs->in_slot(j);
-      RegisterSet* live_registers = NULL;
+      RegisterSet* live_registers = nullptr;
       if (locs->HasCallOnSlowPath()) {
         live_registers = locs->live_registers();
       }
@@ -1526,7 +1527,7 @@
     safepoints_.Add(current);
   }
 
-  if (def == NULL) {
+  if (def == nullptr) {
     ASSERT(locs->out(0).IsInvalid());
     return;
   }
@@ -1562,11 +1563,11 @@
       // Each element of the pair is assigned it's own virtual register number
       // and is allocated its own LiveRange.
       ProcessOneOutput(block, pos, pair->SlotAt(0), def, def->vreg(0),
-                       false,           // output is not mapped to first input.
-                       NULL, NULL, -1,  // First input not needed.
+                       false,  // output is not mapped to first input.
+                       nullptr, nullptr, -1,  // First input not needed.
                        interference_set);
       ProcessOneOutput(block, pos, pair->SlotAt(1), def, def->vreg(1), false,
-                       NULL, NULL, -1, interference_set);
+                       nullptr, nullptr, -1, interference_set);
     }
   } else {
     if (output_same_as_first_input) {
@@ -1582,8 +1583,8 @@
                        interference_set);
     } else {
       ProcessOneOutput(block, pos, out, def, def->vreg(0),
-                       false,           // output is not mapped to first input.
-                       NULL, NULL, -1,  // First input not needed.
+                       false,  // output is not mapped to first input.
+                       nullptr, nullptr, -1,  // First input not needed.
                        interference_set);
     }
   }
@@ -1594,7 +1595,7 @@
   ASSERT(pos > 0);
   Instruction* prev = instr->previous();
   ParallelMoveInstr* move = prev->AsParallelMove();
-  if ((move == NULL) ||
+  if ((move == nullptr) ||
       (FlowGraphAllocator::GetLifetimePosition(move) != pos)) {
     move = new ParallelMoveInstr();
     prev->LinkTo(move);
@@ -1654,7 +1655,7 @@
     // For join entry predecessors create phi resolution moves if
     // necessary. They will be populated by the register allocator.
     JoinEntryInstr* join = block->AsJoinEntry();
-    if (join != NULL) {
+    if (join != nullptr) {
       intptr_t move_count = 0;
       for (PhiIterator it(join); !it.Done(); it.Advance()) {
         move_count += it.Current()->HasPairRepresentation() ? 2 : 1;
@@ -1707,16 +1708,16 @@
 
 bool AllocationFinger::Advance(const intptr_t start) {
   UseInterval* a = first_pending_use_interval_;
-  while (a != NULL && a->end() <= start)
+  while (a != nullptr && a->end() <= start)
     a = a->next();
   first_pending_use_interval_ = a;
-  return (first_pending_use_interval_ == NULL);
+  return (first_pending_use_interval_ == nullptr);
 }
 
 Location AllocationFinger::FirstHint() {
   UsePosition* use = first_hinted_use_;
 
-  while (use != NULL) {
+  while (use != nullptr) {
     if (use->HasHint()) return use->hint();
     use = use->next();
   }
@@ -1725,7 +1726,7 @@
 }
 
 static UsePosition* FirstUseAfter(UsePosition* use, intptr_t after) {
-  while ((use != NULL) && (use->pos() < after)) {
+  while ((use != nullptr) && (use->pos() < after)) {
     use = use->next();
   }
   return use;
@@ -1733,7 +1734,7 @@
 
 UsePosition* AllocationFinger::FirstRegisterUse(intptr_t after) {
   for (UsePosition* use = FirstUseAfter(first_register_use_, after);
-       use != NULL; use = use->next()) {
+       use != nullptr; use = use->next()) {
     Location* loc = use->location_slot();
     if (loc->IsUnallocated() &&
         ((loc->policy() == Location::kRequiresRegister) ||
@@ -1742,19 +1743,19 @@
       return use;
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 UsePosition* AllocationFinger::FirstRegisterBeneficialUse(intptr_t after) {
   for (UsePosition* use = FirstUseAfter(first_register_beneficial_use_, after);
-       use != NULL; use = use->next()) {
+       use != nullptr; use = use->next()) {
     Location* loc = use->location_slot();
     if (loc->IsUnallocated() && loc->IsRegisterBeneficial()) {
       first_register_beneficial_use_ = use;
       return use;
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 UsePosition* AllocationFinger::FirstInterferingUse(intptr_t after) {
@@ -1767,14 +1768,14 @@
 }
 
 void AllocationFinger::UpdateAfterSplit(intptr_t first_use_after_split_pos) {
-  if ((first_register_use_ != NULL) &&
+  if ((first_register_use_ != nullptr) &&
       (first_register_use_->pos() >= first_use_after_split_pos)) {
-    first_register_use_ = NULL;
+    first_register_use_ = nullptr;
   }
 
-  if ((first_register_beneficial_use_ != NULL) &&
+  if ((first_register_beneficial_use_ != nullptr) &&
       (first_register_beneficial_use_->pos() >= first_use_after_split_pos)) {
-    first_register_beneficial_use_ = NULL;
+    first_register_beneficial_use_ = nullptr;
   }
 }
 
@@ -1788,7 +1789,7 @@
 }
 
 static intptr_t FirstIntersection(UseInterval* a, UseInterval* u) {
-  while (a != NULL && u != NULL) {
+  while (a != nullptr && u != nullptr) {
     const intptr_t pos = a->Intersect(u);
     if (pos != kIllegalPosition) return pos;
 
@@ -1806,24 +1807,24 @@
 PositionType* SplitListOfPositions(PositionType** head,
                                    intptr_t split_pos,
                                    bool split_at_start) {
-  PositionType* last_before_split = NULL;
+  PositionType* last_before_split = nullptr;
   PositionType* pos = *head;
   if (split_at_start) {
-    while ((pos != NULL) && (pos->pos() < split_pos)) {
+    while ((pos != nullptr) && (pos->pos() < split_pos)) {
       last_before_split = pos;
       pos = pos->next();
     }
   } else {
-    while ((pos != NULL) && (pos->pos() <= split_pos)) {
+    while ((pos != nullptr) && (pos->pos() <= split_pos)) {
       last_before_split = pos;
       pos = pos->next();
     }
   }
 
-  if (last_before_split == NULL) {
-    *head = NULL;
+  if (last_before_split == nullptr) {
+    *head = nullptr;
   } else {
-    last_before_split->set_next(NULL);
+    last_before_split->set_next(nullptr);
   }
 
   return pos;
@@ -1833,7 +1834,7 @@
   if (Start() == split_pos) return this;
 
   UseInterval* interval = finger_.first_pending_use_interval();
-  if (interval == NULL) {
+  if (interval == nullptr) {
     finger_.Initialize(this);
     interval = finger_.first_pending_use_interval();
   }
@@ -1844,7 +1845,7 @@
   // end. We need to start over to find the previous interval.
   if (split_pos <= interval->start()) interval = first_use_interval_;
 
-  UseInterval* last_before_split = NULL;
+  UseInterval* last_before_split = nullptr;
   while (interval->end() <= split_pos) {
     last_before_split = interval;
     interval = interval->next();
@@ -1861,7 +1862,7 @@
     last_before_split = interval;
   }
 
-  ASSERT(last_before_split != NULL);
+  ASSERT(last_before_split != nullptr);
   ASSERT(last_before_split->next() == first_after_split);
   ASSERT(last_before_split->end() <= split_pos);
   ASSERT(split_pos <= first_after_split->start());
@@ -1883,9 +1884,9 @@
                         next_sibling_->Start(), next_sibling_->End()));
 
   last_use_interval_ = last_before_split;
-  last_use_interval_->next_ = NULL;
+  last_use_interval_->next_ = nullptr;
 
-  if (first_use_after_split != NULL) {
+  if (first_use_after_split != nullptr) {
     finger_.UpdateAfterSplit(first_use_after_split->pos());
   }
 
@@ -2005,7 +2006,7 @@
 
   // Compute range start and end.
   LiveRange* last_sibling = range;
-  while (last_sibling->next_sibling() != NULL) {
+  while (last_sibling->next_sibling() != nullptr) {
     last_sibling = last_sibling->next_sibling();
   }
 
@@ -2110,9 +2111,9 @@
         spill_slot.stack_index());
   }
   ASSERT(stack_index >= 0);
-  while (range != NULL) {
+  while (range != nullptr) {
     for (SafepointPosition* safepoint = range->first_safepoint();
-         safepoint != NULL; safepoint = safepoint->next()) {
+         safepoint != nullptr; safepoint = safepoint->next()) {
       // Mark the stack slot as having an object.
       safepoint->locs()->SetStackBit(stack_index);
     }
@@ -2173,7 +2174,7 @@
   intptr_t intersection = kMaxPosition;
   for (intptr_t i = 0; i < registers_[reg]->length(); i++) {
     LiveRange* allocated = (*registers_[reg])[i];
-    if (allocated == NULL) continue;
+    if (allocated == nullptr) continue;
 
     UseInterval* allocated_head =
         allocated->finger()->first_pending_use_interval();
@@ -2187,7 +2188,7 @@
 }
 
 void ReachingDefs::AddPhi(PhiInstr* phi) {
-  if (phi->reaching_defs() == NULL) {
+  if (phi->reaching_defs() == nullptr) {
     Zone* zone = flow_graph_.zone();
     phi->set_reaching_defs(new (zone) BitVector(zone, flow_graph_.max_vreg()));
 
@@ -2217,7 +2218,7 @@
     // Add all phis that affect this phi to the list.
     for (intptr_t i = 0; i < phi->InputCount(); i++) {
       PhiInstr* input_phi = phi->InputAt(i)->definition()->AsPhi();
-      if (input_phi != NULL) {
+      if (input_phi != nullptr) {
         AddPhi(input_phi);
       }
     }
@@ -2231,7 +2232,7 @@
       PhiInstr* phi = phis_[i];
       for (intptr_t i = 0; i < phi->InputCount(); i++) {
         PhiInstr* input_phi = phi->InputAt(i)->definition()->AsPhi();
-        if (input_phi != NULL) {
+        if (input_phi != nullptr) {
           if (phi->reaching_defs()->AddAll(input_phi->reaching_defs())) {
             changed = true;
           }
@@ -2244,7 +2245,7 @@
 }
 
 BitVector* ReachingDefs::Get(PhiInstr* phi) {
-  if (phi->reaching_defs() == NULL) {
+  if (phi->reaching_defs() == nullptr) {
     ASSERT(phis_.is_empty());
     AddPhi(phi);
     Compute();
@@ -2373,7 +2374,7 @@
 
     // If unallocated represents a constant value and does not have
     // any uses then avoid using a register for it.
-    if (unallocated->first_use() == NULL) {
+    if (unallocated->first_use() == nullptr) {
       if (unallocated->vreg() >= 0) {
         LiveRange* parent = GetLiveRange(unallocated->vreg());
         if (parent->spill_slot().IsConstant()) {
@@ -2448,7 +2449,7 @@
   // loop.
   UsePosition* register_use =
       unallocated->finger()->FirstRegisterUse(unallocated->Start());
-  if ((register_use == NULL) &&
+  if ((register_use == nullptr) &&
       !(unallocated->is_loop_phi() && HasCheapEvictionCandidate(unallocated))) {
     Spill(unallocated);
     return;
@@ -2467,7 +2468,7 @@
   }
 
   const intptr_t register_use_pos =
-      (register_use != NULL) ? register_use->pos() : unallocated->Start();
+      (register_use != nullptr) ? register_use->pos() : unallocated->Start();
   if (free_until < register_use_pos) {
     // Can't acquire free register. Spill until we really need one.
     ASSERT(unallocated->Start() < ToInstructionStart(register_use_pos));
@@ -2515,14 +2516,14 @@
       }
 
       UsePosition* use = allocated->finger()->FirstInterferingUse(start);
-      if ((use != NULL) && ((ToInstructionStart(use->pos()) - start) <= 1)) {
+      if ((use != nullptr) && ((ToInstructionStart(use->pos()) - start) <= 1)) {
         // This register is blocked by interval that is used
         // as register in the current instruction and can't
         // be spilled.
         return false;
       }
 
-      const intptr_t use_pos = (use != NULL) ? use->pos() : allocated->End();
+      const intptr_t use_pos = (use != nullptr) ? use->pos() : allocated->End();
 
       if (use_pos < free_until) free_until = use_pos;
     } else {
@@ -2551,7 +2552,7 @@
   intptr_t from = first_evicted + 1;
   while (from < registers_[reg]->length()) {
     LiveRange* allocated = (*registers_[reg])[from++];
-    if (allocated != NULL) (*registers_[reg])[to++] = allocated;
+    if (allocated != nullptr) (*registers_[reg])[to++] = allocated;
   }
   registers_[reg]->TruncateTo(to);
 }
@@ -2568,7 +2569,7 @@
         ASSERT(allocated->End() <= unallocated->Start());
         ConvertAllUses(allocated);
       }
-      (*registers_[reg])[i] = NULL;
+      (*registers_[reg])[i] = nullptr;
       first_evicted = i;
     }
   }
@@ -2590,7 +2591,7 @@
 
   const intptr_t spill_position = first_unallocated->start();
   UsePosition* use = allocated->finger()->FirstInterferingUse(spill_position);
-  if (use == NULL) {
+  if (use == nullptr) {
     // No register uses after this point.
     SpillAfter(allocated, spill_position);
   } else {
@@ -2613,7 +2614,7 @@
   // in it so we should not attempt to add parallel moves to it.
   ASSERT(pos >= kNormalEntryPos);
 
-  ParallelMoveInstr* parallel_move = NULL;
+  ParallelMoveInstr* parallel_move = nullptr;
   Instruction* instr = InstructionAt(pos);
   if (auto entry = instr->AsFunctionEntry()) {
     // Parallel moves added to the FunctionEntry will be added after the block
@@ -2630,7 +2631,7 @@
 
 void FlowGraphAllocator::ConvertUseTo(UsePosition* use, Location loc) {
   ASSERT(!loc.IsPairLocation());
-  ASSERT(use->location_slot() != NULL);
+  ASSERT(use->location_slot() != nullptr);
   Location* slot = use->location_slot();
   ASSERT(slot->IsUnallocated());
   TRACE_ALLOC(THR_Print("  use at %" Pd " converted to ", use->pos()));
@@ -2651,7 +2652,8 @@
   TRACE_ALLOC(loc.Print());
   TRACE_ALLOC(THR_Print(":\n"));
 
-  for (UsePosition* use = range->first_use(); use != NULL; use = use->next()) {
+  for (UsePosition* use = range->first_use(); use != nullptr;
+       use = use->next()) {
     ConvertUseTo(use, loc);
   }
 
@@ -2659,7 +2661,7 @@
   // code.
   if (loc.IsMachineRegister()) {
     for (SafepointPosition* safepoint = range->first_safepoint();
-         safepoint != NULL; safepoint = safepoint->next()) {
+         safepoint != nullptr; safepoint = safepoint->next()) {
       if (!safepoint->locs()->always_calls()) {
         ASSERT(safepoint->locs()->can_call());
         safepoint->locs()->live_registers()->Add(loc, range->representation());
@@ -2678,7 +2680,7 @@
       LiveRange* range = (*registers_[reg])[i];
       if (range->finger()->Advance(start)) {
         ConvertAllUses(range);
-        (*registers_[reg])[i] = NULL;
+        (*registers_[reg])[i] = nullptr;
         first_evicted = i;
       }
     }
@@ -2690,7 +2692,7 @@
 bool LiveRange::Contains(intptr_t pos) const {
   if (!CanCover(pos)) return false;
 
-  for (UseInterval* interval = first_use_interval_; interval != NULL;
+  for (UseInterval* interval = first_use_interval_; interval != nullptr;
        interval = interval->next()) {
     if (interval->Contains(pos)) {
       return true;
@@ -2799,7 +2801,7 @@
     ASSERT(registers_[reg]->is_empty());
 
     LiveRange* range = blocking_ranges[reg];
-    if (range != NULL) {
+    if (range != nullptr) {
       range->finger()->Initialize(range);
       registers_[reg]->Add(range);
     }
@@ -2883,7 +2885,7 @@
   // inside basic blocks.
   for (intptr_t vreg = 0; vreg < live_ranges_.length(); vreg++) {
     LiveRange* range = live_ranges_[vreg];
-    if (range == NULL) continue;
+    if (range == nullptr) continue;
 
     while (range->next_sibling() != nullptr) {
       LiveRange* sibling = range->next_sibling();
@@ -3103,7 +3105,7 @@
     } else if (auto join = block->AsJoinEntry()) {
       for (PhiIterator it(join); !it.Done(); it.Advance()) {
         PhiInstr* phi = it.Current();
-        ASSERT(phi != NULL && phi->vreg(0) >= 0);
+        ASSERT(phi != nullptr && phi->vreg(0) >= 0);
         value_representations_[phi->vreg(0)] =
             RepresentationForRange(phi->representation());
         if (phi->HasPairRepresentation()) {
@@ -3117,7 +3119,7 @@
     for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
          instr_it.Advance()) {
       Definition* def = instr_it.Current()->AsDefinition();
-      if ((def != NULL) && (def->vreg(0) >= 0)) {
+      if ((def != nullptr) && (def->vreg(0) >= 0)) {
         const intptr_t vreg = def->vreg(0);
         value_representations_[vreg] =
             RepresentationForRange(def->representation());
@@ -3385,7 +3387,7 @@
   AllocateUnallocatedRanges();
 
   GraphEntryInstr* entry = block_order_[0]->AsGraphEntry();
-  ASSERT(entry != NULL);
+  ASSERT(entry != nullptr);
   intptr_t double_spill_slot_count = spill_slots_.length() * kDoubleSpillFactor;
   entry->set_spill_slot_count(cpu_spill_slot_count_ + double_spill_slot_count +
                               flow_graph_.max_argument_slot_count());
diff --git a/runtime/vm/compiler/backend/linearscan.h b/runtime/vm/compiler/backend/linearscan.h
index 62f859c..e49f31d 100644
--- a/runtime/vm/compiler/backend/linearscan.h
+++ b/runtime/vm/compiler/backend/linearscan.h
@@ -387,8 +387,8 @@
 class UsePosition : public ZoneAllocated {
  public:
   UsePosition(intptr_t pos, UsePosition* next, Location* location_slot)
-      : pos_(pos), location_slot_(location_slot), hint_(NULL), next_(next) {
-    ASSERT(location_slot != NULL);
+      : pos_(pos), location_slot_(location_slot), hint_(nullptr), next_(next) {
+    ASSERT(location_slot != nullptr);
   }
 
   Location* location_slot() const { return location_slot_; }
@@ -403,7 +403,7 @@
 
   void set_hint(Location* hint) { hint_ = hint; }
 
-  bool HasHint() const { return (hint_ != NULL) && !hint_->IsUnallocated(); }
+  bool HasHint() const { return (hint_ != nullptr) && !hint_->IsUnallocated(); }
 
   void set_next(UsePosition* next) { next_ = next; }
   UsePosition* next() const { return next_; }
@@ -460,10 +460,10 @@
 class AllocationFinger : public ValueObject {
  public:
   AllocationFinger()
-      : first_pending_use_interval_(NULL),
-        first_register_use_(NULL),
-        first_register_beneficial_use_(NULL),
-        first_hinted_use_(NULL) {}
+      : first_pending_use_interval_(nullptr),
+        first_register_use_(nullptr),
+        first_register_beneficial_use_(nullptr),
+        first_hinted_use_(nullptr) {}
 
   void Initialize(LiveRange* range);
   void UpdateAfterSplit(intptr_t first_use_after_split_pos);
@@ -490,7 +490,7 @@
 class SafepointPosition : public ZoneAllocated {
  public:
   SafepointPosition(intptr_t pos, LocationSummary* locs)
-      : pos_(pos), locs_(locs), next_(NULL) {}
+      : pos_(pos), locs_(locs), next_(nullptr) {}
 
   void set_next(SafepointPosition* next) { next_ = next; }
   SafepointPosition* next() const { return next_; }
@@ -514,12 +514,12 @@
         representation_(rep),
         assigned_location_(),
         spill_slot_(),
-        uses_(NULL),
-        first_use_interval_(NULL),
-        last_use_interval_(NULL),
-        first_safepoint_(NULL),
-        last_safepoint_(NULL),
-        next_sibling_(NULL),
+        uses_(nullptr),
+        first_use_interval_(nullptr),
+        last_use_interval_(nullptr),
+        first_safepoint_(nullptr),
+        last_safepoint_(nullptr),
+        next_sibling_(nullptr),
         has_only_any_uses_in_loops_(0),
         is_loop_phi_(false),
         finger_() {}
@@ -603,7 +603,7 @@
         first_use_interval_(first_use_interval),
         last_use_interval_(last_use_interval),
         first_safepoint_(first_safepoint),
-        last_safepoint_(NULL),
+        last_safepoint_(nullptr),
         next_sibling_(next_sibling),
         has_only_any_uses_in_loops_(0),
         is_loop_phi_(false),
diff --git a/runtime/vm/compiler/backend/locations.cc b/runtime/vm/compiler/backend/locations.cc
index 8c17f20..99c4178 100644
--- a/runtime/vm/compiler/backend/locations.cc
+++ b/runtime/vm/compiler/backend/locations.cc
@@ -149,7 +149,7 @@
     : num_inputs_(input_count),
       num_temps_(temp_count),
       output_location_(),  // out(0)->IsInvalid() unless later set.
-      stack_bitmap_(NULL),
+      stack_bitmap_(nullptr),
       contains_call_(contains_call),
       live_registers_() {
 #if defined(DEBUG)
@@ -229,7 +229,8 @@
 
 Location LocationRegisterOrConstant(Value* value) {
   ConstantInstr* constant = value->definition()->AsConstant();
-  return ((constant != NULL) && compiler::Assembler::IsSafe(constant->value()))
+  return ((constant != nullptr) &&
+          compiler::Assembler::IsSafe(constant->value()))
              ? Location::Constant(constant)
              : Location::RequiresRegister();
 }
@@ -271,7 +272,8 @@
 Location LocationFixedRegisterOrConstant(Value* value, Register reg) {
   ASSERT(((1 << reg) & kDartAvailableCpuRegs) != 0);
   ConstantInstr* constant = value->definition()->AsConstant();
-  return ((constant != NULL) && compiler::Assembler::IsSafe(constant->value()))
+  return ((constant != nullptr) &&
+          compiler::Assembler::IsSafe(constant->value()))
              ? Location::Constant(constant)
              : Location::RegisterLocation(reg);
 }
@@ -279,7 +281,7 @@
 Location LocationFixedRegisterOrSmiConstant(Value* value, Register reg) {
   ASSERT(((1 << reg) & kDartAvailableCpuRegs) != 0);
   ConstantInstr* constant = value->definition()->AsConstant();
-  return ((constant != NULL) &&
+  return ((constant != nullptr) &&
           compiler::Assembler::IsSafeSmi(constant->value()))
              ? Location::Constant(constant)
              : Location::RegisterLocation(reg);
@@ -287,7 +289,8 @@
 
 Location LocationAnyOrConstant(Value* value) {
   ConstantInstr* constant = value->definition()->AsConstant();
-  return ((constant != NULL) && compiler::Assembler::IsSafe(constant->value()))
+  return ((constant != nullptr) &&
+          compiler::Assembler::IsSafe(constant->value()))
              ? Location::Constant(constant)
              : Location::Any();
 }
diff --git a/runtime/vm/compiler/backend/locations.h b/runtime/vm/compiler/backend/locations.h
index b292588..3fd9edd 100644
--- a/runtime/vm/compiler/backend/locations.h
+++ b/runtime/vm/compiler/backend/locations.h
@@ -863,7 +863,7 @@
 
  private:
   BitmapBuilder& EnsureStackBitmap() {
-    if (stack_bitmap_ == NULL) {
+    if (stack_bitmap_ == nullptr) {
       stack_bitmap_ = new BitmapBuilder();
     }
     return *stack_bitmap_;
diff --git a/runtime/vm/compiler/backend/locations_helpers_test.cc b/runtime/vm/compiler/backend/locations_helpers_test.cc
index aaf3805..25053db 100644
--- a/runtime/vm/compiler/backend/locations_helpers_test.cc
+++ b/runtime/vm/compiler/backend/locations_helpers_test.cc
@@ -70,7 +70,7 @@
   virtual ~MockInstruction() {}
 
   LocationSummary* locs() {
-    if (locs_ == NULL) {
+    if (locs_ == nullptr) {
       locs_ = MakeLocationSummary(Thread::Current()->zone(), false);
     }
     return locs_;
@@ -90,7 +90,9 @@
    public:                                                                     \
     LocationSummary* MakeLocationSummary(Zone* zone, bool opt) const;          \
     void EmitNativeCode(FlowGraphCompiler* compiler);                          \
-    virtual intptr_t InputCount() const { return Arity; }                      \
+    virtual intptr_t InputCount() const {                                      \
+      return Arity;                                                            \
+    }                                                                          \
   };                                                                           \
   TEST_CASE(LocationsHelpers_##Name) {                                         \
     const Location expected_out = ExpectedOut;                                 \
@@ -107,7 +109,7 @@
     ValidateSummary(locs, expected_out, expected_inputs, expected_temps);      \
     FillSummary(locs, allocated_out, allocated_inputs, allocated_temps);       \
                                                                                \
-    instr->EmitNativeCode(NULL);                                               \
+    instr->EmitNativeCode(nullptr);                                            \
   }                                                                            \
   DEFINE_BACKEND(Name, Signature)
 
diff --git a/runtime/vm/compiler/backend/range_analysis.cc b/runtime/vm/compiler/backend/range_analysis.cc
index 64f740d..5d94091 100644
--- a/runtime/vm/compiler/backend/range_analysis.cc
+++ b/runtime/vm/compiler/backend/range_analysis.cc
@@ -76,7 +76,7 @@
        !block_it.Done(); block_it.Advance()) {
     BlockEntryInstr* block = block_it.Current();
     JoinEntryInstr* join = block->AsJoinEntry();
-    if (join != NULL) {
+    if (join != nullptr) {
       for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) {
         PhiInstr* current = phi_it.Current();
         if (current->Type()->IsInt()) {
@@ -89,7 +89,7 @@
          instr_it.Advance()) {
       Instruction* current = instr_it.Current();
       Definition* defn = current->AsDefinition();
-      if (defn != NULL) {
+      if (defn != nullptr) {
         if (defn->HasSSATemp() && IsIntegerDefinition(defn)) {
           values_.Add(defn);
           if (defn->IsBinaryInt64Op()) {
@@ -133,7 +133,7 @@
                            RangeBoundary::MaxSmi());
     default:
       UNREACHABLE();
-      return NULL;
+      return nullptr;
   }
 }
 
@@ -142,22 +142,22 @@
                                                     Range* constraint_range,
                                                     Instruction* after) {
   // No need to constrain constants.
-  if (defn->IsConstant()) return NULL;
+  if (defn->IsConstant()) return nullptr;
 
   // Check if the value is already constrained to avoid inserting duplicated
   // constraints.
   ConstraintInstr* constraint = after->next()->AsConstraint();
-  while (constraint != NULL) {
+  while (constraint != nullptr) {
     if ((constraint->value()->definition() == defn) &&
         constraint->constraint()->Equals(constraint_range)) {
-      return NULL;
+      return nullptr;
     }
     constraint = constraint->next()->AsConstraint();
   }
 
   constraint = new (Z) ConstraintInstr(use->CopyWithType(), constraint_range);
 
-  flow_graph_->InsertAfter(after, constraint, NULL, FlowGraph::kValue);
+  flow_graph_->InsertAfter(after, constraint, nullptr, FlowGraph::kValue);
   FlowGraph::RenameDominatedUses(defn, constraint, constraint);
   constraints_.Add(constraint);
   return constraint;
@@ -166,7 +166,7 @@
 bool RangeAnalysis::ConstrainValueAfterBranch(Value* use, Definition* defn) {
   BranchInstr* branch = use->instruction()->AsBranch();
   RelationalOpInstr* rel_op = branch->comparison()->AsRelationalOp();
-  if ((rel_op != NULL) && (rel_op->operation_cid() == kSmiCid)) {
+  if ((rel_op != nullptr) && (rel_op->operation_cid() == kSmiCid)) {
     // Found comparison of two smis. Constrain defn at true and false
     // successors using the other operand as a boundary.
     Definition* boundary;
@@ -186,7 +186,7 @@
     ConstraintInstr* true_constraint =
         InsertConstraintFor(use, defn, ConstraintSmiRange(op_kind, boundary),
                             branch->true_successor());
-    if (true_constraint != NULL) {
+    if (true_constraint != nullptr) {
       true_constraint->set_target(branch->true_successor());
     }
 
@@ -195,7 +195,7 @@
         use, defn,
         ConstraintSmiRange(Token::NegateComparison(op_kind), boundary),
         branch->false_successor());
-    if (false_constraint != NULL) {
+    if (false_constraint != nullptr) {
       false_constraint->set_target(branch->false_successor());
     }
 
@@ -206,7 +206,7 @@
 }
 
 void RangeAnalysis::InsertConstraintsFor(Definition* defn) {
-  for (Value* use = defn->input_use_list(); use != NULL;
+  for (Value* use = defn->input_use_list(); use != nullptr;
        use = use->next_use()) {
     if (auto branch = use->instruction()->AsBranch()) {
       if (ConstrainValueAfterBranch(use, defn)) {
@@ -226,7 +226,7 @@
                                                   Definition* defn) {
   const intptr_t use_index = use->use_index();
 
-  Range* constraint_range = NULL;
+  Range* constraint_range = nullptr;
   if (use_index == CheckBoundBase::kIndexPos) {
     Definition* length = check->length()->definition();
     constraint_range = new (Z) Range(RangeBoundary::FromConstant(0),
@@ -254,7 +254,7 @@
   Definition* defn = value->definition();
   const Range* range = defn->range();
 
-  if ((range == NULL) && (defn->Type()->ToCid() != kSmiCid)) {
+  if ((range == nullptr) && (defn->Type()->ToCid() != kSmiCid)) {
     // Type propagator determined that reaching type for this use is Smi.
     // However the definition itself is not a smi-definition and
     // thus it will never have range assigned to it. Just return the widest
@@ -263,7 +263,7 @@
     // (e.g. results of loads or function call) can be used only after they
     // pass through UnboxInt64Instr which is considered as mint-definition
     // and will have a range assigned to it.
-    // Note: that we can't return NULL here because it is used as lattice's
+    // Note that we can't return nullptr here because it is used as lattice's
     // bottom element to indicate that the range was not computed *yet*.
     return &smi_range_;
   }
@@ -275,12 +275,12 @@
   Definition* defn = value->definition();
   const Range* range = defn->range();
 
-  if ((range == NULL) && !defn->Type()->IsInt()) {
+  if ((range == nullptr) && !defn->Type()->IsInt()) {
     // Type propagator determined that reaching type for this use is int.
     // However the definition itself is not a int-definition and
     // thus it will never have range assigned to it. Just return the widest
     // range possible for this value.
-    // Note: that we can't return NULL here because it is used as lattice's
+    // Note that we can't return nullptr here because it is used as lattice's
     // bottom element to indicate that the range was not computed *yet*.
     return &int64_range_;
   }
@@ -482,7 +482,7 @@
     BlockEntryInstr* block = block_it.Current();
 
     JoinEntryInstr* join = block->AsJoinEntry();
-    if (join != NULL) {
+    if (join != nullptr) {
       for (PhiIterator it(join); !it.Done(); it.Advance()) {
         PhiInstr* phi = it.Current();
         if (set->Contains(phi->ssa_temp_index())) {
@@ -493,7 +493,7 @@
 
     for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
       Definition* defn = it.Current()->AsDefinition();
-      if ((defn != NULL) && defn->HasSSATemp() &&
+      if ((defn != nullptr) && defn->HasSSATemp() &&
           set->Contains(defn->ssa_temp_index())) {
         definitions_.Add(defn);
       }
@@ -623,7 +623,7 @@
   // Given the floating instruction attempt to schedule it into one of the
   // loop preheaders that dominates given post_dominator instruction.
   // Some of the instruction inputs can potentially be unscheduled as well.
-  // Returns NULL is the scheduling fails (e.g. inputs are not invariant for
+  // Returns nullptr if the scheduling fails (e.g. inputs are not invariant for
   // any loop containing post_dominator).
   // Resulting schedule should be equivalent to one obtained by inserting
   // instructions right before post_dominator and running CSE and LICM passes.
@@ -656,8 +656,8 @@
 
       if (!defn->HasSSATemp()) {
         Definition* scheduled = Emit(defn, sink);
-        if (scheduled == NULL) {
-          return NULL;
+        if (scheduled == nullptr) {
+          return nullptr;
         }
         instruction->InputAt(i)->set_definition(scheduled);
       } else if (defn->IsConstraint()) {
@@ -669,7 +669,7 @@
     // If the instruction is still in the graph (it could have been
     // un-scheduled by a rollback action) and it dominates the sink - use it.
     Instruction* emitted = map_.LookupValue(instruction);
-    if (emitted != NULL && !emitted->WasEliminated() &&
+    if (emitted != nullptr && !emitted->WasEliminated() &&
         sink->IsDominatedBy(emitted)) {
       return emitted;
     }
@@ -680,7 +680,7 @@
       BlockEntryInstr* header = loop_headers_[i];
       BlockEntryInstr* pre_header = pre_headers_[i];
 
-      if (pre_header == NULL) {
+      if (pre_header == nullptr) {
         continue;
       }
 
@@ -705,7 +705,7 @@
       }
     }
 
-    return NULL;
+    return nullptr;
   }
 
   void EmitTo(BlockEntryInstr* block, Instruction* instr) {
@@ -757,7 +757,7 @@
     // Re-associate subexpressions inside upper_bound to collect all constants
     // together. This will expose more redundancies when we are going to emit
     // upper bound through scheduler.
-    if (!Simplify(&upper_bound, NULL)) {
+    if (!Simplify(&upper_bound, nullptr)) {
       if (FLAG_support_il_printer && FLAG_trace_range_analysis) {
         THR_Print("Failed to simplify upper bound for %s index\n",
                   check->ToCString());
@@ -842,7 +842,7 @@
           DeoptId::kNone);
       precondition->mark_generalized();
       precondition = scheduler_.Emit(precondition, check);
-      if (precondition == NULL) {
+      if (precondition == nullptr) {
         if (FLAG_trace_range_analysis) {
           THR_Print("  => failed to insert positivity constraint\n");
         }
@@ -864,7 +864,7 @@
     }
 
     new_check = scheduler_.Emit(new_check, check);
-    if (new_check != NULL) {
+    if (new_check != nullptr) {
       if (FLAG_trace_range_analysis) {
         THR_Print("  => generalized check was hoisted into B%" Pd "\n",
                   new_check->GetBlock()->block_id());
@@ -880,7 +880,7 @@
 
   static void RemoveGeneralizedCheck(CheckArrayBoundInstr* check) {
     BinarySmiOpInstr* binary_op = check->index()->definition()->AsBinarySmiOp();
-    if (binary_op != NULL) {
+    if (binary_op != nullptr) {
       binary_op->set_can_overflow(false);
     }
     check->ReplaceUsesWith(check->index()->definition());
@@ -1114,31 +1114,31 @@
           return false;  // Abort.
         }
 
-        if (constant != NULL) {
+        if (constant != nullptr) {
           *constant = c;
         }
 
-        if ((left == NULL) && (right == NULL)) {
-          if (constant != NULL) {
-            *defn = NULL;
+        if ((left == nullptr) && (right == nullptr)) {
+          if (constant != nullptr) {
+            *defn = nullptr;
           } else {
             *defn = flow_graph_->GetConstant(Smi::Handle(Smi::New(c)));
           }
           return true;
         }
 
-        if (left == NULL) {
-          if ((constant != NULL) || (c == 0)) {
+        if (left == nullptr) {
+          if ((constant != nullptr) || (c == 0)) {
             *defn = right;
             return true;
           } else {
             left = right;
-            right = NULL;
+            right = nullptr;
           }
         }
 
-        if (right == NULL) {
-          if ((constant != NULL) || (c == 0)) {
+        if (right == nullptr) {
+          if ((constant != nullptr) || (c == 0)) {
             *defn = left;
             return true;
           } else {
@@ -1159,25 +1159,25 @@
           return false;  // Abort.
         }
 
-        if (constant != NULL) {
+        if (constant != nullptr) {
           *constant = c;
         }
 
-        if ((left == NULL) && (right == NULL)) {
-          if (constant != NULL) {
-            *defn = NULL;
+        if ((left == nullptr) && (right == nullptr)) {
+          if (constant != nullptr) {
+            *defn = nullptr;
           } else {
             *defn = flow_graph_->GetConstant(Smi::Handle(Smi::New(c)));
           }
           return true;
         }
 
-        if (left == NULL) {
+        if (left == nullptr) {
           left = flow_graph_->GetConstant(Object::smi_zero());
         }
 
-        if (right == NULL) {
-          if ((constant != NULL) || (c == 0)) {
+        if (right == nullptr) {
+          if ((constant != nullptr) || (c == 0)) {
             *defn = left;
             return true;
           } else {
@@ -1186,7 +1186,7 @@
           }
         }
       } else if (binary_op->op_kind() == Token::kMUL) {
-        if (!Simplify(&left, NULL) || !Simplify(&right, NULL)) {
+        if (!Simplify(&left, nullptr) || !Simplify(&right, nullptr)) {
           return false;
         }
       } else {
@@ -1194,8 +1194,8 @@
         return true;
       }
 
-      ASSERT(left != NULL);
-      ASSERT(right != NULL);
+      ASSERT(left != nullptr);
+      ASSERT(right != nullptr);
 
       const bool left_changed = (left != binary_op->left()->definition());
       const bool right_changed = (right != binary_op->right()->definition());
@@ -1210,13 +1210,13 @@
         }
       }
 
-      if ((c != 0) && (constant == NULL)) {
+      if ((c != 0) && (constant == nullptr)) {
         *defn = MakeBinaryOp(Token::kADD, *defn, c);
       }
     } else if ((*defn)->IsConstant()) {
       ConstantInstr* constant_defn = (*defn)->AsConstant();
-      if ((constant != NULL) && constant_defn->IsSmi()) {
-        *defn = NULL;
+      if ((constant != nullptr) && constant_defn->IsSmi()) {
+        *defn = nullptr;
         *constant = Smi::Cast(constant_defn->value()).Value();
       }
     }
@@ -1266,10 +1266,11 @@
   // instruction.
   static Definition* FindInnermostConstraint(Definition* defn,
                                              Instruction* post_dominator) {
-    for (Value* use = defn->input_use_list(); use != NULL;
+    for (Value* use = defn->input_use_list(); use != nullptr;
          use = use->next_use()) {
       ConstraintInstr* constraint = use->instruction()->AsConstraint();
-      if ((constraint != NULL) && post_dominator->IsDominatedBy(constraint)) {
+      if ((constraint != nullptr) &&
+          post_dominator->IsDominatedBy(constraint)) {
         return FindInnermostConstraint(constraint, post_dominator);
       }
     }
@@ -1284,10 +1285,10 @@
   static Definition* ApplyConstraints(
       Definition* defn,
       Instruction* post_dominator,
-      GrowableArray<ConstraintInstr*>* constraints = NULL) {
+      GrowableArray<ConstraintInstr*>* constraints = nullptr) {
     if (defn->HasSSATemp()) {
       defn = FindInnermostConstraint(defn, post_dominator);
-      if (constraints != NULL) {
+      if (constraints != nullptr) {
         for (intptr_t i = 0; i < constraints->length(); i++) {
           ConstraintInstr* constraint = (*constraints)[i];
           if (constraint->value()->definition() == defn) {
@@ -1310,7 +1311,7 @@
   static void PrettyPrintIndexBoundRecursively(BaseTextBuffer* f,
                                                Definition* index_bound) {
     BinarySmiOpInstr* binary_op = index_bound->AsBinarySmiOp();
-    if (binary_op != NULL) {
+    if (binary_op != nullptr) {
       f->AddString("(");
       PrettyPrintIndexBoundRecursively(f, binary_op->left()->definition());
       f->Printf(" %s ", Token::Str(binary_op->op_kind()));
@@ -1365,7 +1366,7 @@
   for (intptr_t i = 0; i < constraints_.length(); i++) {
     if (Range::IsUnknown(constraints_[i]->range())) {
       TargetEntryInstr* target = constraints_[i]->target();
-      if (target == NULL) {
+      if (target == nullptr) {
         // TODO(vegorov): replace Constraint with an unconditional
         // deoptimization and kill all dominated dead code.
         continue;
@@ -1419,7 +1420,7 @@
         int64_op->right()->CopyWithType(), int64_op->DeoptimizationTarget());
     int32_op->set_range(*int64_op->range());
     int32_op->set_can_overflow(false);
-    int64_op->ReplaceWith(int32_op, NULL);
+    int64_op->ReplaceWith(int32_op, nullptr);
   }
 }
 
@@ -1436,7 +1437,7 @@
         int64_op->right()->CopyWithType(), int64_op->DeoptimizationTarget());
     int32_op->set_range(*int64_op->range());
     int32_op->set_can_overflow(false);
-    int64_op->ReplaceWith(int32_op, NULL);
+    int64_op->ReplaceWith(int32_op, nullptr);
   }
 }
 
@@ -1452,7 +1453,7 @@
 
 IntegerInstructionSelector::IntegerInstructionSelector(FlowGraph* flow_graph)
     : flow_graph_(flow_graph) {
-  ASSERT(flow_graph_ != NULL);
+  ASSERT(flow_graph_ != nullptr);
   zone_ = flow_graph_->zone();
   selected_uint32_defs_ =
       new (zone_) BitVector(zone_, flow_graph_->current_ssa_temp_index());
@@ -1498,7 +1499,7 @@
          instr_it.Advance()) {
       Instruction* current = instr_it.Current();
       Definition* defn = current->AsDefinition();
-      if ((defn != NULL) && defn->HasSSATemp()) {
+      if ((defn != nullptr) && defn->HasSSATemp()) {
         if (IsPotentialUint32Definition(defn)) {
           if (FLAG_support_il_printer && FLAG_trace_integer_ir_selection) {
             THR_Print("Adding %s\n", current->ToCString());
@@ -1520,7 +1521,7 @@
       return false;
     }
     Range* range = op->range();
-    if ((range == NULL) ||
+    if ((range == nullptr) ||
         !range->IsWithin(0, static_cast<int64_t>(kMaxUint32))) {
       return false;
     }
@@ -1531,7 +1532,7 @@
 }
 
 void IntegerInstructionSelector::FindUint32NarrowingDefinitions() {
-  ASSERT(selected_uint32_defs_ != NULL);
+  ASSERT(selected_uint32_defs_ != nullptr);
   if (FLAG_trace_integer_ir_selection) {
     THR_Print("++++ Selecting Uint32 definitions:\n");
     THR_Print("++++ Initial set:\n");
@@ -1551,7 +1552,7 @@
   for (Value::Iterator it(list_head); !it.Done(); it.Advance()) {
     Value* use = it.Current();
     Definition* defn = use->instruction()->AsDefinition();
-    if ((defn == NULL) || !defn->HasSSATemp() ||
+    if ((defn == nullptr) || !defn->HasSSATemp() ||
         !selected_uint32_defs_->Contains(defn->ssa_temp_index())) {
       return false;
     }
@@ -1579,9 +1580,9 @@
     ShiftIntegerOpInstr* op = def->AsShiftIntegerOp();
     if ((op->op_kind() == Token::kSHR) || (op->op_kind() == Token::kUSHR)) {
       Definition* shift_input = op->left()->definition();
-      ASSERT(shift_input != NULL);
+      ASSERT(shift_input != nullptr);
       Range* range = shift_input->range();
-      if ((range == NULL) ||
+      if ((range == nullptr) ||
           !range->IsWithin(0, static_cast<int64_t>(kMaxUint32))) {
         return false;
       }
@@ -1596,7 +1597,7 @@
 }
 
 void IntegerInstructionSelector::Propagate() {
-  ASSERT(selected_uint32_defs_ != NULL);
+  ASSERT(selected_uint32_defs_ != nullptr);
   bool changed = true;
   intptr_t iteration = 0;
   while (changed) {
@@ -1663,7 +1664,7 @@
     return new (Z) UnaryUint32OpInstr(op_kind, value, deopt_id);
   }
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 void IntegerInstructionSelector::ReplaceInstructions() {
@@ -1677,7 +1678,7 @@
       continue;
     }
     Definition* replacement = ConstructReplacementFor(defn);
-    ASSERT(replacement != NULL);
+    ASSERT(replacement != nullptr);
     if (!Range::IsUnknown(defn->range())) {
       if (defn->range()->IsPositive()) {
         replacement->set_range(*defn->range());
@@ -1690,7 +1691,7 @@
       THR_Print("Replacing %s with %s\n", defn->ToCString(),
                 replacement->ToCString());
     }
-    defn->ReplaceWith(replacement, NULL);
+    defn->ReplaceWith(replacement, nullptr);
   }
 }
 
@@ -1894,7 +1895,7 @@
   if (!a->IsSymbol()) return false;
 
   Range* range = a->symbol()->range();
-  if ((range == NULL) || !range->max().IsSymbol()) return false;
+  if ((range == nullptr) || !range->max().IsSymbol()) return false;
 
   if (Utils::WillAddOverflow(range->max().offset(), a->offset())) {
     *a = RangeBoundary::PositiveInfinity();
@@ -1919,7 +1920,7 @@
   if (!a->IsSymbol()) return false;
 
   Range* range = a->symbol()->range();
-  if ((range == NULL) || !range->min().IsSymbol()) return false;
+  if ((range == nullptr) || !range->min().IsSymbol()) return false;
 
   if (Utils::WillAddOverflow(range->min().offset(), a->offset())) {
     *a = RangeBoundary::NegativeInfinity();
@@ -2137,10 +2138,10 @@
                 const Range* right,
                 RangeBoundary* result_min,
                 RangeBoundary* result_max) {
-  ASSERT(left != NULL);
-  ASSERT(right != NULL);
-  ASSERT(result_min != NULL);
-  ASSERT(result_max != NULL);
+  ASSERT(left != nullptr);
+  ASSERT(right != nullptr);
+  ASSERT(result_min != nullptr);
+  ASSERT(result_max != nullptr);
   RangeBoundary left_max = Range::ConstantMax(left);
   RangeBoundary left_min = Range::ConstantMin(left);
   // A negative shift count always deoptimizes (and throws), so the minimum
@@ -2254,10 +2255,10 @@
                 const Range* right_range,
                 RangeBoundary* result_min,
                 RangeBoundary* result_max) {
-  ASSERT(left_range != NULL);
-  ASSERT(right_range != NULL);
-  ASSERT(result_min != NULL);
-  ASSERT(result_max != NULL);
+  ASSERT(left_range != nullptr);
+  ASSERT(right_range != nullptr);
+  ASSERT(result_min != nullptr);
+  ASSERT(result_max != nullptr);
 
   if (Range::ConstantMin(right_range).ConstantValue() >= 0) {
     *result_min = RangeBoundary::FromConstant(0);
@@ -2302,10 +2303,10 @@
                 RangeBoundary* result_min,
                 RangeBoundary* result_max,
                 Definition* left_defn) {
-  ASSERT(left_range != NULL);
-  ASSERT(right_range != NULL);
-  ASSERT(result_min != NULL);
-  ASSERT(result_max != NULL);
+  ASSERT(left_range != nullptr);
+  ASSERT(right_range != nullptr);
+  ASSERT(result_min != nullptr);
+  ASSERT(result_max != nullptr);
 
   RangeBoundary left_min = Definition::IsArrayLength(left_defn)
                                ? RangeBoundary::FromDefinition(left_defn)
@@ -2332,10 +2333,10 @@
                 RangeBoundary* result_min,
                 RangeBoundary* result_max,
                 Definition* left_defn) {
-  ASSERT(left_range != NULL);
-  ASSERT(right_range != NULL);
-  ASSERT(result_min != NULL);
-  ASSERT(result_max != NULL);
+  ASSERT(left_range != nullptr);
+  ASSERT(right_range != nullptr);
+  ASSERT(result_min != nullptr);
+  ASSERT(result_max != nullptr);
 
   RangeBoundary left_min = Definition::IsArrayLength(left_defn)
                                ? RangeBoundary::FromDefinition(left_defn)
@@ -2361,10 +2362,10 @@
                 const Range* right_range,
                 RangeBoundary* result_min,
                 RangeBoundary* result_max) {
-  ASSERT(left_range != NULL);
-  ASSERT(right_range != NULL);
-  ASSERT(result_min != NULL);
-  ASSERT(result_max != NULL);
+  ASSERT(left_range != nullptr);
+  ASSERT(right_range != nullptr);
+  ASSERT(result_min != nullptr);
+  ASSERT(result_max != nullptr);
 
   const int64_t left_max = ConstantAbsMax(left_range);
   const int64_t right_max = ConstantAbsMax(right_range);
@@ -2461,7 +2462,7 @@
 
 // Return the maximum absolute value included in range.
 int64_t Range::ConstantAbsMax(const Range* range) {
-  if (range == NULL) {
+  if (range == nullptr) {
     return RangeBoundary::kMax;
   }
   const int64_t abs_min =
@@ -2473,7 +2474,7 @@
 
 // Return the minimum absolute value included in range.
 int64_t Range::ConstantAbsMin(const Range* range) {
-  if (range == NULL) {
+  if (range == nullptr) {
     return 0;
   }
   const int64_t abs_min =
@@ -2488,8 +2489,8 @@
                      const Range* right_range,
                      Definition* left_defn,
                      Range* result) {
-  ASSERT(left_range != NULL);
-  ASSERT(right_range != NULL);
+  ASSERT(left_range != nullptr);
+  ASSERT(right_range != nullptr);
 
   // Both left and right ranges are finite.
   ASSERT(left_range->IsFinite());
@@ -2557,7 +2558,7 @@
 }
 
 void Definition::set_range(const Range& range) {
-  if (range_ == NULL) {
+  if (range_ == nullptr) {
     range_ = new Range();
   }
   *range_ = range;
@@ -2679,7 +2680,7 @@
       return analysis->GetIntRange(input);
     default:
       UNREACHABLE();
-      return NULL;
+      return nullptr;
   }
 }
 
@@ -2982,15 +2983,15 @@
 static void CacheRange(Range** slot,
                        const Range* range,
                        RangeBoundary::RangeSize size) {
-  if (range != NULL) {
-    if (*slot == NULL) {
+  if (range != nullptr) {
+    if (*slot == nullptr) {
       *slot = new Range();
     }
     **slot = *range;
 
     // Eliminate any symbolic dependencies from the range information.
     (*slot)->ClampToConstant(size);
-  } else if (*slot != NULL) {
+  } else if (*slot != nullptr) {
     **slot = Range();  // Clear cached range information.
   }
 }
@@ -3051,7 +3052,7 @@
 }
 
 void UnboxUint32Instr::InferRange(RangeAnalysis* analysis, Range* range) {
-  const Range* value_range = NULL;
+  const Range* value_range = nullptr;
 
   if (value()->Type()->ToCid() == kSmiCid) {
     value_range = analysis->GetSmiRange(value());
@@ -3075,7 +3076,7 @@
 
 void UnboxInt64Instr::InferRange(RangeAnalysis* analysis, Range* range) {
   const Range* value_range = value()->definition()->range();
-  if (value_range != NULL) {
+  if (value_range != nullptr) {
     *range = *value_range;
   } else if (!value()->definition()->IsInt64Definition() &&
              (value()->definition()->Type()->ToCid() != kSmiCid)) {
diff --git a/runtime/vm/compiler/backend/range_analysis.h b/runtime/vm/compiler/backend/range_analysis.h
index dc178f6..6725ff7 100644
--- a/runtime/vm/compiler/backend/range_analysis.h
+++ b/runtime/vm/compiler/backend/range_analysis.h
@@ -336,7 +336,7 @@
   }
 
   static bool IsUnknown(const Range* other) {
-    if (other == NULL) {
+    if (other == nullptr) {
       return true;
     }
     return other->min().IsUnknown();
@@ -352,7 +352,7 @@
 
   bool Equals(const Range* other) {
     ASSERT(min_.IsUnknown() == max_.IsUnknown());
-    if (other == NULL) {
+    if (other == nullptr) {
       return min_.IsUnknown();
     }
     return min_.Equals(other->min_) && max_.Equals(other->max_);
@@ -397,7 +397,7 @@
 
   static RangeBoundary ConstantMin(const Range* range,
                                    RangeBoundary::RangeSize size) {
-    if (range == NULL) {
+    if (range == nullptr) {
       return RangeBoundary::MinConstant(size);
     }
     return range->min().LowerBound().Clamp(size);
@@ -405,7 +405,7 @@
 
   static RangeBoundary ConstantMax(const Range* range,
                                    RangeBoundary::RangeSize size) {
-    if (range == NULL) {
+    if (range == nullptr) {
       return RangeBoundary::MaxConstant(size);
     }
     return range->max().UpperBound().Clamp(size);
diff --git a/runtime/vm/compiler/backend/range_analysis_test.cc b/runtime/vm/compiler/backend/range_analysis_test.cc
index 39b4729..6baf36f 100644
--- a/runtime/vm/compiler/backend/range_analysis_test.cc
+++ b/runtime/vm/compiler/backend/range_analysis_test.cc
@@ -270,7 +270,7 @@
 
   {
     Range result;
-    Range::BinaryOp(Token::kADD, range_a, range_b, NULL, &result);
+    Range::BinaryOp(Token::kADD, range_a, range_b, nullptr, &result);
     ASSERT(!Range::IsUnknown(&result));
     EXPECT(!result.min().IsNegativeInfinity());
     EXPECT(!result.max().IsPositiveInfinity());
@@ -288,7 +288,7 @@
 
   {
     Range result;
-    Range::BinaryOp(Token::kADD, range_c, range_d, NULL, &result);
+    Range::BinaryOp(Token::kADD, range_c, range_d, nullptr, &result);
     ASSERT(!Range::IsUnknown(&result));
     EXPECT(result.min().ConstantValue() == 5);
     EXPECT(result.max().ConstantValue() == 15);
@@ -301,7 +301,7 @@
                              RangeBoundary::FromConstant(0xf));
   {
     Range result;
-    Range::BinaryOp(Token::kBIT_AND, range_e, range_f, NULL, &result);
+    Range::BinaryOp(Token::kBIT_AND, range_e, range_f, nullptr, &result);
     ASSERT(!Range::IsUnknown(&result));
     EXPECT(result.min().ConstantValue() == 0x0);
     EXPECT(result.max().ConstantValue() == 0xf);
@@ -320,7 +320,7 @@
     EXPECT(left_range->max().ConstantValue() == l_max);                        \
     EXPECT(right_range->min().ConstantValue() == r_min);                       \
     EXPECT(right_range->max().ConstantValue() == r_max);                       \
-    Range::Add(left_range, right_range, &min, &max, NULL);                     \
+    Range::Add(left_range, right_range, &min, &max, nullptr);                  \
     EXPECT(min.Equals(result_min));                                            \
     if (FLAG_support_il_printer && !min.Equals(result_min)) {                  \
       OS::PrintErr("%s != %s\n", min.ToCString(), result_min.ToCString());     \
@@ -400,7 +400,7 @@
     EXPECT(left_range->max().ConstantValue() == l_max);                        \
     EXPECT(right_range->min().ConstantValue() == r_min);                       \
     EXPECT(right_range->max().ConstantValue() == r_max);                       \
-    Range::Sub(left_range, right_range, &min, &max, NULL);                     \
+    Range::Sub(left_range, right_range, &min, &max, nullptr);                  \
     EXPECT(min.Equals(result_min));                                            \
     if (FLAG_support_il_printer && !min.Equals(result_min)) {                  \
       OS::PrintErr("%s != %s\n", min.ToCString(), result_min.ToCString());     \
diff --git a/runtime/vm/compiler/backend/redundancy_elimination.cc b/runtime/vm/compiler/backend/redundancy_elimination.cc
index b12de0e..f493021 100644
--- a/runtime/vm/compiler/backend/redundancy_elimination.cc
+++ b/runtime/vm/compiler/backend/redundancy_elimination.cc
@@ -312,7 +312,8 @@
   // Create object representing *[*] alias.
   static Place* CreateAnyInstanceAnyIndexAlias(Zone* zone, intptr_t id) {
     return Wrap(
-        zone, Place(EncodeFlags(kIndexed, kNoRepresentation, kNoSize), NULL, 0),
+        zone,
+        Place(EncodeFlags(kIndexed, kNoRepresentation, kNoSize), nullptr, 0),
         id);
   }
 
@@ -332,10 +333,10 @@
   //      if any.
   //
   Place ToAlias() const {
-    return Place(
-        RepresentationBits::update(kNoRepresentation, flags_),
-        (DependsOnInstance() && IsAllocation(instance())) ? instance() : NULL,
-        (kind() == kIndexed) ? 0 : raw_selector_);
+    return Place(RepresentationBits::update(kNoRepresentation, flags_),
+                 (DependsOnInstance() && IsAllocation(instance())) ? instance()
+                                                                   : nullptr,
+                 (kind() == kIndexed) ? 0 : raw_selector_);
   }
 
   bool DependsOnInstance() const {
@@ -358,7 +359,7 @@
   // wild-card dependent alias *.f, *.@offs, *[C] or *[*] respectively.
   Place CopyWithoutInstance() const {
     ASSERT(DependsOnInstance());
-    return Place(flags_, NULL, raw_selector_);
+    return Place(flags_, nullptr, raw_selector_);
   }
 
   // Given alias X[C] or *[C] return X[*] and *[*] respectively.
@@ -442,7 +443,7 @@
   }
 
   static const char* DefinitionName(Definition* def) {
-    if (def == NULL) {
+    if (def == nullptr) {
       return "*";
     } else {
       return Thread::Current()->zone()->PrintToString("v%" Pd,
@@ -511,9 +512,9 @@
   static Place* Wrap(Zone* zone, const Place& place, intptr_t id);
 
   static bool IsAllocation(Definition* defn) {
-    return (defn != NULL) && (defn->IsAllocation() ||
-                              (defn->IsStaticCall() &&
-                               defn->AsStaticCall()->IsRecognizedFactory()));
+    return (defn != nullptr) && (defn->IsAllocation() ||
+                                 (defn->IsStaticCall() &&
+                                  defn->AsStaticCall()->IsRecognizedFactory()));
   }
 
  private:
@@ -545,7 +546,7 @@
 
   void SetIndex(Definition* index, intptr_t scale, intptr_t class_id) {
     ConstantInstr* index_constant = index->AsConstant();
-    if ((index_constant != NULL) && index_constant->value().IsSmi()) {
+    if ((index_constant != nullptr) && index_constant->value().IsSmi()) {
       const intptr_t index_value = Smi::Cast(index_constant->value()).Value();
       const ElementSize size = ElementSizeFor(class_id);
       const bool is_typed_access = (size != kNoSize);
@@ -725,7 +726,7 @@
 
   MovesList GetOutgoingMoves(BlockEntryInstr* block) const {
     const intptr_t block_num = block->preorder_number();
-    return (block_num < moves_.length()) ? moves_[block_num] : NULL;
+    return (block_num < moves_.length()) ? moves_[block_num] : nullptr;
   }
 
  private:
@@ -761,11 +762,11 @@
 
   intptr_t LookupAliasId(const Place& alias) {
     const Place* result = aliases_map_.LookupValue(&alias);
-    return (result != NULL) ? result->id() : static_cast<intptr_t>(kNoAlias);
+    return (result != nullptr) ? result->id() : static_cast<intptr_t>(kNoAlias);
   }
 
   BitVector* GetKilledSet(intptr_t alias) {
-    return (alias < killed_.length()) ? killed_[alias] : NULL;
+    return (alias < killed_.length()) ? killed_[alias] : nullptr;
   }
 
   intptr_t max_place_id() const { return places().length(); }
@@ -846,7 +847,7 @@
               ->Add(place->id());
         }
 
-        if (alias->instance() == NULL) {
+        if (alias->instance() == nullptr) {
           EnsureSet(&representatives_, kUnknownInstanceConstantIndexedAlias)
               ->Add(place->id());
         }
@@ -884,7 +885,7 @@
         BitVector* kill = GetKilledSet(alias->id());
 
         THR_Print("%s: ", alias->ToCString());
-        if (kill != NULL) {
+        if (kill != nullptr) {
           PrintSet(kill);
         }
         THR_Print("\n");
@@ -899,7 +900,7 @@
 
   const Place* CanonicalizeAlias(const Place& alias) {
     const Place* canonical = aliases_map_.LookupValue(&alias);
-    if (canonical == NULL) {
+    if (canonical == nullptr) {
       canonical = Place::Wrap(zone_, alias,
                               kAnyInstanceAnyIndexAlias + aliases_.length());
       InsertAlias(canonical);
@@ -909,16 +910,17 @@
   }
 
   BitVector* GetRepresentativesSet(intptr_t alias) {
-    return (alias < representatives_.length()) ? representatives_[alias] : NULL;
+    return (alias < representatives_.length()) ? representatives_[alias]
+                                               : nullptr;
   }
 
   BitVector* EnsureSet(GrowableArray<BitVector*>* sets, intptr_t alias) {
     while (sets->length() <= alias) {
-      sets->Add(NULL);
+      sets->Add(nullptr);
     }
 
     BitVector* set = (*sets)[alias];
-    if (set == NULL) {
+    if (set == nullptr) {
       (*sets)[alias] = set = new (zone_) BitVector(zone_, max_place_id());
     }
     return set;
@@ -930,7 +932,7 @@
 
   void AddAllRepresentatives(intptr_t to, intptr_t from) {
     BitVector* from_set = GetRepresentativesSet(from);
-    if (from_set != NULL) {
+    if (from_set != nullptr) {
       EnsureSet(&killed_, to)->AddAll(from_set);
     }
   }
@@ -960,7 +962,7 @@
   void ComputeKillSet(const Place* alias) {
     switch (alias->kind()) {
       case Place::kIndexed:  // Either *[*] or X[*] alias.
-        if (alias->instance() == NULL) {
+        if (alias->instance() == nullptr) {
           // *[*] aliases with X[*], X[C], *[C].
           AddAllRepresentatives(alias, kAnyConstantIndexedAlias);
           AddAllRepresentatives(alias, kAnyAllocationIndexedAlias);
@@ -975,7 +977,7 @@
       case Place::kConstantIndexed:  // Either X[C] or *[C] alias.
         if (alias->element_size() != Place::kNoSize) {
           const bool has_aliased_instance =
-              (alias->instance() != NULL) && CanBeAliased(alias->instance());
+              (alias->instance() != nullptr) && CanBeAliased(alias->instance());
 
           // If this is a TypedData access then X[C|S] aliases larger elements
           // covering this one X[RoundDown(C, S')|S'] for all S' > S and
@@ -1034,7 +1036,7 @@
           }
         }
 
-        if (alias->instance() == NULL) {
+        if (alias->instance() == nullptr) {
           // *[C] aliases with X[C], X[*], *[*].
           AddAllRepresentatives(alias, kAnyAllocationIndexedAlias);
           CrossAlias(alias, kAnyInstanceAnyIndexAlias);
@@ -1082,7 +1084,7 @@
   bool HasLoadsFromPlace(Definition* defn, const Place* place) {
     ASSERT(place->kind() == Place::kInstanceField);
 
-    for (Value* use = defn->input_use_list(); use != NULL;
+    for (Value* use = defn->input_use_list(); use != nullptr;
          use = use->next_use()) {
       Instruction* instr = use->instruction();
       if (UseIsARedefinition(use) &&
@@ -1111,7 +1113,7 @@
   // Check if any use of the definition can create an alias.
   // Can add more objects into aliasing_worklist_.
   bool AnyUseCreatesAlias(Definition* defn) {
-    for (Value* use = defn->input_use_list(); use != NULL;
+    for (Value* use = defn->input_use_list(); use != nullptr;
          use = use->next_use()) {
       Instruction* instr = use->instruction();
       if (instr->HasUnknownSideEffects() || instr->IsLoadUntagged() ||
@@ -1188,7 +1190,7 @@
       }
     }
     // Find all stores into this object.
-    for (Value* use = defn->input_use_list(); use != NULL;
+    for (Value* use = defn->input_use_list(); use != nullptr;
          use = use->next_use()) {
       auto instr = use->instruction();
       if (UseIsARedefinition(use)) {
@@ -1275,22 +1277,22 @@
   }
 
   StoreFieldInstr* store_instance_field = instr->AsStoreField();
-  if (store_instance_field != NULL) {
+  if (store_instance_field != nullptr) {
     return store_instance_field->value()->definition();
   }
 
   StoreStaticFieldInstr* store_static_field = instr->AsStoreStaticField();
-  if (store_static_field != NULL) {
+  if (store_static_field != nullptr) {
     return store_static_field->value()->definition();
   }
 
   UNREACHABLE();  // Should only be called for supported store instructions.
-  return NULL;
+  return nullptr;
 }
 
 static bool IsPhiDependentPlace(Place* place) {
   return (place->kind() == Place::kInstanceField) &&
-         (place->instance() != NULL) && place->instance()->IsPhi();
+         (place->instance() != nullptr) && place->instance()->IsPhi();
 }
 
 // For each place that depends on a phi ensure that equivalent places
@@ -1319,7 +1321,7 @@
         input_place.set_instance(phi->InputAt(j)->definition());
 
         Place* result = map->LookupValue(&input_place);
-        if (result == NULL) {
+        if (result == nullptr) {
           result = Place::Wrap(zone, input_place, places->length());
           map->Insert(result);
           places->Add(result);
@@ -1375,7 +1377,7 @@
       }
 
       Place* result = map->LookupValue(&place);
-      if (result == NULL) {
+      if (result == nullptr) {
         result = Place::Wrap(zone, place, places->length());
         map->Insert(result);
         places->Add(result);
@@ -1391,10 +1393,10 @@
   }
 
   if ((mode == kOptimizeLoads) && !has_loads) {
-    return NULL;
+    return nullptr;
   }
   if ((mode == kOptimizeStores) && !has_stores) {
-    return NULL;
+    return nullptr;
   }
 
   PhiPlaceMoves* phi_moves = ComputePhiMoves(map, places);
@@ -1412,7 +1414,7 @@
 static bool IsLoopInvariantLoad(ZoneGrowableArray<BitVector*>* sets,
                                 intptr_t loop_header_index,
                                 Instruction* instr) {
-  return IsLoadEliminationCandidate(instr) && (sets != NULL) &&
+  return IsLoadEliminationCandidate(instr) && (sets != nullptr) &&
          HasPlaceId(instr) &&
          (*sets)[loop_header_index]->Contains(GetPlaceId(instr));
 }
@@ -1431,7 +1433,7 @@
   }
   // Move the instruction out of the loop.
   current->RemoveEnvironment();
-  if (it != NULL) {
+  if (it != nullptr) {
     it->RemoveCurrentFromGraph();
   } else {
     current->RemoveFromGraph();
@@ -1476,18 +1478,18 @@
     return;
   }
 
-  CheckSmiInstr* check = NULL;
-  for (Value* use = phi->input_use_list(); (use != NULL) && (check == NULL);
-       use = use->next_use()) {
+  CheckSmiInstr* check = nullptr;
+  for (Value* use = phi->input_use_list();
+       (use != nullptr) && (check == nullptr); use = use->next_use()) {
     check = use->instruction()->AsCheckSmi();
   }
 
-  if (check == NULL) {
+  if (check == nullptr) {
     return;
   }
 
   // Hoist CheckSmi instruction and make this phi smi one.
-  Hoist(NULL, pre_header, check);
+  Hoist(nullptr, pre_header, check);
 
   // Replace value we are checking with phi's input.
   check->value()->BindTo(phi->InputAt(non_smi_input)->definition());
@@ -1512,7 +1514,7 @@
     JoinEntryInstr* header = loop_headers[i]->AsJoinEntry();
     // Skip loops that don't have a pre-header block.
     BlockEntryInstr* pre_header = header->ImmediateDominator();
-    if (pre_header == NULL) continue;
+    if (pre_header == nullptr) continue;
 
     for (PhiIterator it(header); !it.Done(); it.Advance()) {
       TrySpecializeSmiPhi(it.Current(), header, pre_header);
@@ -1754,17 +1756,17 @@
         phis_(5),
         worklist_(5),
         congruency_worklist_(6),
-        in_worklist_(NULL),
+        in_worklist_(nullptr),
         forwarded_(false) {
     const intptr_t num_blocks = graph_->preorder().length();
     for (intptr_t i = 0; i < num_blocks; i++) {
-      out_.Add(NULL);
+      out_.Add(nullptr);
       gen_.Add(new (Z) BitVector(Z, aliased_set_->max_place_id()));
       kill_.Add(new (Z) BitVector(Z, aliased_set_->max_place_id()));
       in_.Add(new (Z) BitVector(Z, aliased_set_->max_place_id()));
 
-      exposed_values_.Add(NULL);
-      out_values_.Add(NULL);
+      exposed_values_.Add(nullptr);
+      out_values_.Add(nullptr);
     }
   }
 
@@ -1783,7 +1785,7 @@
 
     PointerSet<Place> map;
     AliasedSet* aliased_set = NumberPlaces(graph, &map, kOptimizeLoads);
-    if ((aliased_set != NULL) && !aliased_set->IsEmpty()) {
+    if ((aliased_set != nullptr) && !aliased_set->IsEmpty()) {
       // If any loads were forwarded, return true from Optimize to run load
       // forwarding again. This allows forwarding chains of loads.
       // This is especially important for context variables as they are built
@@ -2060,8 +2062,8 @@
       BitVector* kill = kill_[preorder_number];
       BitVector* gen = gen_[preorder_number];
 
-      ZoneGrowableArray<Definition*>* exposed_values = NULL;
-      ZoneGrowableArray<Definition*>* out_values = NULL;
+      ZoneGrowableArray<Definition*>* exposed_values = nullptr;
+      ZoneGrowableArray<Definition*>* out_values = nullptr;
 
       for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
            instr_it.Advance()) {
@@ -2070,7 +2072,7 @@
         bool is_load = false, is_store = false;
         Place place(instr, &is_load, &is_store);
 
-        BitVector* killed = NULL;
+        BitVector* killed = nullptr;
         if (is_store) {
           const intptr_t alias_id =
               aliased_set_->LookupAliasId(place.ToAlias());
@@ -2146,7 +2148,7 @@
           // Check if this load needs renumbering because of the intrablock
           // load forwarding.
           const Place* canonical = aliased_set_->LookupCanonical(&place);
-          if ((canonical != NULL) &&
+          if ((canonical != nullptr) &&
               (canonical->id() != GetPlaceId(instr->AsDefinition()))) {
             SetPlaceId(instr->AsDefinition(), canonical->id());
           }
@@ -2162,7 +2164,7 @@
         }
 
         Definition* defn = instr->AsDefinition();
-        if (defn == NULL) {
+        if (defn == nullptr) {
           continue;
         }
 
@@ -2172,7 +2174,7 @@
             // any values from it.
             continue;
           }
-          for (Value* use = alloc->input_use_list(); use != NULL;
+          for (Value* use = alloc->input_use_list(); use != nullptr;
                use = use->next_use()) {
             if (use->use_index() != 0) {
               // Not a potential immediate load or store, since they take the
@@ -2256,7 +2258,8 @@
         const intptr_t place_id = GetPlaceId(defn);
         if (gen->Contains(place_id)) {
           // This is a locally redundant load.
-          ASSERT((out_values != NULL) && ((*out_values)[place_id] != NULL));
+          ASSERT((out_values != nullptr) &&
+                 ((*out_values)[place_id] != nullptr));
 
           Definition* replacement = (*out_values)[place_id];
           if (CanForwardLoadTo(defn, replacement)) {
@@ -2275,7 +2278,7 @@
           // This is an exposed load: it is the first representative of a
           // given expression id and it is not killed on the path from
           // the block entry.
-          if (exposed_values == NULL) {
+          if (exposed_values == nullptr) {
             static const intptr_t kMaxExposedValuesInitialSize = 5;
             exposed_values = new (Z) ZoneGrowableArray<Definition*>(
                 Utils::Minimum(kMaxExposedValuesInitialSize,
@@ -2287,7 +2290,7 @@
 
         gen->Add(place_id);
 
-        if (out_values == NULL) out_values = CreateBlockOutValues();
+        if (out_values == nullptr) out_values = CreateBlockOutValues();
         (*out_values)[place_id] = defn;
       }
 
@@ -2355,10 +2358,10 @@
           for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
             BlockEntryInstr* pred = block->PredecessorAt(i);
             BitVector* pred_out = out_[pred->preorder_number()];
-            if (pred_out == NULL) continue;
+            if (pred_out == nullptr) continue;
             PhiPlaceMoves::MovesList phi_moves =
                 aliased_set_->phi_moves()->GetOutgoingMoves(pred);
-            if (phi_moves != NULL) {
+            if (phi_moves != nullptr) {
               // If there are phi moves, perform intersection with
               // a copy of pred_out where the phi moves are applied.
               temp_out->CopyFrom(pred_out);
@@ -2369,15 +2372,15 @@
           }
         }
 
-        if (!temp->Equals(*block_in) || (block_out == NULL)) {
+        if (!temp->Equals(*block_in) || (block_out == nullptr)) {
           // If IN set has changed propagate the change to OUT set.
           block_in->CopyFrom(temp);
 
           temp->RemoveAll(block_kill);
           temp->AddAll(block_gen);
 
-          if ((block_out == NULL) || !block_out->Equals(*temp)) {
-            if (block_out == NULL) {
+          if ((block_out == nullptr) || !block_out->Equals(*temp)) {
+            if (block_out == nullptr) {
               block_out = out_[preorder_number] =
                   new (Z) BitVector(Z, aliased_set_->max_place_id());
             }
@@ -2393,13 +2396,13 @@
   // through the graph. Generate phis on back edges where eager merge is
   // impossible.
   // No replacement is done at this point and thus any out_value[place_id] is
-  // changed at most once: from NULL to an actual value.
+  // changed at most once: from nullptr to an actual value.
   // When merging incoming loads we might need to create a phi.
   // These phis are not inserted at the graph immediately because some of them
   // might become redundant after load forwarding is done.
   void ComputeOutValues() {
     GrowableArray<PhiInstr*> pending_phis(5);
-    ZoneGrowableArray<Definition*>* temp_forwarded_values = NULL;
+    ZoneGrowableArray<Definition*>* temp_forwarded_values = nullptr;
 
     for (BlockIterator block_it = graph_->reverse_postorder_iterator();
          !block_it.Done(); block_it.Advance()) {
@@ -2418,16 +2421,17 @@
            it.Advance()) {
         const intptr_t place_id = it.Current();
 
-        if (block_out_values == NULL) {
+        if (block_out_values == nullptr) {
           out_values_[preorder_number] = block_out_values =
               CreateBlockOutValues();
         }
 
-        if ((*block_out_values)[place_id] == NULL) {
+        if ((*block_out_values)[place_id] == nullptr) {
           ASSERT(block->PredecessorCount() > 0);
-          Definition* in_value =
-              can_merge_eagerly ? MergeIncomingValues(block, place_id) : NULL;
-          if ((in_value == NULL) &&
+          Definition* in_value = can_merge_eagerly
+                                     ? MergeIncomingValues(block, place_id)
+                                     : nullptr;
+          if ((in_value == nullptr) &&
               (in_[preorder_number]->Contains(place_id))) {
             PhiInstr* phi = new (Z)
                 PhiInstr(block->AsJoinEntry(), block->PredecessorCount());
@@ -2443,8 +2447,8 @@
       // of values to ensure that cyclic moves are performed correctly.
       PhiPlaceMoves::MovesList phi_moves =
           aliased_set_->phi_moves()->GetOutgoingMoves(block);
-      if ((phi_moves != NULL) && (block_out_values != NULL)) {
-        if (temp_forwarded_values == NULL) {
+      if ((phi_moves != nullptr) && (block_out_values != nullptr)) {
+        if (temp_forwarded_values == nullptr) {
           temp_forwarded_values = CreateBlockOutValues();
         }
 
@@ -2508,8 +2512,8 @@
     for (intptr_t i = 0; i < loop_headers.length(); i++) {
       BlockEntryInstr* header = loop_headers[i];
       BlockEntryInstr* pre_header = header->ImmediateDominator();
-      if (pre_header == NULL) {
-        invariant_loads->Add(NULL);
+      if (pre_header == nullptr) {
+        invariant_loads->Add(nullptr);
         continue;
       }
 
@@ -2547,14 +2551,15 @@
     // First check if the same value is coming in from all predecessors.
     static Definition* const kDifferentValuesMarker =
         reinterpret_cast<Definition*>(-1);
-    Definition* incoming = NULL;
+    Definition* incoming = nullptr;
     for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
       BlockEntryInstr* pred = block->PredecessorAt(i);
       ZoneGrowableArray<Definition*>* pred_out_values =
           out_values_[pred->preorder_number()];
-      if ((pred_out_values == NULL) || ((*pred_out_values)[place_id] == NULL)) {
-        return NULL;
-      } else if (incoming == NULL) {
+      if ((pred_out_values == nullptr) ||
+          ((*pred_out_values)[place_id] == nullptr)) {
+        return nullptr;
+      } else if (incoming == nullptr) {
         incoming = (*pred_out_values)[place_id];
       } else if (incoming != (*pred_out_values)[place_id]) {
         incoming = kDifferentValuesMarker;
@@ -2562,7 +2567,7 @@
     }
 
     if (incoming != kDifferentValuesMarker) {
-      ASSERT(incoming != NULL);
+      ASSERT(incoming != nullptr);
       return incoming;
     }
 
@@ -2582,7 +2587,7 @@
       BlockEntryInstr* pred = block->PredecessorAt(i);
       ZoneGrowableArray<Definition*>* pred_out_values =
           out_values_[pred->preorder_number()];
-      ASSERT((*pred_out_values)[place_id] != NULL);
+      ASSERT((*pred_out_values)[place_id] != nullptr);
 
       // Sets of outgoing values are not linked into use lists so
       // they might contain values that were replaced and removed
@@ -2614,7 +2619,7 @@
 
       ZoneGrowableArray<Definition*>* loads =
           exposed_values_[block->preorder_number()];
-      if (loads == NULL) continue;  // No exposed loads.
+      if (loads == nullptr) continue;  // No exposed loads.
 
       BitVector* in = in_[block->preorder_number()];
 
@@ -2623,7 +2628,7 @@
         if (!in->Contains(GetPlaceId(load))) continue;  // No incoming value.
 
         Definition* replacement = MergeIncomingValues(block, GetPlaceId(load));
-        ASSERT(replacement != NULL);
+        ASSERT(replacement != nullptr);
 
         // Sets of outgoing values are not linked into use lists so
         // they might contain values that were replaced and removed
@@ -2656,10 +2661,10 @@
   // they are not marked alive.
   // TODO(vegorov): move this into a separate phase over all phis.
   bool EliminateRedundantPhi(PhiInstr* phi) {
-    Definition* value = NULL;  // Possible value of this phi.
+    Definition* value = nullptr;  // Possible value of this phi.
 
     worklist_.Clear();
-    if (in_worklist_ == NULL) {
+    if (in_worklist_ == nullptr) {
       in_worklist_ = new (Z) BitVector(Z, graph_->current_ssa_temp_index());
     } else {
       in_worklist_->Clear();
@@ -2676,7 +2681,7 @@
         if (input == phi) continue;
 
         PhiInstr* phi_input = input->AsPhi();
-        if ((phi_input != NULL) && !phi_input->is_alive()) {
+        if ((phi_input != nullptr) && !phi_input->is_alive()) {
           if (!in_worklist_->Contains(phi_input->ssa_temp_index())) {
             worklist_.Add(phi_input);
             in_worklist_->Add(phi_input->ssa_temp_index());
@@ -2684,7 +2689,7 @@
           continue;
         }
 
-        if (value == NULL) {
+        if (value == nullptr) {
           value = input;
         } else if (value != input) {
           return false;  // This phi is not redundant.
@@ -2694,7 +2699,7 @@
 
     // All phis in the worklist are redundant and have the same computed
     // value on all code paths.
-    ASSERT(value != NULL);
+    ASSERT(value != nullptr);
     for (intptr_t i = 0; i < worklist_.length(); i++) {
       worklist_[i]->ReplaceUsesWith(value);
     }
@@ -2761,7 +2766,7 @@
     BlockEntryInstr* other_block = other->GetBlock();
 
     if (dom_block == other_block) {
-      for (Instruction* current = dom->next(); current != NULL;
+      for (Instruction* current = dom->next(); current != nullptr;
            current = current->next()) {
         if (current == other) {
           return true;
@@ -2780,7 +2785,7 @@
     ASSERT(phi->block() == replacement->block());
 
     congruency_worklist_.Clear();
-    if (in_worklist_ == NULL) {
+    if (in_worklist_ == nullptr) {
       in_worklist_ = new (Z) BitVector(Z, graph_->current_ssa_temp_index());
     } else {
       in_worklist_->Clear();
@@ -2869,7 +2874,7 @@
       PhiInstr* phi = phis_[i];
       if (!phi->HasUses() || EliminateRedundantPhi(phi)) {
         phi->UnuseAllInputs();
-        phis_[i] = NULL;
+        phis_[i] = nullptr;
       }
     }
 
@@ -2877,7 +2882,7 @@
     // graph.
     for (intptr_t i = 0; i < phis_.length(); i++) {
       PhiInstr* phi = phis_[i];
-      if ((phi != NULL) && (!phi->HasUses() || !EmitPhi(phi))) {
+      if ((phi != nullptr) && (!phi->HasUses() || !EmitPhi(phi))) {
         phi->UnuseAllInputs();
       }
     }
@@ -2887,7 +2892,7 @@
     ZoneGrowableArray<Definition*>* out =
         new (Z) ZoneGrowableArray<Definition*>(aliased_set_->max_place_id());
     for (intptr_t i = 0; i < aliased_set_->max_place_id(); i++) {
-      out->Add(NULL);
+      out->Add(nullptr);
     }
     return out;
   }
@@ -2951,7 +2956,7 @@
     if (current->AllowsCSE()) {
       Instruction* replacement = map->Lookup(current);
 
-      if (replacement != NULL) {
+      if (replacement != nullptr) {
         // Replace current with lookup result.
         ASSERT(replacement->AllowsCSE());
         graph->ReplaceCurrentInstruction(&it, current, replacement);
@@ -2994,7 +2999,7 @@
         exposed_stores_(graph_->postorder().length()) {
     const intptr_t num_blocks = graph_->postorder().length();
     for (intptr_t i = 0; i < num_blocks; i++) {
-      exposed_stores_.Add(NULL);
+      exposed_stores_.Add(nullptr);
     }
   }
 
@@ -3009,7 +3014,7 @@
 
     PointerSet<Place> map;
     AliasedSet* aliased_set = NumberPlaces(graph, &map, kOptimizeStores);
-    if ((aliased_set != NULL) && !aliased_set->IsEmpty()) {
+    if ((aliased_set != nullptr) && !aliased_set->IsEmpty()) {
       StoreOptimizer store_optimizer(graph, aliased_set, &map);
       store_optimizer.Optimize();
     }
@@ -3083,7 +3088,7 @@
       BitVector* live_in = live_in_[postorder_number];
       BitVector* live_out = live_out_[postorder_number];
 
-      ZoneGrowableArray<Instruction*>* exposed_stores = NULL;
+      ZoneGrowableArray<Instruction*>* exposed_stores = nullptr;
 
       // Iterate backwards starting at the last instruction.
       for (BackwardInstructionIterator instr_it(block); !instr_it.Done();
@@ -3113,7 +3118,7 @@
           } else if (!live_in->Contains(GetPlaceId(instr))) {
             // Mark this store as downward exposed: they are the only
             // candidates for the global store elimination.
-            if (exposed_stores == NULL) {
+            if (exposed_stores == nullptr) {
               const intptr_t kMaxExposedStoresInitialSize = 5;
               exposed_stores = new (zone) ZoneGrowableArray<Instruction*>(
                   Utils::Minimum(kMaxExposedStoresInitialSize,
@@ -3177,7 +3182,7 @@
 
         // Handle loads.
         Definition* defn = instr->AsDefinition();
-        if ((defn != NULL) && IsLoadEliminationCandidate(defn)) {
+        if ((defn != nullptr) && IsLoadEliminationCandidate(defn)) {
           const intptr_t alias = aliased_set_->LookupAliasId(place.ToAlias());
           live_in->AddAll(aliased_set_->GetKilledSet(alias));
           continue;
@@ -3202,7 +3207,7 @@
 
       ZoneGrowableArray<Instruction*>* exposed_stores =
           exposed_stores_[postorder_number];
-      if (exposed_stores == NULL) continue;  // No exposed stores.
+      if (exposed_stores == nullptr) continue;  // No exposed stores.
 
       // Iterate over candidate stores.
       for (intptr_t i = 0; i < exposed_stores->length(); ++i) {
@@ -3357,7 +3362,7 @@
 // instructions that write into fields of the allocated object.
 static bool IsAllocationSinkingCandidate(Definition* alloc,
                                          SafeUseCheck check_type) {
-  for (Value* use = alloc->input_use_list(); use != NULL;
+  for (Value* use = alloc->input_use_list(); use != nullptr;
        use = use->next_use()) {
     if (!IsSafeUse(use, check_type)) {
       if (FLAG_support_il_printer && FLAG_trace_optimization) {
@@ -3443,7 +3448,8 @@
 // There should be no environment uses. The pass replaced them with
 // MaterializeObject instructions.
 #ifdef DEBUG
-  for (Value* use = alloc->env_use_list(); use != NULL; use = use->next_use()) {
+  for (Value* use = alloc->env_use_list(); use != nullptr;
+       use = use->next_use()) {
     ASSERT(use->instruction()->IsMaterializeObject());
   }
 #endif
@@ -3515,7 +3521,7 @@
     Definition* alloc = candidates_[i];
 
     Value* next_use;
-    for (Value* use = alloc->input_use_list(); use != NULL; use = next_use) {
+    for (Value* use = alloc->input_use_list(); use != nullptr; use = next_use) {
       next_use = use->next_use();
       if (use->instruction()->IsMaterializeObject()) {
         use->BindTo(MaterializationFor(alloc, use->instruction()));
@@ -3589,7 +3595,7 @@
       }
 
 #ifdef DEBUG
-      for (Value* use = alloc->env_use_list(); use != NULL;
+      for (Value* use = alloc->env_use_list(); use != nullptr;
            use = use->next_use()) {
         ASSERT(use->instruction()->IsMaterializeObject());
       }
@@ -3599,8 +3605,8 @@
       // loads first and detach materializations from allocation's environment
       // use list: we will reconstruct it when we start removing
       // materializations.
-      alloc->set_env_use_list(NULL);
-      for (Value* use = alloc->input_use_list(); use != NULL;
+      alloc->set_env_use_list(nullptr);
+      for (Value* use = alloc->input_use_list(); use != nullptr;
            use = use->next_use()) {
         if (use->instruction()->IsLoadField() ||
             use->instruction()->IsLoadIndexed()) {
@@ -3766,13 +3772,13 @@
   }
 
   for (MaterializeObjectInstr* mat = exit->previous()->AsMaterializeObject();
-       mat != NULL; mat = mat->previous()->AsMaterializeObject()) {
+       mat != nullptr; mat = mat->previous()->AsMaterializeObject()) {
     if (mat->allocation() == alloc) {
       return mat;
     }
   }
 
-  return NULL;
+  return nullptr;
 }
 
 // Insert MaterializeObject instruction for the given allocation before
@@ -3892,7 +3898,8 @@
 // dematerialized and that are referenced by deopt environments that
 // don't contain this allocation explicitly.
 void AllocationSinking::ExitsCollector::Collect(Definition* alloc) {
-  for (Value* use = alloc->env_use_list(); use != NULL; use = use->next_use()) {
+  for (Value* use = alloc->env_use_list(); use != nullptr;
+       use = use->next_use()) {
     if (use->instruction()->IsMaterializeObject()) {
       AddInstruction(&exits_, ExitForMaterialization(
                                   use->instruction()->AsMaterializeObject()));
@@ -3905,10 +3912,10 @@
   // candidate and put it on the worklist so that we conservatively collect
   // all exits for that candidate as well, because they might see
   // this object.
-  for (Value* use = alloc->input_use_list(); use != NULL;
+  for (Value* use = alloc->input_use_list(); use != nullptr;
        use = use->next_use()) {
     Definition* obj = StoreDestination(use);
-    if ((obj != NULL) && (obj != alloc)) {
+    if ((obj != nullptr) && (obj != alloc)) {
       AddInstruction(&worklist_, obj);
     }
   }
@@ -3934,7 +3941,7 @@
   // Collect all fields and array elements that are written for this instance.
   auto slots = new (Z) ZoneGrowableArray<const Slot*>(5);
 
-  for (Value* use = alloc->input_use_list(); use != NULL;
+  for (Value* use = alloc->input_use_list(); use != nullptr;
        use = use->next_use()) {
     if (StoreDestination(use) == alloc) {
       // Allocation instructions cannot be used as inputs to themselves.
@@ -4270,7 +4277,7 @@
     // The following representation is used:
     // ParameterInstr => unknown
     // ConstantInstr => known constant
-    // NULL => non-constant
+    // nullptr => non-constant
     GrowableArray<Definition*>* idefs = catch_entry->initial_definitions();
     GrowableArray<Definition*> cdefs(idefs->length());
     cdefs.AddArray(*idefs);
@@ -4326,7 +4333,7 @@
 // Returns true iff this definition is used in a non-phi instruction.
 static bool HasRealUse(Definition* def) {
   // Environment uses are real (non-phi) uses.
-  if (def->env_use_list() != NULL) return true;
+  if (def->env_use_list() != nullptr) return true;
 
   for (Value::Iterator it(def->input_use_list()); !it.Done(); it.Advance()) {
     if (!it.Current()->instruction()->IsPhi()) return true;
@@ -4339,7 +4346,7 @@
   for (BlockIterator b = flow_graph->postorder_iterator(); !b.Done();
        b.Advance()) {
     JoinEntryInstr* join = b.Current()->AsJoinEntry();
-    if (join != NULL) {
+    if (join != nullptr) {
       for (PhiIterator it(join); !it.Done(); it.Advance()) {
         PhiInstr* phi = it.Current();
         // Phis that have uses and phis inside try blocks are
@@ -4359,7 +4366,7 @@
     for (intptr_t i = 0; i < phi->InputCount(); i++) {
       Value* val = phi->InputAt(i);
       PhiInstr* used_phi = val->definition()->AsPhi();
-      if ((used_phi != NULL) && !used_phi->is_alive()) {
+      if ((used_phi != nullptr) && !used_phi->is_alive()) {
         used_phi->mark_alive();
         live_phis.Add(used_phi);
       }
@@ -4512,7 +4519,7 @@
 void CheckStackOverflowElimination::EliminateStackOverflow(FlowGraph* graph) {
   const bool should_remove_all = IsMarkedWithNoInterrupts(graph->function());
 
-  CheckStackOverflowInstr* first_stack_overflow_instr = NULL;
+  CheckStackOverflowInstr* first_stack_overflow_instr = nullptr;
   for (BlockIterator block_it = graph->reverse_postorder_iterator();
        !block_it.Done(); block_it.Advance()) {
     BlockEntryInstr* entry = block_it.Current();
@@ -4526,7 +4533,7 @@
           continue;
         }
 
-        if (first_stack_overflow_instr == NULL) {
+        if (first_stack_overflow_instr == nullptr) {
           first_stack_overflow_instr = instr;
           ASSERT(!first_stack_overflow_instr->in_loop());
         }
@@ -4543,7 +4550,7 @@
     }
   }
 
-  if (first_stack_overflow_instr != NULL) {
+  if (first_stack_overflow_instr != nullptr) {
     first_stack_overflow_instr->RemoveFromGraph();
   }
 }
diff --git a/runtime/vm/compiler/backend/type_propagator.cc b/runtime/vm/compiler/backend/type_propagator.cc
index 371b80a..7ee69f5 100644
--- a/runtime/vm/compiler/backend/type_propagator.cc
+++ b/runtime/vm/compiler/backend/type_propagator.cc
@@ -32,7 +32,7 @@
                                 CompileType* compileType) {
   if (FLAG_trace_strong_mode_types) {
     const AbstractType* type = compileType->ToAbstractType();
-    if ((type != NULL) && !type->IsDynamicType()) {
+    if ((type != nullptr) && !type->IsDynamicType()) {
       TraceStrongModeType(instr, *type);
     }
   }
@@ -49,21 +49,21 @@
     : FlowGraphVisitor(flow_graph->reverse_postorder()),
       flow_graph_(flow_graph),
       is_aot_(CompilerState::Current().is_aot()),
-      visited_blocks_(new (flow_graph->zone())
+      visited_blocks_(new(flow_graph->zone())
                           BitVector(flow_graph->zone(),
                                     flow_graph->reverse_postorder().length())),
       types_(flow_graph->current_ssa_temp_index()),
-      in_worklist_(NULL),
-      asserts_(NULL),
-      collected_asserts_(NULL) {
+      in_worklist_(nullptr),
+      asserts_(nullptr),
+      collected_asserts_(nullptr) {
   for (intptr_t i = 0; i < flow_graph->current_ssa_temp_index(); i++) {
-    types_.Add(NULL);
+    types_.Add(nullptr);
   }
 
   asserts_ = new ZoneGrowableArray<AssertAssignableInstr*>(
       flow_graph->current_ssa_temp_index());
   for (intptr_t i = 0; i < flow_graph->current_ssa_temp_index(); i++) {
-    asserts_->Add(NULL);
+    asserts_->Add(nullptr);
   }
 
   collected_asserts_ = new ZoneGrowableArray<intptr_t>(10);
@@ -104,7 +104,7 @@
         Instruction* instr = it.Current()->instruction();
 
         Definition* use_defn = instr->AsDefinition();
-        if (use_defn != NULL) {
+        if (use_defn != nullptr) {
           AddToWorklist(use_defn);
         }
       }
@@ -143,7 +143,7 @@
   }
 
   GotoInstr* goto_instr = block->last_instruction()->AsGoto();
-  if (goto_instr != NULL) {
+  if (goto_instr != nullptr) {
     JoinEntryInstr* join = goto_instr->successor();
     intptr_t pred_index = join->IndexOfPredecessor(block);
     ASSERT(pred_index >= 0);
@@ -170,9 +170,9 @@
   const intptr_t index = def->ssa_temp_index();
 
   CompileType* type = types_[index];
-  if (type == NULL) {
+  if (type == nullptr) {
     type = types_[index] = def->Type();
-    ASSERT(type != NULL);
+    ASSERT(type != nullptr);
   }
   return type;
 }
@@ -271,7 +271,7 @@
 void FlowGraphTypePropagator::VisitCheckClassId(CheckClassIdInstr* check) {
   LoadClassIdInstr* load_cid =
       check->value()->definition()->OriginalDefinition()->AsLoadClassId();
-  if (load_cid != NULL && check->cids().IsSingleCid()) {
+  if (load_cid != nullptr && check->cids().IsSingleCid()) {
     SetCid(load_cid->object()->definition(), check->cids().cid_start);
   }
 }
@@ -385,7 +385,7 @@
 
 void FlowGraphTypePropagator::VisitBranch(BranchInstr* instr) {
   StrictCompareInstr* comparison = instr->comparison()->AsStrictCompare();
-  if (comparison == NULL) return;
+  if (comparison == nullptr) return;
   bool negated = comparison->kind() == Token::kNE_STRICT;
   LoadClassIdInstr* load_cid =
       comparison->InputAt(0)->definition()->AsLoadClassId();
@@ -394,15 +394,15 @@
   InstanceOfInstr* instance_of =
       comparison->InputAt(0)->definition()->AsInstanceOf();
   bool is_simple_instance_of =
-      (call != NULL) && call->MatchesCoreName(Symbols::_simpleInstanceOf());
-  if (load_cid != NULL && comparison->InputAt(1)->BindsToConstant()) {
+      (call != nullptr) && call->MatchesCoreName(Symbols::_simpleInstanceOf());
+  if (load_cid != nullptr && comparison->InputAt(1)->BindsToConstant()) {
     intptr_t cid = Smi::Cast(comparison->InputAt(1)->BoundConstant()).Value();
     BlockEntryInstr* true_successor =
         negated ? instr->false_successor() : instr->true_successor();
     EnsureMoreAccurateRedefinition(true_successor,
                                    load_cid->object()->definition(),
                                    CompileType::FromCid(cid));
-  } else if ((is_simple_instance_of || (instance_of != NULL)) &&
+  } else if ((is_simple_instance_of || (instance_of != nullptr)) &&
              comparison->InputAt(1)->BindsToConstant() &&
              comparison->InputAt(1)->BoundConstant().IsBool()) {
     if (comparison->InputAt(1)->BoundConstant().ptr() == Bool::False().ptr()) {
@@ -410,8 +410,8 @@
     }
     BlockEntryInstr* true_successor =
         negated ? instr->false_successor() : instr->true_successor();
-    const AbstractType* type = NULL;
-    Definition* left = NULL;
+    const AbstractType* type = nullptr;
+    Definition* left = nullptr;
     if (is_simple_instance_of) {
       ASSERT(call->ArgumentAt(1)->IsConstant());
       const Object& type_obj = call->ArgumentAt(1)->AsConstant()->value();
@@ -507,9 +507,9 @@
 
     // If this is the first type assertion checking the given value, record it.
     AssertAssignableInstr* assert = instr->AsAssertAssignable();
-    if (assert != NULL) {
+    if (assert != nullptr) {
       Definition* defn = assert->value()->definition()->OriginalDefinition();
-      if ((*asserts_)[defn->ssa_temp_index()] == NULL) {
+      if ((*asserts_)[defn->ssa_temp_index()] == nullptr) {
         (*asserts_)[defn->ssa_temp_index()] = assert;
         collected_asserts_->Add(defn->ssa_temp_index());
       }
@@ -517,7 +517,7 @@
   }
 
   for (intptr_t i = 0; i < collected_asserts_->length(); i++) {
-    (*asserts_)[(*collected_asserts_)[i]] = NULL;
+    (*asserts_)[(*collected_asserts_)[i]] = nullptr;
   }
 
   collected_asserts_->TruncateTo(0);
@@ -532,12 +532,12 @@
   Definition* defn = check->InputAt(0)->definition()->OriginalDefinition();
 
   AssertAssignableInstr* assert = (*asserts_)[defn->ssa_temp_index()];
-  if ((assert == NULL) || (assert == kStrengthenedAssertMarker)) {
+  if ((assert == nullptr) || (assert == kStrengthenedAssertMarker)) {
     return;
   }
-  ASSERT(assert->env() != NULL);
+  ASSERT(assert->env() != nullptr);
 
-  Instruction* check_clone = NULL;
+  Instruction* check_clone = nullptr;
   if (check->IsCheckSmi()) {
     check_clone = new CheckSmiInstr(assert->value()->Copy(zone()),
                                     assert->deopt_id(), check->source());
@@ -547,7 +547,7 @@
         new CheckClassInstr(assert->value()->Copy(zone()), assert->deopt_id(),
                             check->AsCheckClass()->cids(), check->source());
   }
-  ASSERT(check_clone != NULL);
+  ASSERT(check_clone != nullptr);
   check_clone->InsertBefore(assert);
   assert->env()->DeepCopyTo(zone(), check_clone);
 
@@ -746,11 +746,11 @@
   if (cid_ == kIllegalCid) {
     // Make sure to initialize cid_ for Null type to consistently return
     // kNullCid.
-    if ((type_ != NULL) && type_->IsNullType()) {
+    if ((type_ != nullptr) && type_->IsNullType()) {
       cid_ = kNullCid;
     }
     // Same for sentinel.
-    if ((type_ != NULL) && type_->IsSentinelType()) {
+    if ((type_ != nullptr) && type_->IsSentinelType()) {
       cid_ = kSentinelCid;
     }
   }
@@ -765,7 +765,7 @@
 
 intptr_t CompileType::ToNullableCid() {
   if (cid_ == kIllegalCid) {
-    if (type_ == NULL) {
+    if (type_ == nullptr) {
       // Type propagation is turned off or has not yet run.
       return kDynamicCid;
     } else if (type_->IsVoidType()) {
@@ -808,7 +808,7 @@
 }
 
 const AbstractType* CompileType::ToAbstractType() {
-  if (type_ == NULL) {
+  if (type_ == nullptr) {
     // Type propagation has not run. Return dynamic-type.
     if (cid_ == kIllegalCid) {
       return &Object::dynamic_type();
@@ -990,7 +990,7 @@
     const Class& cls =
         Class::Handle(IsolateGroup::Current()->class_table()->At(cid_));
     type_name = String::Handle(cls.ScrubbedName()).ToCString();
-  } else if (type_ != NULL) {
+  } else if (type_ != nullptr) {
     type_name = type_->IsDynamicType()
                     ? "*"
                     : String::Handle(type_->ScrubbedName()).ToCString();
@@ -1010,7 +1010,7 @@
 }
 
 CompileType* Value::Type() {
-  if (reaching_type_ == NULL) {
+  if (reaching_type_ == nullptr) {
     reaching_type_ = definition()->Type();
   }
   return reaching_type_;
@@ -1060,7 +1060,7 @@
 }
 
 CompileType RedefinitionInstr::ComputeType() const {
-  if (constrained_type_ != NULL) {
+  if (constrained_type_ != nullptr) {
     // Check if the type associated with this redefinition is more specific
     // than the type of its input. If yes, return it. Otherwise, fall back
     // to the input's type.
@@ -1144,7 +1144,7 @@
   // However there are parameters that are known to match their declared type:
   // for example receiver.
   GraphEntryInstr* graph_entry = block_->AsGraphEntry();
-  if (graph_entry == NULL) {
+  if (graph_entry == nullptr) {
     if (auto function_entry = block_->AsFunctionEntry()) {
       graph_entry = function_entry->graph_entry();
     } else if (auto osr_entry = block_->AsOsrEntry()) {
diff --git a/runtime/vm/compiler/call_specializer.cc b/runtime/vm/compiler/call_specializer.cc
index 692b5a6..989ecb6 100644
--- a/runtime/vm/compiler/call_specializer.cc
+++ b/runtime/vm/compiler/call_specializer.cc
@@ -66,7 +66,7 @@
 // Attempts to convert an instance call (IC call) using propagated class-ids,
 // e.g., receiver class id, guarded-cid, or by guessing cid-s.
 void CallSpecializer::ApplyClassIds() {
-  ASSERT(current_iterator_ == NULL);
+  ASSERT(current_iterator_ == nullptr);
   for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator();
        !block_it.Done(); block_it.Advance()) {
     thread()->CheckForSafepoint();
@@ -89,7 +89,7 @@
         SpecializePolymorphicInstanceCall(instr->AsPolymorphicInstanceCall());
       }
     }
-    current_iterator_ = NULL;
+    current_iterator_ = nullptr;
   }
 }
 
@@ -192,7 +192,7 @@
       FlowGraphCompiler::ResolveCallTargetsForReceiverCid(
           receiver_cid, String::Handle(zone(), ic_data.target_name()),
           Array::Handle(zone(), ic_data.arguments_descriptor()));
-  if (targets == NULL) {
+  if (targets == nullptr) {
     // No specialization.
     return;
   }
@@ -302,8 +302,8 @@
   // or results of string-from-char-code.
   Definition* left = call->ArgumentAt(0);
   Definition* right = call->ArgumentAt(1);
-  Value* left_val = NULL;
-  Definition* to_remove_left = NULL;
+  Value* left_val = nullptr;
+  Definition* to_remove_left = nullptr;
   if (IsLengthOneString(right)) {
     // Swap, since we know that both arguments are strings
     Definition* temp = left;
@@ -331,8 +331,8 @@
       UNREACHABLE();
     }
 
-    Definition* to_remove_right = NULL;
-    Value* right_val = NULL;
+    Definition* to_remove_right = nullptr;
+    Value* right_val = nullptr;
     if (right->IsOneByteStringFromCharCode()) {
       // Skip string-from-char-code, and use its input as right value.
       OneByteStringFromCharCodeInstr* right_instr =
@@ -356,13 +356,13 @@
     ReplaceCall(call, comp);
 
     // Remove dead instructions.
-    if ((to_remove_left != NULL) &&
-        (to_remove_left->input_use_list() == NULL)) {
+    if ((to_remove_left != nullptr) &&
+        (to_remove_left->input_use_list() == nullptr)) {
       to_remove_left->ReplaceUsesWith(flow_graph()->constant_null());
       to_remove_left->RemoveFromGraph();
     }
-    if ((to_remove_right != NULL) &&
-        (to_remove_right->input_use_list() == NULL)) {
+    if ((to_remove_right != nullptr) &&
+        (to_remove_right->input_use_list() == nullptr)) {
       to_remove_right->ReplaceUsesWith(flow_graph()->constant_null());
       to_remove_right->RemoveFromGraph();
     }
@@ -432,8 +432,8 @@
       // be hoisted out of this function.
       ConstantInstr* right_const = right->AsConstant();
       ConstantInstr* left_const = left->AsConstant();
-      if ((right_const != NULL && right_const->value().IsNull()) ||
-          (left_const != NULL && left_const->value().IsNull())) {
+      if ((right_const != nullptr && right_const->value().IsNull()) ||
+          (left_const != nullptr && left_const->value().IsNull())) {
         StrictCompareInstr* comp = new (Z)
             StrictCompareInstr(call->source(), Token::kEQ_STRICT,
                                new (Z) Value(left), new (Z) Value(right),
@@ -696,7 +696,7 @@
   ASSERT(call->type_args_len() == 0);
   ASSERT(call->ArgumentCount() == 1);
   Definition* input = call->ArgumentAt(0);
-  Definition* unary_op = NULL;
+  Definition* unary_op = nullptr;
   if (call->Targets().ReceiverIs(kSmiCid)) {
     InsertBefore(call,
                  new (Z) CheckSmiInstr(new (Z) Value(input), call->deopt_id(),
@@ -716,7 +716,7 @@
   } else {
     return false;
   }
-  ASSERT(unary_op != NULL);
+  ASSERT(unary_op != nullptr);
   ReplaceCall(call, unary_op);
   return true;
 }
@@ -1005,7 +1005,7 @@
         ASSERT(call->HasICData());
         const ICData& ic_data = *call->ic_data();
         Definition* input = call->ArgumentAt(0);
-        Definition* d2i_instr = NULL;
+        Definition* d2i_instr = nullptr;
         if (ic_data.HasDeoptReason(ICData::kDeoptDoubleToSmi)) {
           // Do not repeatedly deoptimize because result didn't fit into Smi.
           d2i_instr = new (Z) DoubleToIntegerInstr(
@@ -1203,8 +1203,8 @@
 void CallSpecializer::ReplaceWithInstanceOf(InstanceCallInstr* call) {
   ASSERT(Token::IsTypeTestOperator(call->token_kind()));
   Definition* left = call->ArgumentAt(0);
-  Definition* instantiator_type_args = NULL;
-  Definition* function_type_args = NULL;
+  Definition* instantiator_type_args = nullptr;
+  Definition* function_type_args = nullptr;
   AbstractType& type = AbstractType::ZoneHandle(Z);
   ASSERT(call->type_args_len() == 0);
   if (call->ArgumentCount() == 2) {
@@ -1226,7 +1226,7 @@
   intptr_t type_cid;
   if (TypeCheckAsClassEquality(type, &type_cid)) {
     LoadClassIdInstr* left_cid = new (Z) LoadClassIdInstr(new (Z) Value(left));
-    InsertBefore(call, left_cid, NULL, FlowGraph::kValue);
+    InsertBefore(call, left_cid, nullptr, FlowGraph::kValue);
     ConstantInstr* cid =
         flow_graph()->GetConstant(Smi::Handle(Z, Smi::New(type_cid)));
 
diff --git a/runtime/vm/compiler/cha_test.cc b/runtime/vm/compiler/cha_test.cc
index b4ea6ce..98cfd12 100644
--- a/runtime/vm/compiler/cha_test.cc
+++ b/runtime/vm/compiler/cha_test.cc
@@ -30,7 +30,7 @@
       "  bar() { }"
       "}\n";
 
-  TestCase::LoadTestScript(kScriptChars, NULL);
+  TestCase::LoadTestScript(kScriptChars, nullptr);
 
   TransitionNativeToVM transition(thread);
   EXPECT(ClassFinalizer::ProcessPendingClasses());
diff --git a/runtime/vm/compiler/compiler_pass.cc b/runtime/vm/compiler/compiler_pass.cc
index 35c5b74d2..192a845 100644
--- a/runtime/vm/compiler/compiler_pass.cc
+++ b/runtime/vm/compiler/compiler_pass.cc
@@ -57,8 +57,8 @@
     : thread(thread),
       precompiler(precompiler),
       inlining_depth(0),
-      sinking(NULL),
-      call_specializer(NULL),
+      sinking(nullptr),
+      call_specializer(nullptr),
       speculative_policy(speculative_policy),
       reorder_blocks(false),
       sticky_flags(0),
@@ -74,7 +74,7 @@
   // larger than the length of |inline_id_to_token_pos| by one.
 }
 
-CompilerPass* CompilerPass::passes_[CompilerPass::kNumPasses] = {NULL};
+CompilerPass* CompilerPass::passes_[CompilerPass::kNumPasses] = {nullptr};
 uint8_t CompilerPass::flags_[CompilerPass::kNumPasses] = {0};
 
 DEFINE_OPTION_HANDLER(CompilerPass::ParseFiltersFromFlag,
@@ -124,14 +124,14 @@
 }
 
 void CompilerPass::ParseFilters(const char* filter, uint8_t* pass_flags) {
-  if (filter == NULL || *filter == 0) {
+  if (filter == nullptr || *filter == 0) {
     return;
   }
 
   if (strcmp(filter, "help") == 0) {
     OS::PrintErr("%s", kCompilerPassesUsage);
     for (intptr_t i = 0; i < kNumPasses; i++) {
-      if (passes_[i] != NULL) {
+      if (passes_[i] != nullptr) {
         OS::PrintErr("  %s\n", passes_[i]->name());
       }
     }
@@ -190,7 +190,7 @@
   if (length != 0) {
     char* pass_name = Utils::StrNDup(start, length);
     CompilerPass* pass = FindPassByName(pass_name);
-    if (pass != NULL) {
+    if (pass != nullptr) {
       pass_flags[pass->id()] |= flags;
     } else {
       OS::PrintErr("Unknown compiler pass: %s\n", pass_name);
@@ -536,7 +536,7 @@
 });
 
 COMPILER_PASS(AllocationSinking_DetachMaterializations, {
-  if (state->sinking != NULL) {
+  if (state->sinking != nullptr) {
     // Remove all MaterializeObject instructions inserted by allocation
     // sinking from the flow graph and let them float on the side
     // referenced only from environments. Register allocator will consider
diff --git a/runtime/vm/compiler/compiler_pass.h b/runtime/vm/compiler/compiler_pass.h
index d9159e0..21727dd 100644
--- a/runtime/vm/compiler/compiler_pass.h
+++ b/runtime/vm/compiler/compiler_pass.h
@@ -71,7 +71,7 @@
   CompilerPassState(Thread* thread,
                     FlowGraph* flow_graph,
                     SpeculativeInliningPolicy* speculative_policy,
-                    Precompiler* precompiler = NULL);
+                    Precompiler* precompiler = nullptr);
 
   FlowGraph* flow_graph() const { return flow_graph_; }
 
@@ -117,7 +117,7 @@
 #undef ADD_ONE
 
   CompilerPass(Id id, const char* name) : id_(id), name_(name) {
-    ASSERT(passes_[id] == NULL);
+    ASSERT(passes_[id] == nullptr);
     passes_[id] = this;
 
     // By default print the final flow-graph after the register allocation.
@@ -192,11 +192,11 @@
  private:
   static CompilerPass* FindPassByName(const char* name) {
     for (intptr_t i = 0; i < kNumPasses; i++) {
-      if ((passes_[i] != NULL) && (strcmp(passes_[i]->name_, name) == 0)) {
+      if ((passes_[i] != nullptr) && (strcmp(passes_[i]->name_, name) == 0)) {
         return passes_[i];
       }
     }
-    return NULL;
+    return nullptr;
   }
 
   void PrintGraph(CompilerPassState* state, Flag mask, intptr_t round) const;
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
index 303c301..f1981c3 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
@@ -46,9 +46,9 @@
 }
 
 Fragment& Fragment::operator<<=(Instruction* next) {
-  if (entry == NULL) {
+  if (entry == nullptr) {
     entry = current = next;
-  } else if (current != NULL) {
+  } else if (current != nullptr) {
     current->LinkTo(next);
     current = next;
   }
@@ -56,7 +56,7 @@
 }
 
 void Fragment::Prepend(Instruction* start) {
-  if (entry == NULL) {
+  if (entry == nullptr) {
     entry = current = start;
   } else {
     start->LinkTo(entry);
@@ -65,8 +65,8 @@
 }
 
 Fragment Fragment::closed() {
-  ASSERT(entry != NULL);
-  return Fragment(entry, NULL);
+  ASSERT(entry != nullptr);
+  return Fragment(entry, nullptr);
 }
 
 Fragment operator+(const Fragment& first, const Fragment& second) {
@@ -713,7 +713,7 @@
 
 void BaseFlowGraphBuilder::SetTempIndex(Definition* definition) {
   definition->set_temp_index(
-      stack_ == NULL ? 0 : stack_->definition()->temp_index() + 1);
+      stack_ == nullptr ? 0 : stack_->definition()->temp_index() + 1);
 }
 
 void BaseFlowGraphBuilder::Push(Definition* definition) {
@@ -732,25 +732,25 @@
 }
 
 Value* BaseFlowGraphBuilder::Pop() {
-  ASSERT(stack_ != NULL);
+  ASSERT(stack_ != nullptr);
   Value* value = stack_;
   stack_ = value->next_use();
-  if (stack_ != NULL) stack_->set_previous_use(NULL);
+  if (stack_ != nullptr) stack_->set_previous_use(nullptr);
 
-  value->set_next_use(NULL);
-  value->set_previous_use(NULL);
+  value->set_next_use(nullptr);
+  value->set_previous_use(nullptr);
   value->definition()->ClearSSATempIndex();
   return value;
 }
 
 Fragment BaseFlowGraphBuilder::Drop() {
-  ASSERT(stack_ != NULL);
+  ASSERT(stack_ != nullptr);
   Fragment instructions;
   Definition* definition = stack_->definition();
   // The SSA renaming implementation doesn't like [LoadLocal]s without a
   // tempindex.
   if (definition->HasSSATemp() || definition->IsLoadLocal()) {
-    instructions <<= new (Z) DropTempsInstr(1, NULL);
+    instructions <<= new (Z) DropTempsInstr(1, nullptr);
   } else {
     definition->ClearTempIndex();
   }
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.h b/runtime/vm/compiler/frontend/base_flow_graph_builder.h
index a4d17a2..b08fcf1 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.h
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.h
@@ -112,7 +112,7 @@
 //
 // kNone:
 //
-//   There is no unchecked entrypoint: the unchecked entry is set to NULL in
+//   There is no unchecked entrypoint: the unchecked entry is set to nullptr in
 //   the 'GraphEntryInstr'.
 //
 // kSeparate:
@@ -153,7 +153,7 @@
         last_used_block_id_(last_used_block_id),
         current_try_index_(kInvalidTryIndex),
         next_used_try_index_(0),
-        stack_(NULL),
+        stack_(nullptr),
         exit_collector_(exit_collector),
         inlining_unchecked_entry_(inlining_unchecked_entry),
         saved_args_desc_array_(
@@ -315,7 +315,7 @@
 
   intptr_t GetNextDeoptId() {
     intptr_t deopt_id = thread_->compiler_state().GetNextDeoptId();
-    if (context_level_array_ != NULL) {
+    if (context_level_array_ != nullptr) {
       intptr_t level = context_depth_;
       context_level_array_->Add(deopt_id);
       context_level_array_->Add(level);
diff --git a/runtime/vm/compiler/frontend/flow_graph_builder.cc b/runtime/vm/compiler/frontend/flow_graph_builder.cc
index b580536..78c22bb 100644
--- a/runtime/vm/compiler/frontend/flow_graph_builder.cc
+++ b/runtime/vm/compiler/frontend/flow_graph_builder.cc
@@ -50,7 +50,7 @@
       callee_graph->current_ssa_temp_index());
 
   // Attach the outer environment on each instruction in the callee graph.
-  ASSERT(call_->env() != NULL);
+  ASSERT(call_->env() != nullptr);
   ASSERT(call_->deopt_id() != DeoptId::kNone);
 
   auto zone = callee_graph->zone();
@@ -92,7 +92,7 @@
 }
 
 void InlineExitCollector::AddExit(ReturnInstr* exit) {
-  Data data = {NULL, exit};
+  Data data = {nullptr, exit};
   exits_.Add(data);
 }
 
@@ -113,7 +113,7 @@
   int j = 0;
   for (int i = 0; i < exits_.length(); ++i) {
     BlockEntryInstr* block = exits_[i].exit_return->GetBlock();
-    if ((block != NULL) && (0 <= block->postorder_number()) &&
+    if ((block != nullptr) && (0 <= block->postorder_number()) &&
         (block->postorder_number() < postorder.length()) &&
         (postorder[block->postorder_number()] == block)) {
       if (i != j) {
@@ -145,7 +145,7 @@
     ReturnAt(0)->UnuseAllInputs();
     *exit_block = ExitBlockAt(0);
     *last_instruction = LastInstructionAt(0);
-    return call_->HasUses() ? ValueAt(0)->definition() : NULL;
+    return call_->HasUses() ? ValueAt(0)->definition() : nullptr;
   } else {
     ASSERT(num_exits > 1);
     // Create a join of the returns.
@@ -180,7 +180,7 @@
       // Collect the block's dominators.
       block_dominators.Clear();
       BlockEntryInstr* dominator = ExitBlockAt(i)->dominator();
-      while (dominator != NULL) {
+      while (dominator != nullptr) {
         block_dominators.Add(dominator);
         dominator = dominator->dominator();
       }
@@ -233,20 +233,20 @@
       for (intptr_t i = 0; i < num_exits; ++i) {
         ReturnAt(i)->UnuseAllInputs();
       }
-      join->InheritDeoptTargetAfter(caller_graph_, call_, NULL);
-      return NULL;
+      join->InheritDeoptTargetAfter(caller_graph_, call_, nullptr);
+      return nullptr;
     }
   }
 }
 
 void InlineExitCollector::ReplaceCall(BlockEntryInstr* callee_entry) {
-  ASSERT(call_->previous() != NULL);
-  ASSERT(call_->next() != NULL);
+  ASSERT(call_->previous() != nullptr);
+  ASSERT(call_->next() != nullptr);
   BlockEntryInstr* call_block = call_->GetBlock();
 
   // Insert the callee graph into the caller graph.
-  BlockEntryInstr* callee_exit = NULL;
-  Instruction* callee_last_instruction = NULL;
+  BlockEntryInstr* callee_exit = nullptr;
+  Instruction* callee_last_instruction = nullptr;
 
   if (exits_.length() == 0) {
     // Handle the case when there are no normal return exits from the callee
@@ -258,7 +258,7 @@
     TargetEntryInstr* false_block = new (Z) TargetEntryInstr(
         caller_graph_->allocate_block_id(), call_block->try_index(),
         CompilerState::Current().GetNextDeoptId());
-    false_block->InheritDeoptTargetAfter(caller_graph_, call_, NULL);
+    false_block->InheritDeoptTargetAfter(caller_graph_, call_, nullptr);
     false_block->LinkTo(call_->next());
     call_block->ReplaceAsPredecessorWith(false_block);
 
@@ -302,7 +302,7 @@
   } else {
     Definition* callee_result = JoinReturns(
         &callee_exit, &callee_last_instruction, call_block->try_index());
-    if (callee_result != NULL) {
+    if (callee_result != nullptr) {
       call_->ReplaceUsesWith(callee_result);
     }
     if (callee_last_instruction == callee_entry) {
diff --git a/runtime/vm/compiler/frontend/flow_graph_builder.h b/runtime/vm/compiler/frontend/flow_graph_builder.h
index 25d74d6..7f6343f 100644
--- a/runtime/vm/compiler/frontend/flow_graph_builder.h
+++ b/runtime/vm/compiler/frontend/flow_graph_builder.h
@@ -53,7 +53,7 @@
   };
 
   BlockEntryInstr* ExitBlockAt(intptr_t i) const {
-    ASSERT(exits_[i].exit_block != NULL);
+    ASSERT(exits_[i].exit_block != nullptr);
     return exits_[i].exit_block;
   }
 
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.cc b/runtime/vm/compiler/frontend/kernel_to_il.cc
index 1624bb6..7e2a63f 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.cc
+++ b/runtime/vm/compiler/frontend/kernel_to_il.cc
@@ -86,13 +86,13 @@
       catch_depth_(0),
       for_in_depth_(0),
       block_expression_depth_(0),
-      graph_entry_(NULL),
-      scopes_(NULL),
-      breakable_block_(NULL),
-      switch_block_(NULL),
-      try_catch_block_(NULL),
-      try_finally_block_(NULL),
-      catch_block_(NULL),
+      graph_entry_(nullptr),
+      scopes_(nullptr),
+      breakable_block_(nullptr),
+      switch_block_(nullptr),
+      try_catch_block_(nullptr),
+      try_finally_block_(nullptr),
+      catch_block_(nullptr),
       prepend_type_arguments_(Function::ZoneHandle(zone_)),
       throw_new_null_assertion_(Function::ZoneHandle(zone_)) {
   const Script& script =
@@ -192,7 +192,7 @@
   const Function& function = parsed_function_->function();
 
   if (function.IsGeneric() || function.HasGenericParent()) {
-    ASSERT(parsed_function_->function_type_arguments() != NULL);
+    ASSERT(parsed_function_->function_type_arguments() != nullptr);
     instructions += LoadLocal(parsed_function_->function_type_arguments());
   } else {
     instructions += NullConstant();
@@ -369,7 +369,7 @@
       InstructionSource(position), name, kind, std::move(arguments),
       type_args_len, argument_names, checked_argument_count, ic_data_array_,
       GetNextDeoptId(), interface_target, tearoff_interface_target);
-  if ((result_type != NULL) && !result_type->IsTrivial()) {
+  if ((result_type != nullptr) && !result_type->IsTrivial()) {
     call->SetResultType(Z, result_type->ToCompileType(Z));
   }
   if (use_unchecked_entry) {
@@ -621,11 +621,11 @@
     intptr_t argument_count,
     const InferredTypeMetadata* result_type) {
   if (call->InitResultType(Z)) {
-    ASSERT((result_type == NULL) || (result_type->cid == kDynamicCid) ||
+    ASSERT((result_type == nullptr) || (result_type->cid == kDynamicCid) ||
            (result_type->cid == call->result_cid()));
     return;
   }
-  if ((result_type != NULL) && !result_type->IsTrivial()) {
+  if ((result_type != nullptr) && !result_type->IsTrivial()) {
     call->SetResultType(Z, result_type->ToCompileType(Z));
   }
 }
@@ -776,7 +776,7 @@
 
 LocalVariable* FlowGraphBuilder::LookupVariable(intptr_t kernel_offset) {
   LocalVariable* local = scopes_->locals.Lookup(kernel_offset);
-  ASSERT(local != NULL);
+  ASSERT(local != nullptr);
   ASSERT(local->kernel_offset() == kernel_offset);
   return local;
 }
@@ -1747,7 +1747,7 @@
 
   receiver_variable->set_is_captured();
   //  receiver_variable->set_is_final();
-  LocalScope* scope = new (Z) LocalScope(NULL, 0, 0);
+  LocalScope* scope = new (Z) LocalScope(nullptr, 0, 0);
   scope->set_context_level(0);
   scope->AddVariable(receiver_variable);
   scope->AddContextVariable(receiver_variable);
@@ -2027,7 +2027,7 @@
     }
 
     const AbstractType* target_type = &param->type();
-    if (forwarding_target != NULL) {
+    if (forwarding_target != nullptr) {
       // We add 1 to the parameter index to account for the receiver.
       target_type =
           &AbstractType::ZoneHandle(Z, forwarding_target->ParameterTypeAt(i));
@@ -2176,7 +2176,7 @@
   LocalVariable* array = MakeTemporary();
   if (receiver_index > 0) {
     LocalVariable* type_args = parsed_function_->function_type_arguments();
-    ASSERT(type_args != NULL);
+    ASSERT(type_args != nullptr);
     body += LoadLocal(array);
     body += IntConstant(0);
     body += LoadLocal(type_args);
@@ -3733,7 +3733,7 @@
           TranslateInstantiatedTypeArguments(instantiated_type_arguments);
     } else {
       type_args_len = function.NumTypeParameters();
-      ASSERT(parsed_function_->function_type_arguments() != NULL);
+      ASSERT(parsed_function_->function_type_arguments() != nullptr);
       closure += LoadLocal(parsed_function_->function_type_arguments());
     }
   } else if (target.IsFactory()) {
@@ -3779,7 +3779,7 @@
 
   closure += StaticCall(TokenPosition::kNoSource, target, argument_count,
                         argument_names, ICData::kNoRebind,
-                        /* result_type = */ NULL, type_args_len);
+                        /* result_type = */ nullptr, type_args_len);
 
   if (target.IsGenerativeConstructor()) {
     // Drop result of constructor invocation, leave receiver
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.h b/runtime/vm/compiler/frontend/kernel_to_il.h
index e9ccbca..3810737 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.h
+++ b/runtime/vm/compiler/frontend/kernel_to_il.h
@@ -214,7 +214,7 @@
                       intptr_t argument_count,
                       const Array& argument_names,
                       ICData::RebindRule rebind_rule,
-                      const InferredTypeMetadata* result_type = NULL,
+                      const InferredTypeMetadata* result_type = nullptr,
                       intptr_t type_args_len = 0,
                       bool use_unchecked_entry = false);
   Fragment StringInterpolateSingle(TokenPosition position);
@@ -720,7 +720,7 @@
         context_depth_(builder->context_depth_),
         try_index_(builder->CurrentTryIndex()) {
     builder_->switch_block_ = this;
-    if (outer_ != NULL) {
+    if (outer_ != nullptr) {
       depth_ = outer_->depth_ + outer_->case_count_;
     } else {
       depth_ = 0;
@@ -729,14 +729,14 @@
   ~SwitchBlock() { builder_->switch_block_ = outer_; }
 
   bool HadJumper(intptr_t case_num) {
-    return destinations_.Lookup(case_num) != NULL;
+    return destinations_.Lookup(case_num) != nullptr;
   }
 
   // Get destination via absolute target number (i.e. the correct destination
   // is not necessarily in this block).
   JoinEntryInstr* Destination(intptr_t target_index,
-                              TryFinallyBlock** outer_finally = NULL,
-                              intptr_t* context_depth = NULL) {
+                              TryFinallyBlock** outer_finally = nullptr,
+                              intptr_t* context_depth = nullptr) {
     // Verify consistency of program state.
     ASSERT(builder_->switch_block_ == this);
     // Find corresponding destination.
@@ -747,7 +747,7 @@
     }
 
     // Set the outer finally block.
-    if (outer_finally != NULL) {
+    if (outer_finally != nullptr) {
       *outer_finally = block->outer_finally_;
       *context_depth = block->context_depth_;
     }
@@ -759,10 +759,10 @@
   // Get destination via relative target number (i.e. relative to this block,
   // 0 is first case in this block etc).
   JoinEntryInstr* DestinationDirect(intptr_t case_num,
-                                    TryFinallyBlock** outer_finally = NULL,
-                                    intptr_t* context_depth = NULL) {
+                                    TryFinallyBlock** outer_finally = nullptr,
+                                    intptr_t* context_depth = nullptr) {
     // Set the outer finally block.
-    if (outer_finally != NULL) {
+    if (outer_finally != nullptr) {
       *outer_finally = outer_finally_;
       *context_depth = context_depth_;
     }
@@ -774,7 +774,7 @@
  private:
   JoinEntryInstr* EnsureDestination(intptr_t case_num) {
     JoinEntryInstr* cached_inst = destinations_.Lookup(case_num);
-    if (cached_inst == NULL) {
+    if (cached_inst == nullptr) {
       JoinEntryInstr* inst = builder_->BuildJoinEntry(try_index_);
       destinations_.Insert(case_num, inst);
       return inst;
@@ -862,11 +862,11 @@
   explicit BreakableBlock(FlowGraphBuilder* builder)
       : builder_(builder),
         outer_(builder->breakable_block_),
-        destination_(NULL),
+        destination_(nullptr),
         outer_finally_(builder->try_finally_block_),
         context_depth_(builder->context_depth_),
         try_index_(builder->CurrentTryIndex()) {
-    if (builder_->breakable_block_ == NULL) {
+    if (builder_->breakable_block_ == nullptr) {
       index_ = 0;
     } else {
       index_ = builder_->breakable_block_->index_ + 1;
@@ -875,7 +875,7 @@
   }
   ~BreakableBlock() { builder_->breakable_block_ = outer_; }
 
-  bool HadJumper() { return destination_ != NULL; }
+  bool HadJumper() { return destination_ != nullptr; }
 
   JoinEntryInstr* destination() { return destination_; }
 
@@ -897,7 +897,7 @@
 
  private:
   JoinEntryInstr* EnsureDestination() {
-    if (destination_ == NULL) {
+    if (destination_ == nullptr) {
       destination_ = builder_->BuildJoinEntry(try_index_);
     }
     return destination_;
diff --git a/runtime/vm/compiler/frontend/kernel_translation_helper.cc b/runtime/vm/compiler/frontend/kernel_translation_helper.cc
index 6192c32..c292d18 100644
--- a/runtime/vm/compiler/frontend/kernel_translation_helper.cc
+++ b/runtime/vm/compiler/frontend/kernel_translation_helper.cc
@@ -3008,7 +3008,7 @@
 }
 
 intptr_t ActiveClass::MemberTypeParameterCount(Zone* zone) {
-  ASSERT(member != NULL);
+  ASSERT(member != nullptr);
   if (member->IsFactory()) {
     return klass->NumTypeParameters();
   } else if (member->IsMethodExtractor()) {
@@ -3067,7 +3067,7 @@
 
   const TypeArguments* old_params = active_class->local_type_parameters;
   const intptr_t old_param_count =
-      old_params == NULL ? 0 : old_params->Length();
+      old_params == nullptr ? 0 : old_params->Length();
   const TypeArguments& extended_params = TypeArguments::Handle(
       Z, TypeArguments::New(old_param_count + num_new_params));
 
@@ -3115,7 +3115,7 @@
       constant_reader_(constant_reader),
       translation_helper_(helper->translation_helper_),
       active_class_(active_class),
-      type_parameter_scope_(NULL),
+      type_parameter_scope_(nullptr),
       inferred_type_metadata_helper_(helper_, constant_reader_),
       unboxing_info_metadata_helper_(helper_),
       zone_(translation_helper_.zone()),
@@ -3240,7 +3240,7 @@
 
 void TypeTranslator::BuildFunctionType(bool simple) {
   const intptr_t num_enclosing_type_arguments =
-      active_class_->enclosing != NULL
+      active_class_->enclosing != nullptr
           ? active_class_->enclosing->NumTypeArguments()
           : 0;
   Nullability nullability = helper_->ReadNullability();
@@ -3468,7 +3468,7 @@
       }
     }
   }
-  if (active_class_->local_type_parameters != NULL) {
+  if (active_class_->local_type_parameters != nullptr) {
     if (parameter_index < active_class_->local_type_parameters->Length()) {
       const auto& type_param = TypeParameter::CheckedHandle(
           Z, active_class_->local_type_parameters->TypeAt(parameter_index));
@@ -3485,7 +3485,7 @@
     parameter_index -= active_class_->local_type_parameters->Length();
   }
 
-  if (type_parameter_scope_ != NULL &&
+  if (type_parameter_scope_ != nullptr &&
       parameter_index < type_parameter_scope_->outer_parameter_count() +
                             type_parameter_scope_->parameter_count()) {
     result_ = Type::DynamicType();
diff --git a/runtime/vm/compiler/frontend/kernel_translation_helper.h b/runtime/vm/compiler/frontend/kernel_translation_helper.h
index 8db2411..2ca4a52 100644
--- a/runtime/vm/compiler/frontend/kernel_translation_helper.h
+++ b/runtime/vm/compiler/frontend/kernel_translation_helper.h
@@ -1262,7 +1262,7 @@
 
   void ReadUntilFunctionNode();
 
-  Tag PeekTag(uint8_t* payload = NULL);
+  Tag PeekTag(uint8_t* payload = nullptr);
 
  protected:
   const Script& script() const { return script_; }
@@ -1324,7 +1324,7 @@
   void SkipLibraryCombinator();
   void SkipLibraryDependency();
   TokenPosition ReadPosition();
-  Tag ReadTag(uint8_t* payload = NULL);
+  Tag ReadTag(uint8_t* payload = nullptr);
   uint8_t ReadFlags() { return reader_.ReadFlags(); }
   Nullability ReadNullability();
   Variance ReadVariance();
@@ -1384,15 +1384,15 @@
 class ActiveClass {
  public:
   ActiveClass()
-      : klass(NULL),
-        member(NULL),
-        enclosing(NULL),
-        local_type_parameters(NULL) {}
+      : klass(nullptr),
+        member(nullptr),
+        enclosing(nullptr),
+        local_type_parameters(nullptr) {}
 
-  bool HasMember() { return member != NULL; }
+  bool HasMember() { return member != nullptr; }
 
   bool MemberIsProcedure() {
-    ASSERT(member != NULL);
+    ASSERT(member != nullptr);
     UntaggedFunction::Kind function_kind = member->kind();
     return function_kind == UntaggedFunction::kRegularFunction ||
            function_kind == UntaggedFunction::kGetterFunction ||
@@ -1403,7 +1403,7 @@
   }
 
   bool MemberIsFactoryProcedure() {
-    ASSERT(member != NULL);
+    ASSERT(member != nullptr);
     return member->IsFactory();
   }
 
@@ -1416,7 +1416,7 @@
   intptr_t MemberTypeParameterCount(Zone* zone);
 
   intptr_t ClassNumTypeArguments() {
-    ASSERT(klass != NULL);
+    ASSERT(klass != nullptr);
     return klass->NumTypeArguments();
   }
 
@@ -1431,7 +1431,7 @@
   }
 
   const char* ToCString() {
-    return member != NULL ? member->ToCString() : klass->ToCString();
+    return member != nullptr ? member->ToCString() : klass->ToCString();
   }
 
   // The current enclosing class (or the library top-level class).
@@ -1587,7 +1587,7 @@
           outer_(translator->type_parameter_scope_),
           translator_(translator) {
       outer_parameter_count_ = 0;
-      if (outer_ != NULL) {
+      if (outer_ != nullptr) {
         outer_parameter_count_ =
             outer_->outer_parameter_count_ + outer_->parameter_count_;
       }
diff --git a/runtime/vm/compiler/frontend/prologue_builder.cc b/runtime/vm/compiler/frontend/prologue_builder.cc
index 54a01d1..a8a0f5a 100644
--- a/runtime/vm/compiler/frontend/prologue_builder.cc
+++ b/runtime/vm/compiler/frontend/prologue_builder.cc
@@ -177,7 +177,7 @@
 
   // Copy optional parameters down.
   if (num_opt_pos_params > 0) {
-    JoinEntryInstr* next_missing = NULL;
+    JoinEntryInstr* next_missing = nullptr;
     for (intptr_t opt_param = 1; param < num_params; ++param, ++opt_param) {
       const intptr_t param_index = param - (function_.IsFactory() ? 1 : 0);
       update_param_offset(function_, param_index);
@@ -199,7 +199,7 @@
       good += Drop();
 
       Fragment not_good(missing);
-      if (next_missing != NULL) {
+      if (next_missing != nullptr) {
         not_good += Goto(next_missing);
         not_good.current = next_missing;
       }
diff --git a/runtime/vm/compiler/frontend/scope_builder.cc b/runtime/vm/compiler/frontend/scope_builder.cc
index fedf28a..fddfd63 100644
--- a/runtime/vm/compiler/frontend/scope_builder.cc
+++ b/runtime/vm/compiler/frontend/scope_builder.cc
@@ -17,12 +17,12 @@
 #define IG IsolateGroup::Current()
 
 ScopeBuilder::ScopeBuilder(ParsedFunction* parsed_function)
-    : result_(NULL),
+    : result_(nullptr),
       parsed_function_(parsed_function),
       translation_helper_(Thread::Current()),
       zone_(translation_helper_.zone()),
-      current_function_scope_(NULL),
-      scope_(NULL),
+      current_function_scope_(nullptr),
+      scope_(nullptr),
       depth_(0),
       name_index_(0),
       needs_expr_temp_(false),
@@ -45,9 +45,9 @@
 }
 
 ScopeBuildingResult* ScopeBuilder::BuildScopes() {
-  if (result_ != NULL) return result_;
+  if (result_ != nullptr) return result_;
 
-  ASSERT(scope_ == NULL && depth_.loop_ == 0 && depth_.function_ == 0);
+  ASSERT(scope_ == nullptr && depth_.loop_ == 0 && depth_.function_ == 0);
   result_ = new (Z) ScopeBuildingResult();
 
   const Function& function = parsed_function_->function();
@@ -65,7 +65,7 @@
   ActiveTypeParametersScope active_type_params(&active_class_, function,
                                                &signature, Z);
 
-  LocalScope* enclosing_scope = NULL;
+  LocalScope* enclosing_scope = nullptr;
   if (function.IsImplicitClosureFunction() && !function.is_static()) {
     // Create artificial enclosing scope for the tear-off that contains
     // captured receiver value. This ensure that AssertAssignable will correctly
@@ -77,7 +77,7 @@
                      Symbols::This(), klass_type);
     parsed_function_->set_receiver_var(receiver_variable);
     receiver_variable->set_is_captured();
-    enclosing_scope = new (Z) LocalScope(NULL, 0, 0);
+    enclosing_scope = new (Z) LocalScope(nullptr, 0, 0);
     enclosing_scope->set_context_level(0);
     enclosing_scope->AddVariable(receiver_variable);
     enclosing_scope->AddContextVariable(receiver_variable);
@@ -1186,7 +1186,7 @@
       return;
     case kReturnStatement: {
       if ((depth_.function_ == 0) && (depth_.finally_ > 0) &&
-          (result_->finally_return_variable == NULL)) {
+          (result_->finally_return_variable == nullptr)) {
         const String& name = Symbols::TryFinallyReturnValue();
         LocalVariable* variable =
             MakeVariable(TokenPosition::kNoSource, TokenPosition::kNoSource,
@@ -1712,7 +1712,7 @@
     const String& name,
     const AbstractType& type,
     intptr_t kernel_offset,
-    const InferredTypeMetadata* param_type_md /* = NULL */) {
+    const InferredTypeMetadata* param_type_md /* = nullptr */) {
   CompileType* param_type = nullptr;
   const Object* param_value = nullptr;
   if (param_type_md != nullptr && !param_type_md->IsTrivial()) {
@@ -1729,7 +1729,7 @@
     GrowableArray<LocalVariable*>* variables,
     const char* prefix,
     intptr_t nesting_depth) {
-  LocalVariable* v = NULL;
+  LocalVariable* v = nullptr;
 
   // No need to create variables for try/catch-statements inside
   // nested functions.
@@ -1738,7 +1738,7 @@
 
   // If variable was not lifted by the transformer introduce a new
   // one into the current function scope.
-  if (v == NULL) {
+  if (v == nullptr) {
     v = MakeVariable(TokenPosition::kNoSource, TokenPosition::kNoSource,
                      GenerateName(prefix, nesting_depth - 1),
                      AbstractType::dynamic_type());
@@ -1813,7 +1813,7 @@
 }
 
 void ScopeBuilder::AddSwitchVariable() {
-  if ((depth_.function_ == 0) && (result_->switch_variable == NULL)) {
+  if ((depth_.function_ == 0) && (result_->switch_variable == nullptr)) {
     LocalVariable* variable =
         MakeVariable(TokenPosition::kNoSource, TokenPosition::kNoSource,
                      Symbols::SwitchExpr(), AbstractType::dynamic_type());
@@ -1843,7 +1843,7 @@
     // case that we are compiling a nested function and the variable is
     // declared in an outer scope.  In that case, look it up in the scope by
     // name and add it to the variable map to simplify later lookup.
-    ASSERT(current_function_scope_->parent() != NULL);
+    ASSERT(current_function_scope_->parent() != nullptr);
     StringIndex var_name = GetNameFromVariableDeclaration(
         declaration_binary_offset - helper_.data_program_offset_,
         parsed_function_->function());
@@ -1918,17 +1918,17 @@
 void ScopeBuilder::HandleSpecialLoad(LocalVariable** variable,
                                      const String& symbol,
                                      intptr_t kernel_offset) {
-  if (current_function_scope_->parent() != NULL) {
+  if (current_function_scope_->parent() != nullptr) {
     // We are building the scope tree of a closure function and saw [node]. We
     // lazily populate the variable using the parent function scope.
-    if (*variable == NULL) {
+    if (*variable == nullptr) {
       *variable = current_function_scope_->parent()->LookupVariable(
           symbol, kernel_offset, true);
-      ASSERT(*variable != NULL);
+      ASSERT(*variable != nullptr);
     }
   }
 
-  if ((current_function_scope_->parent() != NULL) ||
+  if ((current_function_scope_->parent() != nullptr) ||
       (scope_->function_level() > 0)) {
     // Every scope we use the [variable] from needs to be notified of the usage
     // in order to ensure that preserving the context scope on that particular
diff --git a/runtime/vm/compiler/frontend/scope_builder.h b/runtime/vm/compiler/frontend/scope_builder.h
index d6ba189..64845b5 100644
--- a/runtime/vm/compiler/frontend/scope_builder.h
+++ b/runtime/vm/compiler/frontend/scope_builder.h
@@ -104,7 +104,7 @@
       const String& name,
       const AbstractType& type,
       intptr_t kernel_offset = LocalVariable::kNoKernelOffset,
-      const InferredTypeMetadata* param_type_md = NULL);
+      const InferredTypeMetadata* param_type_md = nullptr);
 
   void AddExceptionVariable(GrowableArray<LocalVariable*>* variables,
                             const char* prefix,
@@ -187,12 +187,12 @@
 class ScopeBuildingResult : public ZoneAllocated {
  public:
   ScopeBuildingResult()
-      : type_arguments_variable(NULL),
-        switch_variable(NULL),
-        finally_return_variable(NULL),
-        setter_value(NULL),
-        yield_jump_variable(NULL),
-        yield_context_variable(NULL),
+      : type_arguments_variable(nullptr),
+        switch_variable(nullptr),
+        finally_return_variable(nullptr),
+        setter_value(nullptr),
+        yield_jump_variable(nullptr),
+        yield_context_variable(nullptr),
         raw_variable_counter_(0) {}
 
   bool IsClosureWithEmptyContext(intptr_t function_node_offset) {
@@ -208,24 +208,24 @@
   IntMap<LocalScope*> scopes;
   GrowableArray<FunctionScope> function_scopes;
 
-  // Only non-NULL for factory constructor functions.
+  // Only non-nullptr for factory constructor functions.
   LocalVariable* type_arguments_variable;
 
-  // Non-NULL when the function contains a switch statement.
+  // Non-nullptr when the function contains a switch statement.
   LocalVariable* switch_variable;
 
-  // Non-NULL when the function contains a return inside a finally block.
+  // Non-nullptr when the function contains a return inside a finally block.
   LocalVariable* finally_return_variable;
 
-  // Non-NULL when the function is a setter.
+  // Non-nullptr when the function is a setter.
   LocalVariable* setter_value;
 
-  // Non-NULL if the function contains yield statement.
+  // Non-nullptr if the function contains a yield statement.
   // TODO(27590) actual variable is called :await_jump_var, we should rename
   // it to reflect the fact that it is used for both await and yield.
   LocalVariable* yield_jump_variable;
 
-  // Non-NULL if the function contains yield statement.
+  // Non-nullptr if the function contains a yield statement.
   // TODO(27590) actual variable is called :await_ctx_var, we should rename
   // it to reflect the fact that it is used for both await and yield.
   LocalVariable* yield_context_variable;
diff --git a/runtime/vm/compiler/jit/compiler.cc b/runtime/vm/compiler/jit/compiler.cc
index b6866e2..e854da4 100644
--- a/runtime/vm/compiler/jit/compiler.cc
+++ b/runtime/vm/compiler/jit/compiler.cc
@@ -53,7 +53,10 @@
     max_deoptimization_counter_threshold,
     16,
     "How many times we allow deoptimization before we disallow optimization.");
-DEFINE_FLAG(charp, optimization_filter, NULL, "Optimize only named function");
+DEFINE_FLAG(charp,
+            optimization_filter,
+            nullptr,
+            "Optimize only named function");
 DEFINE_FLAG(bool, print_flow_graph, false, "Print the IR flow graph.");
 DEFINE_FLAG(bool,
             print_flow_graph_optimized,
@@ -130,10 +133,11 @@
     intptr_t osr_id,
     bool optimized) {
   kernel::FlowGraphBuilder builder(parsed_function, ic_data_array,
-                                   /* not building var desc */ NULL,
-                                   /* not inlining */ NULL, optimized, osr_id);
+                                   /* not building var desc */ nullptr,
+                                   /* not inlining */ nullptr, optimized,
+                                   osr_id);
   FlowGraph* graph = builder.BuildGraph();
-  ASSERT(graph != NULL);
+  ASSERT(graph != nullptr);
   return graph;
 }
 
@@ -256,7 +260,7 @@
     function.SetUsageCounter(INT32_MIN);
     return false;
   }
-  if (FLAG_optimization_filter != NULL) {
+  if (FLAG_optimization_filter != nullptr) {
     // FLAG_optimization_filter is a comma-separated list of strings that are
     // matched against the fully-qualified function name.
     char* save_ptr;  // Needed for strtok_r.
@@ -266,12 +270,12 @@
     strncpy(filter, FLAG_optimization_filter, len);  // strtok modifies arg 1.
     char* token = strtok_r(filter, ",", &save_ptr);
     bool found = false;
-    while (token != NULL) {
-      if (strstr(function_name, token) != NULL) {
+    while (token != nullptr) {
+      if (strstr(function_name, token) != nullptr) {
         found = true;
         break;
       }
-      token = strtok_r(NULL, ",", &save_ptr);
+      token = strtok_r(nullptr, ",", &save_ptr);
     }
     delete[] filter;
     if (!found) {
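
The hunk above tokenizes FLAG_optimization_filter with strtok_r and accepts the function if any comma-separated token is a substring of its fully-qualified name. A self-contained sketch of the same pattern follows; MatchesFilter is an illustrative name rather than a VM API, and strtok_r is POSIX (it modifies the buffer it scans, hence the copy):

// Standalone sketch (not VM code) of comma-separated filter matching.
#include <string.h>  // strtok_r, strstr, strlen, memcpy (strtok_r is POSIX).

static bool MatchesFilter(const char* function_name, const char* filter) {
  if (filter == nullptr) return true;  // No filter configured: accept all.
  const size_t len = strlen(filter) + 1;
  char* copy = new char[len];
  memcpy(copy, filter, len);  // Copy includes the trailing '\0'.
  bool found = false;
  char* save_ptr = nullptr;
  for (char* token = strtok_r(copy, ",", &save_ptr); token != nullptr;
       token = strtok_r(nullptr, ",", &save_ptr)) {
    if (strstr(function_name, token) != nullptr) {
      found = true;
      break;
    }
  }
  delete[] copy;
  return found;
}
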
@@ -522,7 +526,7 @@
 
         if (FLAG_print_ic_data_map) {
           for (intptr_t i = 0; i < ic_data_array->length(); i++) {
-            if ((*ic_data_array)[i] != NULL) {
+            if ((*ic_data_array)[i] != nullptr) {
               THR_Print("%" Pd " ", i);
               FlowGraphPrinter::PrintICData(*(*ic_data_array)[i]);
             }
@@ -923,7 +927,7 @@
 
     kernel::FlowGraphBuilder builder(
         parsed_function, ic_data_array, context_level_array,
-        /* not inlining */ NULL, false, Compiler::kNoOSRDeoptId);
+        /* not inlining */ nullptr, false, Compiler::kNoOSRDeoptId);
     builder.BuildGraph();
 
     auto& var_descs = LocalVarDescriptors::Handle(zone);
@@ -968,9 +972,9 @@
   }
 #if !defined(PRODUCT)
   TimelineStream* stream = Timeline::GetCompilerStream();
-  ASSERT(stream != NULL);
+  ASSERT(stream != nullptr);
   TimelineEvent* event = stream->StartEvent();
-  if (event != NULL) {
+  if (event != nullptr) {
     event->Instant("AbortBackgroundCompilation");
     event->SetNumArguments(1);
     event->CopyArgument(0, "reason", msg);
@@ -986,10 +990,10 @@
 class QueueElement {
  public:
   explicit QueueElement(const Function& function)
-      : next_(NULL), function_(function.ptr()) {}
+      : next_(nullptr), function_(function.ptr()) {}
 
   virtual ~QueueElement() {
-    next_ = NULL;
+    next_ = nullptr;
     function_ = Function::null();
   }
 
@@ -1014,39 +1018,39 @@
 // It implements a FIFO queue, using Peek, Add, Remove operations.
 class BackgroundCompilationQueue {
  public:
-  BackgroundCompilationQueue() : first_(NULL), last_(NULL) {}
+  BackgroundCompilationQueue() : first_(nullptr), last_(nullptr) {}
   virtual ~BackgroundCompilationQueue() { Clear(); }
 
   void VisitObjectPointers(ObjectPointerVisitor* visitor) {
-    ASSERT(visitor != NULL);
+    ASSERT(visitor != nullptr);
     QueueElement* p = first_;
-    while (p != NULL) {
+    while (p != nullptr) {
       visitor->VisitPointer(p->function_untag());
       p = p->next();
     }
   }
 
-  bool IsEmpty() const { return first_ == NULL; }
+  bool IsEmpty() const { return first_ == nullptr; }
 
   void Add(QueueElement* value) {
-    ASSERT(value != NULL);
-    ASSERT(value->next() == NULL);
-    if (first_ == NULL) {
+    ASSERT(value != nullptr);
+    ASSERT(value->next() == nullptr);
+    if (first_ == nullptr) {
       first_ = value;
-      ASSERT(last_ == NULL);
+      ASSERT(last_ == nullptr);
     } else {
-      ASSERT(last_ != NULL);
+      ASSERT(last_ != nullptr);
       last_->set_next(value);
     }
     last_ = value;
-    ASSERT(first_ != NULL && last_ != NULL);
+    ASSERT(first_ != nullptr && last_ != nullptr);
   }
 
   QueueElement* Peek() const { return first_; }
 
   FunctionPtr PeekFunction() const {
     QueueElement* e = Peek();
-    if (e == NULL) {
+    if (e == nullptr) {
       return Function::null();
     } else {
       return e->Function();
@@ -1054,18 +1058,18 @@
   }
 
   QueueElement* Remove() {
-    ASSERT(first_ != NULL);
+    ASSERT(first_ != nullptr);
     QueueElement* result = first_;
     first_ = first_->next();
-    if (first_ == NULL) {
-      last_ = NULL;
+    if (first_ == nullptr) {
+      last_ = nullptr;
     }
     return result;
   }
 
   bool ContainsObj(const Object& obj) const {
     QueueElement* p = first_;
-    while (p != NULL) {
+    while (p != nullptr) {
       if (p->function() == obj.ptr()) {
         return true;
       }
@@ -1079,7 +1083,7 @@
       QueueElement* e = Remove();
       delete e;
     }
-    ASSERT((first_ == NULL) && (last_ == NULL));
+    ASSERT((first_ == nullptr) && (last_ == nullptr));
   }
 
  private:
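
BackgroundCompilationQueue above is an intrusive singly linked FIFO: first_ and last_ are either both nullptr (empty queue) or both non-null, Add appends after last_, and Remove pops from first_ and clears last_ when the queue drains. A minimal standalone sketch of the same shape (illustrative Node/Fifo types, not the VM's classes):

// Standalone sketch (not VM code) of the intrusive singly linked FIFO pattern.
#include <cassert>

struct Node {
  Node* next = nullptr;
};

class Fifo {
 public:
  bool IsEmpty() const { return first_ == nullptr; }

  void Add(Node* value) {
    assert(value != nullptr && value->next == nullptr);
    if (first_ == nullptr) {
      first_ = value;  // Queue was empty; both ends now point at this node.
    } else {
      last_->next = value;  // Append behind the current tail.
    }
    last_ = value;
  }

  Node* Remove() {
    assert(first_ != nullptr);
    Node* result = first_;
    first_ = first_->next;
    if (first_ == nullptr) last_ = nullptr;  // Preserve the empty-queue invariant.
    return result;
  }

 private:
  Node* first_ = nullptr;
  Node* last_ = nullptr;
};
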
@@ -1251,7 +1255,7 @@
 CompilationPipeline* CompilationPipeline::New(Zone* zone,
                                               const Function& function) {
   UNREACHABLE();
-  return NULL;
+  return nullptr;
 }
 
 DEFINE_RUNTIME_ENTRY(CompileFunction, 1) {
diff --git a/runtime/vm/compiler/jit/compiler.h b/runtime/vm/compiler/jit/compiler.h
index f2e3e44..8c6367b 100644
--- a/runtime/vm/compiler/jit/compiler.h
+++ b/runtime/vm/compiler/jit/compiler.h
@@ -54,7 +54,7 @@
 
 class IrregexpCompilationPipeline : public CompilationPipeline {
  public:
-  IrregexpCompilationPipeline() : backtrack_goto_(NULL) {}
+  IrregexpCompilationPipeline() : backtrack_goto_(nullptr) {}
 
   void ParseFunction(ParsedFunction* parsed_function) override;
 
diff --git a/runtime/vm/compiler/jit/jit_call_specializer.cc b/runtime/vm/compiler/jit/jit_call_specializer.cc
index 7daddfc..fdcff35 100644
--- a/runtime/vm/compiler/jit/jit_call_specializer.cc
+++ b/runtime/vm/compiler/jit/jit_call_specializer.cc
@@ -163,7 +163,7 @@
 
 // Replace generic context allocation or cloning with a sequence of inlined
 // allocation and explicit initializing stores.
-// If context_value is not NULL then newly allocated context is a populated
+// If context_value is not nullptr then the newly allocated context is populated
 // with values copied from it, otherwise it is initialized with null.
 void JitCallSpecializer::LowerContextAllocation(
     Definition* alloc,
@@ -179,11 +179,11 @@
   Instruction* cursor = replacement;
 
   Value* initial_value;
-  if (context_value != NULL) {
+  if (context_value != nullptr) {
     LoadFieldInstr* load =
         new (Z) LoadFieldInstr(context_value->CopyWithType(Z),
                                Slot::Context_parent(), alloc->source());
-    flow_graph()->InsertAfter(cursor, load, NULL, FlowGraph::kValue);
+    flow_graph()->InsertAfter(cursor, load, nullptr, FlowGraph::kValue);
     cursor = load;
     initial_value = new (Z) Value(load);
   } else {
diff --git a/runtime/vm/compiler/method_recognizer.cc b/runtime/vm/compiler/method_recognizer.cc
index eb39530..e0b33d2 100644
--- a/runtime/vm/compiler/method_recognizer.cc
+++ b/runtime/vm/compiler/method_recognizer.cc
@@ -369,7 +369,7 @@
   const uint32_t finger_print;
   const char* const name;
 } factory_recognizer_list[] = {RECOGNIZED_LIST_FACTORY_LIST(RECOGNIZE_FACTORY){
-    Symbols::kIllegal, -1, 0, NULL}};
+    Symbols::kIllegal, -1, 0, nullptr}};
 
 #undef RECOGNIZE_FACTORY
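
The factory_recognizer_list above ends with a sentinel entry whose name field is nullptr, so lookups can walk the table without a separate length. A minimal standalone sketch of that nullptr-terminated-table pattern (hypothetical entries, not the VM's recognizer data):

// Standalone sketch (not VM code) of a nullptr-terminated lookup table.
#include <string.h>

struct Entry {
  const char* name;  // nullptr marks the end of the table.
  int value;
};

static const Entry kTable[] = {
    {"alpha", 1},
    {"beta", 2},
    {nullptr, 0},  // Sentinel: lookups stop here.
};

static const Entry* Lookup(const char* name) {
  for (const Entry* e = kTable; e->name != nullptr; ++e) {
    if (strcmp(e->name, name) == 0) return e;
  }
  return nullptr;
}
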