[VM/Runtime] Refactoring sources to enable interpreter-only builds.

The interpreter-only build of the Dart VM should not include any
sources from the 'compiler' directory.
This is the first part of restructuring the sources to enable an
interpreter-only build of the VM:
- Move Compiler::CompileClass from compiler/jit/compiler.cc to
  class_finalizer.cc and rename it to ClassFinalizer::LoadClassMembers.
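
For reference, a minimal sketch of how a call site changes with this
move (the EnsureClassMembersAreLoaded helper and its error handling are
illustrative only, not part of this change):

  // Illustrative sketch, not code added by this change.
  static void EnsureClassMembersAreLoaded(Thread* thread, const Class& cls) {
    // Before: loading class members went through the JIT compiler entry
    // point, which pulled compiler sources into interpreter-only builds:
    //   Compiler::CompileClass(cls);
    // After: the same work is done by the class finalizer, with no
    // dependency on the 'compiler' directory:
    const Error& error =
        Error::Handle(thread->zone(), ClassFinalizer::LoadClassMembers(cls));
    if (!error.IsNull()) {
      // Real callers propagate the error (see Class::EnsureIsFinalized in
      // object.cc); this sketch simply aborts.
      FATAL1("Failed to load members of class %s", cls.ToCString());
    }
  }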

Change-Id: I8743c6537ed2d63e6991d78949f858a1ae506121
Reviewed-on: https://dart-review.googlesource.com/c/87987
Commit-Queue: Siva Annamalai <asiva@google.com>
Reviewed-by: Alexander Markov <alexmarkov@google.com>
Reviewed-by: RĂ©gis Crelier <regis@google.com>
diff --git a/runtime/vm/bootstrap.cc b/runtime/vm/bootstrap.cc
index 37d6578..fc59b64 100644
--- a/runtime/vm/bootstrap.cc
+++ b/runtime/vm/bootstrap.cc
@@ -48,7 +48,7 @@
   ObjectStore* object_store = thread->isolate()->object_store();
   Zone* zone = thread->zone();
   Class& cls = Class::Handle(zone, object_store->closure_class());
-  Compiler::CompileClass(cls);
+  ClassFinalizer::LoadClassMembers(cls);
 
 #if defined(DEBUG)
   // Verify that closure field offsets are identical in Dart and C++.
@@ -71,7 +71,7 @@
 
   // Eagerly compile Bool class, bool constants are used from within compiler.
   cls = object_store->bool_class();
-  Compiler::CompileClass(cls);
+  ClassFinalizer::LoadClassMembers(cls);
 }
 
 static RawError* BootstrapFromKernel(Thread* thread,
diff --git a/runtime/vm/class_finalizer.cc b/runtime/vm/class_finalizer.cc
index e0fc7de..00a16a7 100644
--- a/runtime/vm/class_finalizer.cc
+++ b/runtime/vm/class_finalizer.cc
@@ -1110,33 +1110,9 @@
     }
   }
 
-  // A top level class is parsed eagerly so just finalize it.
+  // A top level class is loaded eagerly so just finalize it.
   if (cls.IsTopLevel()) {
     FinalizeClass(cls);
-  } else {
-    // This class should not contain any functions or user-defined fields yet,
-    // because it has not been compiled yet. There may however be metadata
-    // fields because type parameters are parsed before the class body. Since
-    // 'FinalizeMemberTypes(cls)' has not been called yet, unfinalized
-    // member types could choke the snapshotter.
-    // Or
-    // if the class is being refinalized because a patch is being applied
-    // after the class has been finalized then it is ok for the class to have
-    // functions.
-    //
-    // TODO(kmillikin): This ASSERT will fail when bootstrapping from Kernel
-    // because classes are first created, methods are added, and then classes
-    // are finalized.  It is not easy to finalize classes earlier because not
-    // all bootstrap classes have been created yet.  It would be possible to
-    // create all classes, delay adding methods, finalize the classes, and then
-    // reprocess all classes to add methods, but that seems unnecessary.
-    // Marking the bootstrap classes as is_refinalize_after_patch seems cute but
-    // it causes other things to fail by violating their assumptions.  Reenable
-    // this ASSERT if it's important, remove it if it's just a sanity check and
-    // not required for correctness.
-    //
-    // ASSERT((Array::Handle(cls.functions()).Length() == 0) ||
-    //        cls.is_refinalize_after_patch());
   }
 }
 
@@ -1180,7 +1156,7 @@
   if (!super.IsNull()) {
     FinalizeClass(super);
   }
-  // Mark as parsed and finalized.
+  // Mark as loaded and finalized.
   cls.Finalize();
   // Every class should have at least a constructor, unless it is a top level
   // class or a typedef class. The Kernel frontend does not create an implicit
@@ -1205,6 +1181,158 @@
   }
 }
 
+static void AddRelatedClassesToList(
+    const Class& cls,
+    GrowableHandlePtrArray<const Class>* load_class_list,
+    GrowableHandlePtrArray<const Class>* load_patchclass_list) {
+  Zone* zone = Thread::Current()->zone();
+  Class& load_class = Class::Handle(zone);
+  AbstractType& interface_type = Type::Handle(zone);
+  Array& interfaces = Array::Handle(zone);
+
+  // Add all the interfaces implemented by the class that have not already
+  // been loaded to the load list. Mark each interface as being lazily loaded
+  // so that we don't recursively add it back into the list.
+  interfaces ^= cls.interfaces();
+  for (intptr_t i = 0; i < interfaces.Length(); i++) {
+    interface_type ^= interfaces.At(i);
+    load_class ^= interface_type.type_class();
+    if (!load_class.is_finalized() &&
+        !load_class.is_marked_for_lazy_loading()) {
+      load_class_list->Add(load_class);
+      load_class.set_is_marked_for_lazy_loading();
+    }
+  }
+
+  // Walk up the super_class chain and add those classes to the list if they
+  // have not already been added to the load class list. Mark each class as
+  // being lazily loaded so it is not recursively added back into the list.
+  load_class ^= cls.SuperClass();
+  while (!load_class.IsNull()) {
+    if (!load_class.is_finalized() &&
+        !load_class.is_marked_for_lazy_loading()) {
+      load_class_list->Add(load_class);
+      load_class.set_is_marked_for_lazy_loading();
+    }
+    load_class ^= load_class.SuperClass();
+  }
+
+  // If the class has a patch class that has not already been loaded and
+  // patched, add it to the load patchclass list. Mark the patch class as
+  // being lazily loaded so it is not recursively added back into the list.
+  load_class ^= cls.GetPatchClass();
+  if (!load_class.IsNull()) {
+    if (!load_class.is_finalized() &&
+        !load_class.is_marked_for_lazy_loading()) {
+      load_patchclass_list->Add(load_class);
+      load_class.set_is_marked_for_lazy_loading();
+    }
+  }
+}
+
+RawError* ClassFinalizer::LoadClassMembers(const Class& cls) {
+  ASSERT(Thread::Current()->IsMutatorThread());
+  // If the class is a top level class, it is already loaded.
+  if (cls.IsTopLevel()) {
+    return Error::null();
+  }
+  // If the class is already marked for lazy loading, return immediately.
+  if (cls.is_marked_for_lazy_loading()) {
+    return Error::null();
+  }
+  // If the class is a typedef class there is no need to try and
+  // load its members. Just finalize it directly.
+  if (cls.IsTypedefClass()) {
+#if defined(DEBUG)
+    const Class& closure_cls =
+        Class::Handle(Isolate::Current()->object_store()->closure_class());
+    ASSERT(closure_cls.is_finalized());
+#endif
+    LongJumpScope jump;
+    if (setjmp(*jump.Set()) == 0) {
+      ClassFinalizer::FinalizeClass(cls);
+      return Error::null();
+    } else {
+      return Thread::Current()->StealStickyError();
+    }
+  }
+
+  Thread* const thread = Thread::Current();
+  StackZone zone(thread);
+#if !defined(PRODUCT)
+  VMTagScope tagScope(thread, VMTag::kClassLoadingTagId);
+  TimelineDurationScope tds(thread, Timeline::GetCompilerStream(),
+                            "ClassLoading");
+  if (tds.enabled()) {
+    tds.SetNumArguments(1);
+    tds.CopyArgument(0, "class", cls.ToCString());
+  }
+#endif  // !defined(PRODUCT)
+
+  // We remember all the classes that are being lazily loaded in these lists.
+  // This also allows us to reset the marked_for_lazy_loading state in case we
+  // see an error.
+  GrowableHandlePtrArray<const Class> load_class_list(thread->zone(), 4);
+  GrowableHandlePtrArray<const Class> load_patchclass_list(thread->zone(), 4);
+
+  // Load the class, all the interfaces it implements, and its super classes.
+  LongJumpScope jump;
+  if (setjmp(*jump.Set()) == 0) {
+    if (FLAG_trace_class_finalization) {
+      THR_Print("Lazy Loading Class '%s'\n", cls.ToCString());
+    }
+
+    // Add the primary class which needs to be loaded to the load list.
+    // Mark the class as loading so that we don't recursively add the same
+    // class back into the list.
+    load_class_list.Add(cls);
+    cls.set_is_marked_for_lazy_loading();
+
+    // Add all super classes, interface classes, and the patch class (if one
+    // exists) to the corresponding lists.
+    // NOTE: The load_class_list array keeps growing as more classes are added
+    // to it by AddRelatedClassesToList. It is not OK to hoist
+    // load_class_list.Length() into a local variable and iterate using the
+    // local variable.
+    for (intptr_t i = 0; i < load_class_list.length(); i++) {
+      AddRelatedClassesToList(load_class_list.At(i), &load_class_list,
+                              &load_patchclass_list);
+    }
+
+    // Finish lazy loading of these classes and finalize them.
+    for (intptr_t i = (load_class_list.length() - 1); i >= 0; i--) {
+      const Class& load_class = load_class_list.At(i);
+      ASSERT(!load_class.IsNull());
+      ClassFinalizer::FinalizeClass(load_class);
+      load_class.reset_is_marked_for_lazy_loading();
+    }
+    for (intptr_t i = (load_patchclass_list.length() - 1); i >= 0; i--) {
+      const Class& load_class = load_patchclass_list.At(i);
+      ASSERT(!load_class.IsNull());
+      ClassFinalizer::FinalizeClass(load_class);
+      load_class.reset_is_marked_for_lazy_loading();
+    }
+
+    return Error::null();
+  } else {
+    // Reset the marked for lazy loading flags.
+    for (intptr_t i = 0; i < load_class_list.length(); i++) {
+      const Class& load_class = load_class_list.At(i);
+      if (load_class.is_marked_for_lazy_loading()) {
+        load_class.reset_is_marked_for_lazy_loading();
+      }
+    }
+    for (intptr_t i = 0; i < load_patchclass_list.length(); i++) {
+      const Class& load_class = load_patchclass_list.At(i);
+      if (load_class.is_marked_for_lazy_loading()) {
+        load_class.reset_is_marked_for_lazy_loading();
+      }
+    }
+
+    return Thread::Current()->StealStickyError();
+  }
+}
+
 // Allocate instances for each enumeration value, and populate the
 // static field 'values'.
 // By allocating the instances programmatically, we save an implicit final
diff --git a/runtime/vm/class_finalizer.h b/runtime/vm/class_finalizer.h
index b3b9c42..a720aa0 100644
--- a/runtime/vm/class_finalizer.h
+++ b/runtime/vm/class_finalizer.h
@@ -60,6 +60,12 @@
   // Finalize the class including its fields and functions.
   static void FinalizeClass(const Class& cls);
 
+  // Completes loading of the class; this populates the functions
+  // and fields of the class.
+  //
+  // Returns Error::null() if there is no loading error.
+  static RawError* LoadClassMembers(const Class& cls);
+
 #if !defined(DART_PRECOMPILED_RUNTIME)
   // Verify that the classes have been properly prefinalized. This is
   // needed during bootstrapping where the classes have been preloaded.
diff --git a/runtime/vm/compiler/jit/compiler.cc b/runtime/vm/compiler/jit/compiler.cc
index 19787c5..c63af58 100644
--- a/runtime/vm/compiler/jit/compiler.cc
+++ b/runtime/vm/compiler/jit/compiler.cc
@@ -332,154 +332,6 @@
   return Error::null();
 }
 
-static void AddRelatedClassesToList(
-    const Class& cls,
-    GrowableHandlePtrArray<const Class>* parse_list,
-    GrowableHandlePtrArray<const Class>* patch_list) {
-  Zone* zone = Thread::Current()->zone();
-  Class& parse_class = Class::Handle(zone);
-  AbstractType& interface_type = Type::Handle(zone);
-  Array& interfaces = Array::Handle(zone);
-
-  // Add all the interfaces implemented by the class that have not been
-  // already parsed to the parse list. Mark the interface as parsed so that
-  // we don't recursively add it back into the list.
-  interfaces ^= cls.interfaces();
-  for (intptr_t i = 0; i < interfaces.Length(); i++) {
-    interface_type ^= interfaces.At(i);
-    parse_class ^= interface_type.type_class();
-    if (!parse_class.is_finalized() && !parse_class.is_marked_for_parsing()) {
-      parse_list->Add(parse_class);
-      parse_class.set_is_marked_for_parsing();
-    }
-  }
-
-  // Walk up the super_class chain and add these classes to the list if they
-  // have not been already parsed to the parse list. Mark the class as parsed
-  // so that we don't recursively add it back into the list.
-  parse_class ^= cls.SuperClass();
-  while (!parse_class.IsNull()) {
-    if (!parse_class.is_finalized() && !parse_class.is_marked_for_parsing()) {
-      parse_list->Add(parse_class);
-      parse_class.set_is_marked_for_parsing();
-    }
-    parse_class ^= parse_class.SuperClass();
-  }
-
-  // Add patch classes if they exist to the parse list if they have not already
-  // been parsed and patched. Mark the class as parsed so that we don't
-  // recursively add it back into the list.
-  parse_class ^= cls.GetPatchClass();
-  if (!parse_class.IsNull()) {
-    if (!parse_class.is_finalized() && !parse_class.is_marked_for_parsing()) {
-      patch_list->Add(parse_class);
-      parse_class.set_is_marked_for_parsing();
-    }
-  }
-}
-
-RawError* Compiler::CompileClass(const Class& cls) {
-  ASSERT(Thread::Current()->IsMutatorThread());
-  // If class is a top level class it is already parsed.
-  if (cls.IsTopLevel()) {
-    return Error::null();
-  }
-  // If the class is already marked for parsing return immediately.
-  if (cls.is_marked_for_parsing()) {
-    return Error::null();
-  }
-  // If the class is a typedef class there is no need to try and
-  // compile it. Just finalize it directly.
-  if (cls.IsTypedefClass()) {
-#if defined(DEBUG)
-    const Class& closure_cls =
-        Class::Handle(Isolate::Current()->object_store()->closure_class());
-    ASSERT(closure_cls.is_finalized());
-#endif
-    LongJumpScope jump;
-    if (setjmp(*jump.Set()) == 0) {
-      ClassFinalizer::FinalizeClass(cls);
-      return Error::null();
-    } else {
-      return Thread::Current()->StealStickyError();
-    }
-  }
-
-  Thread* const thread = Thread::Current();
-  StackZone zone(thread);
-#if !defined(PRODUCT)
-  VMTagScope tagScope(thread, VMTag::kCompileClassTagId);
-  TimelineDurationScope tds(thread, Timeline::GetCompilerStream(),
-                            "CompileClass");
-  if (tds.enabled()) {
-    tds.SetNumArguments(1);
-    tds.CopyArgument(0, "class", cls.ToCString());
-  }
-#endif  // !defined(PRODUCT)
-
-  // We remember all the classes that are being compiled in these lists. This
-  // also allows us to reset the marked_for_parsing state in case we see an
-  // error.
-  GrowableHandlePtrArray<const Class> parse_list(thread->zone(), 4);
-  GrowableHandlePtrArray<const Class> patch_list(thread->zone(), 4);
-
-  // Parse the class and all the interfaces it implements and super classes.
-  LongJumpScope jump;
-  if (setjmp(*jump.Set()) == 0) {
-    if (FLAG_trace_compiler) {
-      THR_Print("Compiling Class '%s'\n", cls.ToCString());
-    }
-
-    // Add the primary class which needs to be parsed to the parse list.
-    // Mark the class as parsed so that we don't recursively add the same
-    // class back into the list.
-    parse_list.Add(cls);
-    cls.set_is_marked_for_parsing();
-
-    // Add all super classes, interface classes and patch class if one
-    // exists to the corresponding lists.
-    // NOTE: The parse_list array keeps growing as more classes are added
-    // to it by AddRelatedClassesToList. It is not OK to hoist
-    // parse_list.Length() into a local variable and iterate using the local
-    // variable.
-    for (intptr_t i = 0; i < parse_list.length(); i++) {
-      AddRelatedClassesToList(parse_list.At(i), &parse_list, &patch_list);
-    }
-
-    // Finalize these classes.
-    for (intptr_t i = (parse_list.length() - 1); i >= 0; i--) {
-      const Class& parse_class = parse_list.At(i);
-      ASSERT(!parse_class.IsNull());
-      ClassFinalizer::FinalizeClass(parse_class);
-      parse_class.reset_is_marked_for_parsing();
-    }
-    for (intptr_t i = (patch_list.length() - 1); i >= 0; i--) {
-      const Class& parse_class = patch_list.At(i);
-      ASSERT(!parse_class.IsNull());
-      ClassFinalizer::FinalizeClass(parse_class);
-      parse_class.reset_is_marked_for_parsing();
-    }
-
-    return Error::null();
-  } else {
-    // Reset the marked for parsing flags.
-    for (intptr_t i = 0; i < parse_list.length(); i++) {
-      const Class& parse_class = parse_list.At(i);
-      if (parse_class.is_marked_for_parsing()) {
-        parse_class.reset_is_marked_for_parsing();
-      }
-    }
-    for (intptr_t i = 0; i < patch_list.length(); i++) {
-      const Class& parse_class = patch_list.At(i);
-      if (parse_class.is_marked_for_parsing()) {
-        parse_class.reset_is_marked_for_parsing();
-      }
-    }
-
-    return Thread::Current()->StealStickyError();
-  }
-}
-
 class CompileParsedFunctionHelper : public ValueObject {
  public:
   CompileParsedFunctionHelper(ParsedFunction* parsed_function,
@@ -1815,11 +1667,6 @@
   return Error::null();
 }
 
-RawError* Compiler::CompileClass(const Class& cls) {
-  FATAL1("Attempt to compile class %s", cls.ToCString());
-  return Error::null();
-}
-
 RawObject* Compiler::CompileFunction(Thread* thread, const Function& function) {
   FATAL1("Attempt to compile function %s", function.ToCString());
   return Error::null();
diff --git a/runtime/vm/compiler/jit/compiler.h b/runtime/vm/compiler/jit/compiler.h
index 53e41fd..43f4320 100644
--- a/runtime/vm/compiler/jit/compiler.h
+++ b/runtime/vm/compiler/jit/compiler.h
@@ -88,12 +88,6 @@
   // Returns Error::null() if there is no compilation error.
   static RawError* Compile(const Library& library, const Script& script);
 
-  // Extracts function and field symbols from the class and populates
-  // the class.
-  //
-  // Returns Error::null() if there is no compilation error.
-  static RawError* CompileClass(const Class& cls);
-
   // Generates code for given function without optimization and sets its code
   // field.
   //
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 3882361..5e88dba 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -3480,7 +3480,7 @@
   ASSERT(thread->IsMutatorThread());
   ASSERT(thread != NULL);
   const Error& error =
-      Error::Handle(thread->zone(), Compiler::CompileClass(*this));
+      Error::Handle(thread->zone(), ClassFinalizer::LoadClassMembers(*this));
   if (!error.IsNull()) {
     ASSERT(thread == Thread::Current());
     if (thread->long_jump_base() != NULL) {
@@ -4011,12 +4011,13 @@
                                             raw_ptr()->state_bits_));
 }
 
-void Class::set_is_marked_for_parsing() const {
-  set_state_bits(MarkedForParsingBit::update(true, raw_ptr()->state_bits_));
+void Class::set_is_marked_for_lazy_loading() const {
+  set_state_bits(MarkedForLazyLoadingBit::update(true, raw_ptr()->state_bits_));
 }
 
-void Class::reset_is_marked_for_parsing() const {
-  set_state_bits(MarkedForParsingBit::update(false, raw_ptr()->state_bits_));
+void Class::reset_is_marked_for_lazy_loading() const {
+  set_state_bits(
+      MarkedForLazyLoadingBit::update(false, raw_ptr()->state_bits_));
 }
 
 void Class::set_interfaces(const Array& value) const {
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index 66dd582..bc71cdb 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -1136,11 +1136,11 @@
   void SetRefinalizeAfterPatch() const;
   void ResetFinalization() const;
 
-  bool is_marked_for_parsing() const {
-    return MarkedForParsingBit::decode(raw_ptr()->state_bits_);
+  bool is_marked_for_lazy_loading() const {
+    return MarkedForLazyLoadingBit::decode(raw_ptr()->state_bits_);
   }
-  void set_is_marked_for_parsing() const;
-  void reset_is_marked_for_parsing() const;
+  void set_is_marked_for_lazy_loading() const;
+  void reset_is_marked_for_lazy_loading() const;
 
   bool is_const() const { return ConstBit::decode(raw_ptr()->state_bits_); }
   void set_is_const() const;
@@ -1326,7 +1326,7 @@
     kAbstractBit = kClassFinalizedPos + kClassFinalizedSize,  // = 5
     kPatchBit = 6,
     kSynthesizedClassBit = 7,
-    kMarkedForParsingBit = 8,
+    kMarkedForLazyLoadingBit = 8,
     kMixinAppAliasBit = 9,
     kMixinTypeAppliedBit = 10,
     kFieldsMarkedNullableBit = 11,
@@ -1347,8 +1347,8 @@
   class PatchBit : public BitField<uint16_t, bool, kPatchBit, 1> {};
   class SynthesizedClassBit
       : public BitField<uint16_t, bool, kSynthesizedClassBit, 1> {};
-  class MarkedForParsingBit
-      : public BitField<uint16_t, bool, kMarkedForParsingBit, 1> {};
+  class MarkedForLazyLoadingBit
+      : public BitField<uint16_t, bool, kMarkedForLazyLoadingBit, 1> {};
   class FieldsMarkedNullableBit
       : public BitField<uint16_t, bool, kFieldsMarkedNullableBit, 1> {};
   class CycleFreeBit : public BitField<uint16_t, bool, kCycleFreeBit, 1> {};
diff --git a/runtime/vm/source_report.cc b/runtime/vm/source_report.cc
index 81a1576..9fea664 100644
--- a/runtime/vm/source_report.cc
+++ b/runtime/vm/source_report.cc
@@ -476,7 +476,7 @@
     if (!cls.is_finalized()) {
       if (compile_mode_ == kForceCompile) {
         Error& err = Error::Handle();
-        if (cls.is_marked_for_parsing()) {
+        if (cls.is_marked_for_lazy_loading()) {
           const String& error_message = String::Handle(
               String::New("Unable to process 'force compile' request, "
                           "while the class is being finalized."));
diff --git a/runtime/vm/tags.h b/runtime/vm/tags.h
index e2b08a1..0253739 100644
--- a/runtime/vm/tags.h
+++ b/runtime/vm/tags.h
@@ -20,7 +20,7 @@
   V(LoadBytecode)                                                              \
   V(CompileOptimized)                                                          \
   V(CompileUnoptimized)                                                        \
-  V(CompileClass)                                                              \
+  V(ClassLoading)                                                              \
   V(CompileParseRegExp)                                                        \
   V(DartCompiled)                                                              \
   V(DartInterpreted)                                                           \