[vm/bytecode] Bytecode compilation
Change-Id: I09192e4e929a397920c217b605580f8c4880e7c2
Reviewed-on: https://dart-review.googlesource.com/74002
Commit-Queue: Alexander Markov <alexmarkov@google.com>
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
Reviewed-by: RĂ©gis Crelier <regis@google.com>
diff --git a/runtime/vm/compiler/aot/precompiler.cc b/runtime/vm/compiler/aot/precompiler.cc
index d2fdd60..5395c71 100644
--- a/runtime/vm/compiler/aot/precompiler.cc
+++ b/runtime/vm/compiler/aot/precompiler.cc
@@ -2774,7 +2774,7 @@
"BuildFlowGraph");
#endif // !PRODUCT
flow_graph =
- pipeline->BuildFlowGraph(zone, parsed_function(), *ic_data_array,
+ pipeline->BuildFlowGraph(zone, parsed_function(), ic_data_array,
Compiler::kNoOSRDeoptId, optimized());
}
diff --git a/runtime/vm/compiler/backend/flow_graph.h b/runtime/vm/compiler/backend/flow_graph.h
index ba3e046..a027b8a 100644
--- a/runtime/vm/compiler/backend/flow_graph.h
+++ b/runtime/vm/compiler/backend/flow_graph.h
@@ -128,7 +128,9 @@
}
intptr_t CurrentContextEnvIndex() const {
- return EnvIndex(parsed_function().current_context_var());
+ return FLAG_use_bytecode_compiler
+ ? -1
+ : EnvIndex(parsed_function().current_context_var());
}
intptr_t RawTypeArgumentEnvIndex() const {
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.cc b/runtime/vm/compiler/backend/flow_graph_compiler.cc
index 0dcc98fb..efb9313 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.cc
@@ -1037,7 +1037,8 @@
}
void FlowGraphCompiler::FinalizeVarDescriptors(const Code& code) {
- if (code.is_optimized()) {
+ // TODO(alexmarkov): revise local vars descriptors when compiling bytecode
+ if (code.is_optimized() || FLAG_use_bytecode_compiler) {
// Optimized code does not need variable descriptors. They are
// only stored in the unoptimized version.
code.set_var_descriptors(Object::empty_var_descriptors());
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 3e94025..63fc9df 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -779,17 +779,21 @@
}
}
-const NativeFieldDesc* NativeFieldDesc::GetTypeArgumentsFieldFor(
- Zone* zone,
- const Class& cls) {
+const NativeFieldDesc* NativeFieldDesc::GetTypeArgumentsField(Zone* zone,
+ intptr_t offset) {
// TODO(vegorov) consider caching type arguments fields for specific classes
// in some sort of a flow-graph specific cache.
- const intptr_t offset = cls.type_arguments_field_offset();
ASSERT(offset != Class::kNoTypeArguments);
return new (zone) NativeFieldDesc(kTypeArguments, offset, kDynamicCid,
/*immutable=*/true);
}
+const NativeFieldDesc* NativeFieldDesc::GetTypeArgumentsFieldFor(
+ Zone* zone,
+ const Class& cls) {
+ return GetTypeArgumentsField(zone, cls.type_arguments_field_offset());
+}
+
RawAbstractType* NativeFieldDesc::type() const {
if (cid() == kSmiCid) {
return Type::SmiType();
@@ -1233,7 +1237,10 @@
}
void Value::AddToList(Value* value, Value** list) {
+ ASSERT(value->next_use() == nullptr);
+ ASSERT(value->previous_use() == nullptr);
Value* next = *list;
+ ASSERT(value != next);
*list = value;
value->set_next_use(next);
value->set_previous_use(NULL);
@@ -3965,7 +3972,8 @@
}
#if !defined(TARGET_ARCH_DBC)
- if (compiler->is_optimizing() && HasICData()) {
+ if ((compiler->is_optimizing() || FLAG_use_bytecode_compiler) &&
+ HasICData()) {
ASSERT(HasICData());
if (ic_data()->NumberOfUsedChecks() > 0) {
const ICData& unary_ic_data =
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index ebac7b6..f7e8350 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -1708,10 +1708,10 @@
GraphEntryInstr* graph_entry,
const Array& handler_types,
intptr_t catch_try_index,
- const LocalVariable& exception_var,
- const LocalVariable& stacktrace_var,
bool needs_stacktrace,
intptr_t deopt_id,
+ const LocalVariable* exception_var,
+ const LocalVariable* stacktrace_var,
const LocalVariable* raw_exception_var,
const LocalVariable* raw_stacktrace_var)
: BlockEntryInstr(block_id, try_index, deopt_id),
@@ -1739,8 +1739,8 @@
GraphEntryInstr* graph_entry() const { return graph_entry_; }
- const LocalVariable& exception_var() const { return exception_var_; }
- const LocalVariable& stacktrace_var() const { return stacktrace_var_; }
+ const LocalVariable* exception_var() const { return exception_var_; }
+ const LocalVariable* stacktrace_var() const { return stacktrace_var_; }
const LocalVariable* raw_exception_var() const { return raw_exception_var_; }
const LocalVariable* raw_stacktrace_var() const {
@@ -1775,8 +1775,8 @@
const Array& catch_handler_types_;
const intptr_t catch_try_index_;
GrowableArray<Definition*> initial_definitions_;
- const LocalVariable& exception_var_;
- const LocalVariable& stacktrace_var_;
+ const LocalVariable* exception_var_;
+ const LocalVariable* stacktrace_var_;
const LocalVariable* raw_exception_var_;
const LocalVariable* raw_stacktrace_var_;
const bool needs_stacktrace_;
@@ -2993,6 +2993,7 @@
ASSERT(!dst_type.IsNull());
ASSERT(!dst_type.IsTypeRef());
ASSERT(!dst_name.IsNull());
+ ASSERT(!dst_type.IsDynamicType());
SetInputAt(0, value);
SetInputAt(1, instantiator_type_arguments);
SetInputAt(2, function_type_arguments);
@@ -5131,6 +5132,8 @@
static const NativeFieldDesc* Get(Kind kind);
static const NativeFieldDesc* GetLengthFieldForArrayCid(intptr_t array_cid);
+ static const NativeFieldDesc* GetTypeArgumentsField(Zone* zone,
+ intptr_t offset);
static const NativeFieldDesc* GetTypeArgumentsFieldFor(Zone* zone,
const Class& cls);
diff --git a/runtime/vm/compiler/backend/il_printer.cc b/runtime/vm/compiler/backend/il_printer.cc
index ed6af4d..233b45d 100644
--- a/runtime/vm/compiler/backend/il_printer.cc
+++ b/runtime/vm/compiler/backend/il_printer.cc
@@ -707,7 +707,7 @@
instantiator_type_arguments()->PrintTo(f);
f->Print("), function_type_args(");
function_type_arguments()->PrintTo(f);
- f->Print(")");
+ f->Print("), instantiator_class(%s)", instantiator_class().ToCString());
}
void AllocateContextInstr::PrintOperandsTo(BufferFormatter* f) const {
diff --git a/runtime/vm/compiler/backend/inliner.cc b/runtime/vm/compiler/backend/inliner.cc
index 0550ff3..2d35fbd 100644
--- a/runtime/vm/compiler/backend/inliner.cc
+++ b/runtime/vm/compiler/backend/inliner.cc
@@ -973,7 +973,7 @@
entry_kind = instr->entry_kind();
}
kernel::FlowGraphBuilder builder(
- parsed_function, *ic_data_array, /* not building var desc */ NULL,
+ parsed_function, ic_data_array, /* not building var desc */ NULL,
exit_collector,
/* optimized = */ true, Compiler::kNoOSRDeoptId,
caller_graph_->max_block_id() + 1,
diff --git a/runtime/vm/compiler/backend/type_propagator.cc b/runtime/vm/compiler/backend/type_propagator.cc
index 2dfe032..67ac074 100644
--- a/runtime/vm/compiler/backend/type_propagator.cc
+++ b/runtime/vm/compiler/backend/type_propagator.cc
@@ -938,6 +938,12 @@
return CompileType::Dynamic();
}
+ if (FLAG_use_bytecode_compiler &&
+ graph_entry->parsed_function().node_sequence() == nullptr) {
+ // TODO(alexmarkov): Consider adding node_sequence() and scope.
+ return CompileType::Dynamic();
+ }
+
// Parameter is the receiver.
if ((index() == 0) &&
(function.IsDynamicFunction() || function.IsGenerativeConstructor())) {
diff --git a/runtime/vm/compiler/call_specializer.cc b/runtime/vm/compiler/call_specializer.cc
index 471b5b9..75e6d4c 100644
--- a/runtime/vm/compiler/call_specializer.cc
+++ b/runtime/vm/compiler/call_specializer.cc
@@ -998,10 +998,8 @@
}
// Build an AssertAssignable if necessary.
- if (I->argument_type_checks()) {
- const AbstractType& dst_type =
- AbstractType::ZoneHandle(zone(), field.type());
-
+ const AbstractType& dst_type = AbstractType::ZoneHandle(zone(), field.type());
+ if (I->argument_type_checks() && !dst_type.IsTopType()) {
// Compute if we need to type check the value. Always type check if
// not in strong mode or if at a dynamic invocation.
bool needs_check = true;
diff --git a/runtime/vm/compiler/compiler_sources.gni b/runtime/vm/compiler/compiler_sources.gni
index a3f399d..9300603 100644
--- a/runtime/vm/compiler/compiler_sources.gni
+++ b/runtime/vm/compiler/compiler_sources.gni
@@ -78,6 +78,8 @@
"compiler_state.h",
"frontend/base_flow_graph_builder.cc",
"frontend/base_flow_graph_builder.h",
+ "frontend/bytecode_flow_graph_builder.cc",
+ "frontend/bytecode_flow_graph_builder.h",
"frontend/bytecode_reader.cc",
"frontend/bytecode_reader.h",
"frontend/constant_evaluator.cc",
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
index 55e3531..af1c0c7 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
@@ -302,6 +302,14 @@
}
}
+Fragment BaseFlowGraphBuilder::LoadField(const Field& field) {
+ LoadFieldInstr* load = new (Z) LoadFieldInstr(
+ Pop(), &MayCloneField(field), AbstractType::ZoneHandle(Z, field.type()),
+ TokenPosition::kNoSource, parsed_function_);
+ Push(load);
+ return Fragment(load);
+}
+
Fragment BaseFlowGraphBuilder::LoadField(intptr_t offset, intptr_t class_id) {
LoadFieldInstr* load = new (Z) LoadFieldInstr(
Pop(), offset, AbstractType::ZoneHandle(Z), TokenPosition::kNoSource);
@@ -495,14 +503,6 @@
return variable;
}
-intptr_t BaseFlowGraphBuilder::CurrentTryIndex() {
- if (try_catch_block_ == NULL) {
- return CatchClauseNode::kInvalidTryIndex;
- } else {
- return try_catch_block_->try_index();
- }
-}
-
void BaseFlowGraphBuilder::SetTempIndex(Definition* definition) {
definition->set_temp_index(
stack_ == NULL ? 0 : stack_->definition()->temp_index() + 1);
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.h b/runtime/vm/compiler/frontend/base_flow_graph_builder.h
index 190635c..28d4a4b 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.h
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.h
@@ -113,6 +113,7 @@
BaseFlowGraphBuilder(
const ParsedFunction* parsed_function,
intptr_t last_used_block_id,
+ intptr_t osr_id = DeoptId::kNone,
ZoneGrowableArray<intptr_t>* context_level_array = nullptr,
InlineExitCollector* exit_collector = nullptr,
bool inlining_unchecked_entry = false)
@@ -120,10 +121,11 @@
function_(parsed_function_->function()),
thread_(Thread::Current()),
zone_(thread_->zone()),
+ osr_id_(osr_id),
context_level_array_(context_level_array),
context_depth_(0),
last_used_block_id_(last_used_block_id),
- try_catch_block_(NULL),
+ current_try_index_(CatchClauseNode::kInvalidTryIndex),
next_used_try_index_(0),
stack_(NULL),
pending_argument_count_(0),
@@ -131,6 +133,7 @@
exit_collector_(exit_collector),
inlining_unchecked_entry_(inlining_unchecked_entry) {}
+ Fragment LoadField(const Field& field);
Fragment LoadField(intptr_t offset, intptr_t class_id = kDynamicCid);
Fragment LoadNativeField(const NativeFieldDesc* native_field);
Fragment LoadIndexed(intptr_t index_scale);
@@ -230,6 +233,12 @@
}
intptr_t AllocateTryIndex() { return next_used_try_index_++; }
+ intptr_t CurrentTryIndex() const { return current_try_index_; }
+ void SetCurrentTryIndex(intptr_t try_index) {
+ current_try_index_ = try_index;
+ }
+
+ bool IsCompiledForOsr() { return osr_id_ != DeoptId::kNone; }
bool IsInlining() const { return exit_collector_ != nullptr; }
@@ -263,20 +272,18 @@
protected:
intptr_t AllocateBlockId() { return ++last_used_block_id_; }
- intptr_t CurrentTryIndex();
const ParsedFunction* parsed_function_;
const Function& function_;
Thread* thread_;
Zone* zone_;
+ intptr_t osr_id_;
// Contains (deopt_id, context_level) pairs.
ZoneGrowableArray<intptr_t>* context_level_array_;
intptr_t context_depth_;
intptr_t last_used_block_id_;
- // A chained list of try-catch blocks. Chaining and lookup is done by the
- // [TryCatchBlock] class.
- TryCatchBlock* try_catch_block_;
+ intptr_t current_try_index_;
intptr_t next_used_try_index_;
Value* stack_;
@@ -286,36 +293,13 @@
const bool inlining_unchecked_entry_;
- friend class TryCatchBlock;
friend class StreamingFlowGraphBuilder;
+ friend class BytecodeFlowGraphBuilder;
private:
DISALLOW_COPY_AND_ASSIGN(BaseFlowGraphBuilder);
};
-class TryCatchBlock {
- public:
- explicit TryCatchBlock(BaseFlowGraphBuilder* builder,
- intptr_t try_handler_index = -1)
- : builder_(builder),
- outer_(builder->try_catch_block_),
- try_index_(try_handler_index) {
- if (try_index_ == -1) try_index_ = builder->AllocateTryIndex();
- builder->try_catch_block_ = this;
- }
- ~TryCatchBlock() { builder_->try_catch_block_ = outer_; }
-
- intptr_t try_index() { return try_index_; }
- TryCatchBlock* outer() const { return outer_; }
-
- private:
- BaseFlowGraphBuilder* builder_;
- TryCatchBlock* outer_;
- intptr_t try_index_;
-
- DISALLOW_COPY_AND_ASSIGN(TryCatchBlock);
-};
-
} // namespace kernel
} // namespace dart
diff --git a/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc b/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc
new file mode 100644
index 0000000..e93787f
--- /dev/null
+++ b/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.cc
@@ -0,0 +1,1361 @@
+// Copyright (c) 2018, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#include "vm/compiler/frontend/bytecode_flow_graph_builder.h"
+
+#include "vm/compiler/backend/il_printer.h"
+#include "vm/compiler/frontend/prologue_builder.h"
+#include "vm/compiler/jit/compiler.h"
+#include "vm/object_store.h"
+#include "vm/stack_frame.h"
+#include "vm/stack_frame_kbc.h"
+
+#if !defined(DART_PRECOMPILED_RUNTIME)
+
+#define B (flow_graph_builder_)
+#define Z (zone_)
+
+namespace dart {
+
+DEFINE_FLAG(bool,
+ print_flow_graph_from_bytecode,
+ false,
+ "Print flow graph constructed from bytecode");
+
+namespace kernel {
+
+// 8-bit unsigned operand at bits 8-15.
+BytecodeFlowGraphBuilder::Operand BytecodeFlowGraphBuilder::DecodeOperandA() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ } else {
+ intptr_t value = KernelBytecode::DecodeA(bytecode_instr_);
+ return Operand(value);
+ }
+}
+
+// 8-bit unsigned operand at bits 16-23.
+BytecodeFlowGraphBuilder::Operand BytecodeFlowGraphBuilder::DecodeOperandB() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ } else {
+ intptr_t value = KernelBytecode::DecodeB(bytecode_instr_);
+ return Operand(value);
+ }
+}
+
+// 8-bit unsigned operand at bits 24-31.
+BytecodeFlowGraphBuilder::Operand BytecodeFlowGraphBuilder::DecodeOperandC() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ } else {
+ intptr_t value = KernelBytecode::DecodeC(bytecode_instr_);
+ return Operand(value);
+ }
+}
+
+// 16-bit unsigned operand at bits 16-31.
+BytecodeFlowGraphBuilder::Operand BytecodeFlowGraphBuilder::DecodeOperandD() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ } else {
+ intptr_t value = KernelBytecode::DecodeD(bytecode_instr_);
+ return Operand(value);
+ }
+}
+
+// 16-bit signed operand at bits 16-31.
+BytecodeFlowGraphBuilder::Operand BytecodeFlowGraphBuilder::DecodeOperandX() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ } else {
+ intptr_t value = KernelBytecode::DecodeX(bytecode_instr_);
+ return Operand(value);
+ }
+}
+
+// 24-bit signed operand at bits 8-31.
+BytecodeFlowGraphBuilder::Operand BytecodeFlowGraphBuilder::DecodeOperandT() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ } else {
+ intptr_t value = KernelBytecode::DecodeT(bytecode_instr_);
+ return Operand(value);
+ }
+}
+
+KBCInstr BytecodeFlowGraphBuilder::InstructionAt(
+ intptr_t pc,
+ KernelBytecode::Opcode expect_opcode) {
+ ASSERT(!is_generating_interpreter());
+ ASSERT((0 <= pc) && (pc < bytecode_length_));
+
+ const KBCInstr instr = raw_bytecode_[pc];
+ if (KernelBytecode::DecodeOpcode(instr) != expect_opcode) {
+ FATAL3("Expected bytecode instruction %s, but found %s at %" Pd "",
+ KernelBytecode::NameOf(KernelBytecode::Encode(expect_opcode)),
+ KernelBytecode::NameOf(instr), pc);
+ }
+
+ return instr;
+}
+
+BytecodeFlowGraphBuilder::Constant BytecodeFlowGraphBuilder::ConstantAt(
+ Operand entry_index,
+ intptr_t add_index) {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ } else {
+ const Object& value = Object::ZoneHandle(
+ Z, object_pool_.ObjectAt(entry_index.value() + add_index));
+ return Constant(Z, value);
+ }
+}
+
+void BytecodeFlowGraphBuilder::PushConstant(Constant constant) {
+ if (is_generating_interpreter()) {
+ B->Push(constant.definition());
+ } else {
+ code_ += B->Constant(constant.value());
+ }
+}
+
+BytecodeFlowGraphBuilder::Constant BytecodeFlowGraphBuilder::PopConstant() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ } else {
+ const Object& value = B->stack_->definition()->AsConstant()->value();
+ code_ += B->Drop();
+ return Constant(Z, value);
+ }
+}
+
+void BytecodeFlowGraphBuilder::LoadStackSlots(intptr_t num_slots) {
+ if (B->stack_ != nullptr) {
+ intptr_t stack_depth = B->stack_->definition()->temp_index() + 1;
+ ASSERT(stack_depth >= num_slots);
+ return;
+ }
+
+ ASSERT(is_generating_interpreter());
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+}
+
+void BytecodeFlowGraphBuilder::AllocateLocalVariables(
+ Operand frame_size,
+ intptr_t num_param_locals) {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ } else {
+ // TODO(alexmarkov): Make table of local variables in bytecode and
+ // propagate type, name and positions.
+
+ ASSERT(local_vars_.is_empty());
+
+ const intptr_t num_bytecode_locals = frame_size.value();
+ ASSERT(num_bytecode_locals >= 0);
+
+ intptr_t num_locals = num_bytecode_locals;
+ if (exception_var_ != nullptr) {
+ ++num_locals;
+ }
+ if (stacktrace_var_ != nullptr) {
+ ++num_locals;
+ }
+ if (parsed_function()->has_arg_desc_var()) {
+ ++num_locals;
+ }
+
+ if (num_locals == 0) {
+ return;
+ }
+
+ local_vars_.EnsureLength(num_bytecode_locals, nullptr);
+ for (intptr_t i = num_param_locals; i < num_bytecode_locals; ++i) {
+ String& name =
+ String::ZoneHandle(Z, Symbols::NewFormatted(thread(), "var%" Pd, i));
+ LocalVariable* local = new (Z)
+ LocalVariable(TokenPosition::kNoSource, TokenPosition::kNoSource,
+ name, Object::dynamic_type());
+ local->set_index(VariableIndex(-i));
+ local_vars_[i] = local;
+ }
+
+ intptr_t idx = num_bytecode_locals;
+ if (exception_var_ != nullptr) {
+ exception_var_->set_index(VariableIndex(-idx));
+ ++idx;
+ }
+ if (stacktrace_var_ != nullptr) {
+ stacktrace_var_->set_index(VariableIndex(-idx));
+ ++idx;
+ }
+ if (parsed_function()->has_arg_desc_var()) {
+ parsed_function()->arg_desc_var()->set_index(VariableIndex(-idx));
+ ++idx;
+ }
+ ASSERT(idx == num_locals);
+
+ ASSERT(parsed_function()->node_sequence() == nullptr);
+ parsed_function()->AllocateBytecodeVariables(num_locals);
+ }
+}
+
+LocalVariable* BytecodeFlowGraphBuilder::AllocateParameter(
+ intptr_t param_index,
+ VariableIndex var_index) {
+ const String& name =
+ String::ZoneHandle(Z, function().ParameterNameAt(param_index));
+ const AbstractType& type =
+ AbstractType::ZoneHandle(Z, function().ParameterTypeAt(param_index));
+
+ LocalVariable* param_var = new (Z) LocalVariable(
+ TokenPosition::kNoSource, TokenPosition::kNoSource, name, type);
+ param_var->set_index(var_index);
+
+ if (var_index.value() <= 0) {
+ local_vars_[-var_index.value()] = param_var;
+ }
+
+ return param_var;
+}
+
+void BytecodeFlowGraphBuilder::AllocateFixedParameters() {
+ if (is_generating_interpreter()) {
+ return;
+ }
+
+ ASSERT(!function().HasOptionalParameters());
+
+ const intptr_t num_fixed_params = function().num_fixed_parameters();
+ auto parameters =
+ new (Z) ZoneGrowableArray<LocalVariable*>(Z, num_fixed_params);
+
+ for (intptr_t i = 0; i < num_fixed_params; ++i) {
+ LocalVariable* param_var =
+ AllocateParameter(i, VariableIndex(num_fixed_params - i));
+ parameters->Add(param_var);
+ }
+
+ parsed_function()->SetRawParameters(parameters);
+}
+
+LocalVariable* BytecodeFlowGraphBuilder::LocalVariableAt(intptr_t local_index) {
+ ASSERT(!is_generating_interpreter());
+ if (local_index < 0) {
+ // Parameter
+ ASSERT(!function().HasOptionalParameters());
+ const intptr_t param_index = local_index +
+ function().num_fixed_parameters() +
+ kKBCParamEndSlotFromFp;
+ ASSERT((0 <= param_index) &&
+ (param_index < function().num_fixed_parameters()));
+ return parsed_function()->RawParameterVariable(param_index);
+ } else {
+ return local_vars_.At(local_index);
+ }
+}
+
+void BytecodeFlowGraphBuilder::StoreLocal(Operand local_index) {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ } else {
+ LocalVariable* local_var = LocalVariableAt(local_index.value());
+ code_ += B->StoreLocalRaw(position_, local_var);
+ }
+}
+
+void BytecodeFlowGraphBuilder::LoadLocal(Operand local_index) {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ } else {
+ LocalVariable* local_var = LocalVariableAt(local_index.value());
+ code_ += B->LoadLocal(local_var);
+ }
+}
+
+Value* BytecodeFlowGraphBuilder::Pop() {
+ LoadStackSlots(1);
+ return B->Pop();
+}
+
+ArgumentArray BytecodeFlowGraphBuilder::GetArguments(int count) {
+ ArgumentArray arguments =
+ new (Z) ZoneGrowableArray<PushArgumentInstr*>(Z, count);
+ arguments->SetLength(count);
+ for (intptr_t i = count - 1; i >= 0; --i) {
+ Definition* arg_def = B->stack_->definition();
+ ASSERT(!arg_def->HasSSATemp());
+ ASSERT(arg_def->temp_index() >= i);
+
+ PushArgumentInstr* argument = new (Z) PushArgumentInstr(Pop());
+
+ if (code_.current == arg_def) {
+ code_ <<= argument;
+ } else {
+ Instruction* next = arg_def->next();
+ ASSERT(next != nullptr);
+ arg_def->LinkTo(argument);
+ argument->LinkTo(next);
+ }
+
+ arguments->data()[i] = argument;
+ }
+ return arguments;
+}
+
+void BytecodeFlowGraphBuilder::PropagateStackState(intptr_t target_pc) {
+ if (is_generating_interpreter() || (B->stack_ == nullptr)) {
+ return;
+ }
+
+ // Stack state propagation is supported for forward branches only.
+ // Bytecode generation guarantees that expression stack is empty between
+ // statements and backward jumps are only used to transfer control between
+ // statements (e.g. in loop and continue statements).
+ RELEASE_ASSERT(target_pc > pc_);
+
+ Value* current_stack = B->stack_;
+ Value* target_stack = stack_states_.Lookup(target_pc);
+
+ if (target_stack != nullptr) {
+ // Control flow join should observe the same stack state from
+ // all incoming branches.
+ RELEASE_ASSERT(target_stack == current_stack);
+ } else {
+ stack_states_.Insert(target_pc, current_stack);
+ }
+}
+
+void BytecodeFlowGraphBuilder::BuildInstruction(KernelBytecode::Opcode opcode) {
+ switch (opcode) {
+#define BUILD_BYTECODE_CASE(bytecode) \
+ case KernelBytecode::k##bytecode: \
+ Build##bytecode(); \
+ break;
+
+ FOR_EACH_BYTECODE_IN_FLOW_GRAPH_BUILDER(BUILD_BYTECODE_CASE)
+
+#undef BUILD_BYTECODE_CASE
+ default:
+ FATAL1("Unsupported bytecode instruction %s\n",
+ KernelBytecode::NameOf(bytecode_instr_));
+ }
+}
+
+void BytecodeFlowGraphBuilder::BuildEntry() {
+ AllocateLocalVariables(DecodeOperandD());
+ AllocateFixedParameters();
+}
+
+void BytecodeFlowGraphBuilder::BuildEntryFixed() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ const intptr_t num_fixed_params = DecodeOperandA().value();
+ ASSERT(num_fixed_params == function().num_fixed_parameters());
+
+ AllocateLocalVariables(DecodeOperandD());
+ AllocateFixedParameters();
+
+ Fragment check_args;
+
+ ASSERT(throw_no_such_method_ == nullptr);
+ throw_no_such_method_ = B->BuildThrowNoSuchMethod();
+
+ check_args += B->LoadArgDescriptor();
+ check_args += B->LoadField(ArgumentsDescriptor::positional_count_offset());
+ check_args += B->IntConstant(num_fixed_params);
+ TargetEntryInstr *success1, *fail1;
+ check_args += B->BranchIfEqual(&success1, &fail1);
+ check_args = Fragment(check_args.entry, success1);
+
+ check_args += B->LoadArgDescriptor();
+ check_args += B->LoadField(ArgumentsDescriptor::count_offset());
+ check_args += B->IntConstant(num_fixed_params);
+ TargetEntryInstr *success2, *fail2;
+ check_args += B->BranchIfEqual(&success2, &fail2);
+ check_args = Fragment(check_args.entry, success2);
+
+ Fragment(fail1) + B->Goto(throw_no_such_method_);
+ Fragment(fail2) + B->Goto(throw_no_such_method_);
+
+ ASSERT(B->stack_ == nullptr);
+
+ if (!B->IsInlining() && !B->IsCompiledForOsr()) {
+ code_ += check_args;
+ }
+}
+
+void BytecodeFlowGraphBuilder::BuildEntryOptional() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ const intptr_t num_fixed_params = DecodeOperandA().value();
+ const intptr_t num_opt_pos_params = DecodeOperandB().value();
+ const intptr_t num_opt_named_params = DecodeOperandC().value();
+ ASSERT(num_fixed_params == function().num_fixed_parameters());
+ ASSERT(num_opt_pos_params == function().NumOptionalPositionalParameters());
+ ASSERT(num_opt_named_params == function().NumOptionalNamedParameters());
+
+ ASSERT((num_opt_pos_params == 0) || (num_opt_named_params == 0));
+ const intptr_t num_load_const = num_opt_pos_params + 2 * num_opt_named_params;
+
+ const KBCInstr frame_instr =
+ InstructionAt(pc_ + 1 + num_load_const, KernelBytecode::kFrame);
+ const intptr_t num_temps = (num_opt_named_params > 0) ? 1 : 0;
+ const intptr_t num_extra_locals =
+ KernelBytecode::DecodeD(frame_instr) + num_temps;
+ const intptr_t num_params =
+ num_fixed_params + num_opt_pos_params + num_opt_named_params;
+ const intptr_t total_locals = num_params + num_extra_locals;
+
+ AllocateLocalVariables(Operand(total_locals), num_params);
+
+ ZoneGrowableArray<const Instance*>* default_values =
+ new (Z) ZoneGrowableArray<const Instance*>(
+ Z, num_opt_pos_params + num_opt_named_params);
+ ZoneGrowableArray<LocalVariable*>* raw_parameters =
+ new (Z) ZoneGrowableArray<LocalVariable*>(Z, num_params);
+ LocalVariable* temp_var = nullptr;
+
+ intptr_t param = 0;
+ for (; param < num_fixed_params; ++param) {
+ LocalVariable* param_var = AllocateParameter(param, VariableIndex(-param));
+ raw_parameters->Add(param_var);
+ }
+
+ for (intptr_t i = 0; i < num_opt_pos_params; ++i, ++param) {
+ const KBCInstr load_value_instr =
+ InstructionAt(pc_ + 1 + i, KernelBytecode::kLoadConstant);
+ const Object& default_value =
+ ConstantAt(Operand(KernelBytecode::DecodeD(load_value_instr))).value();
+ ASSERT(KernelBytecode::DecodeA(load_value_instr) == param);
+
+ LocalVariable* param_var = AllocateParameter(param, VariableIndex(-param));
+ raw_parameters->Add(param_var);
+ default_values->Add(
+ &Instance::ZoneHandle(Z, Instance::RawCast(default_value.raw())));
+ }
+
+ if (num_opt_named_params > 0) {
+ default_values->EnsureLength(num_opt_named_params, nullptr);
+ raw_parameters->EnsureLength(num_params, nullptr);
+ temp_var = LocalVariableAt(total_locals - 1);
+
+ for (intptr_t i = 0; i < num_opt_named_params; ++i, ++param) {
+ const KBCInstr load_name_instr =
+ InstructionAt(pc_ + 1 + i * 2, KernelBytecode::kLoadConstant);
+ const KBCInstr load_value_instr =
+ InstructionAt(pc_ + 1 + i * 2 + 1, KernelBytecode::kLoadConstant);
+ const String& param_name = String::Cast(
+ ConstantAt(Operand(KernelBytecode::DecodeD(load_name_instr)))
+ .value());
+ ASSERT(param_name.IsSymbol());
+ const Object& default_value =
+ ConstantAt(Operand(KernelBytecode::DecodeD(load_value_instr)))
+ .value();
+
+ intptr_t param_index = num_fixed_params;
+ for (; param_index < num_params; ++param_index) {
+ if (function().ParameterNameAt(param_index) == param_name.raw()) {
+ break;
+ }
+ }
+ ASSERT(param_index < num_params);
+
+ ASSERT(default_values->At(param_index - num_fixed_params) == nullptr);
+ (*default_values)[param_index - num_fixed_params] =
+ &Instance::ZoneHandle(Z, Instance::RawCast(default_value.raw()));
+
+ const intptr_t local_index = KernelBytecode::DecodeA(load_name_instr);
+ ASSERT(local_index == KernelBytecode::DecodeA(load_value_instr));
+
+ LocalVariable* param_var =
+ AllocateParameter(param_index, VariableIndex(-param));
+ ASSERT(raw_parameters->At(param_index) == nullptr);
+ (*raw_parameters)[param_index] = param_var;
+ }
+ }
+
+ parsed_function()->set_default_parameter_values(default_values);
+ parsed_function()->SetRawParameters(raw_parameters);
+
+ Fragment copy_args_prologue;
+
+ // Code generated for EntryOptional is considered a prologue code.
+ // Prologue should span a range of block ids, so start a new block at the
+ // beginning and end a block at the end.
+ JoinEntryInstr* prologue_entry = B->BuildJoinEntry();
+ copy_args_prologue += B->Goto(prologue_entry);
+ copy_args_prologue = Fragment(copy_args_prologue.entry, prologue_entry);
+
+ ASSERT(throw_no_such_method_ == nullptr);
+ throw_no_such_method_ = B->BuildThrowNoSuchMethod();
+
+ PrologueBuilder prologue_builder(parsed_function(), B->last_used_block_id_,
+ B->IsCompiledForOsr(), B->IsInlining());
+
+ B->last_used_block_id_ = prologue_builder.last_used_block_id();
+
+ copy_args_prologue += prologue_builder.BuildOptionalParameterHandling(
+ throw_no_such_method_, temp_var);
+
+ JoinEntryInstr* prologue_exit = B->BuildJoinEntry();
+ copy_args_prologue += B->Goto(prologue_exit);
+ copy_args_prologue.current = prologue_exit;
+
+ if (!B->IsInlining() && !B->IsCompiledForOsr()) {
+ code_ += copy_args_prologue;
+ }
+
+ prologue_info_ =
+ PrologueInfo(prologue_entry->block_id(), prologue_exit->block_id() - 1);
+
+ // Skip LoadConstant and Frame instructions.
+ pc_ += num_load_const + 1;
+
+ ASSERT(B->stack_ == nullptr);
+}
+
+void BytecodeFlowGraphBuilder::BuildLoadConstant() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ // Handled in EntryOptional instruction.
+ UNREACHABLE();
+}
+
+void BytecodeFlowGraphBuilder::BuildFrame() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ // Handled in EntryOptional instruction.
+ UNREACHABLE();
+}
+
+// CheckFunctionTypeArgs A, D: verifies that the number of function type
+// arguments passed by the caller is either 0 or equals A (the declared
+// count), and stores the (possibly null) type argument vector into local D.
+// A mismatching non-zero count branches to a shared NoSuchMethod throw.
+void BytecodeFlowGraphBuilder::BuildCheckFunctionTypeArgs() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ const intptr_t expected_num_type_args = DecodeOperandA().value();
+ LocalVariable* type_args_var = LocalVariableAt(DecodeOperandD().value());
+ ASSERT(function().IsGeneric());
+
+ // Lazily create the shared "throw NoSuchMethod" block, reused by all
+ // failing checks in this function.
+ if (throw_no_such_method_ == nullptr) {
+ throw_no_such_method_ = B->BuildThrowNoSuchMethod();
+ }
+
+ Fragment setup_type_args;
+ JoinEntryInstr* done = B->BuildJoinEntry();
+
+ // Type args are always optional, so length can always be zero.
+ // If expect_type_args, a non-zero length must match the declaration length.
+ TargetEntryInstr *then, *fail;
+ setup_type_args += B->LoadArgDescriptor();
+ setup_type_args += B->LoadNativeField(NativeFieldDesc::Get(
+ NativeFieldDesc::kArgumentsDescriptor_type_args_len));
+
+ if (expected_num_type_args != 0) {
+ JoinEntryInstr* join2 = B->BuildJoinEntry();
+
+ LocalVariable* len = B->MakeTemporary();
+
+ TargetEntryInstr* otherwise;
+ setup_type_args += B->LoadLocal(len);
+ setup_type_args += B->IntConstant(0);
+ setup_type_args += B->BranchIfEqual(&then, &otherwise);
+
+ TargetEntryInstr* then2;
+ Fragment check_len(otherwise);
+ check_len += B->LoadLocal(len);
+ check_len += B->IntConstant(expected_num_type_args);
+ check_len += B->BranchIfEqual(&then2, &fail);
+
+ // No type arguments were passed: store null into the type args local.
+ Fragment null_type_args(then);
+ null_type_args += B->NullConstant();
+ null_type_args += B->StoreLocalRaw(TokenPosition::kNoSource, type_args_var);
+ null_type_args += B->Drop();
+ null_type_args += B->Goto(join2);
+
+ // Passed count matches the declaration: copy the vector from the FP
+ // slot just past the positional arguments (offset computed from the
+ // arguments-descriptor count) into the type args local.
+ Fragment store_type_args(then2);
+ store_type_args += B->LoadArgDescriptor();
+ store_type_args += B->LoadField(ArgumentsDescriptor::count_offset());
+ store_type_args += B->LoadFpRelativeSlot(
+ kWordSize * (1 + compiler_frame_layout.param_end_from_fp));
+ store_type_args +=
+ B->StoreLocalRaw(TokenPosition::kNoSource, type_args_var);
+ store_type_args += B->Drop();
+ store_type_args += B->Goto(join2);
+
+ Fragment(join2) + B->Drop() + B->Goto(done);
+ Fragment(fail) + B->Goto(throw_no_such_method_);
+ } else {
+ // Function declares no type parameters: any non-zero count fails.
+ setup_type_args += B->IntConstant(0);
+ setup_type_args += B->BranchIfEqual(&then, &fail);
+ Fragment(then) + B->Goto(done);
+ Fragment(fail) + B->Goto(throw_no_such_method_);
+ }
+
+ setup_type_args = Fragment(setup_type_args.entry, done);
+ ASSERT(B->stack_ == nullptr);
+
+ if (expected_num_type_args != 0) {
+ parsed_function()->set_function_type_arguments(type_args_var);
+ parsed_function()->SetRawTypeArgumentsVariable(type_args_var);
+ }
+
+ // This check is part of the prologue; it is skipped when inlining and
+ // for OSR compilations.
+ if (!B->IsInlining() && !B->IsCompiledForOsr()) {
+ code_ += setup_type_args;
+ }
+}
+
+// CheckStack: emits a stack-overflow check (also an interrupt point).
+void BytecodeFlowGraphBuilder::BuildCheckStack() {
+ // TODO(alexmarkov): update B->loop_depth_
+ code_ += B->CheckStackOverflow(position_);
+ ASSERT(B->stack_ == nullptr);
+}
+
+// PushConstant D: pushes constant pool entry D onto the stack.
+void BytecodeFlowGraphBuilder::BuildPushConstant() {
+ PushConstant(ConstantAt(DecodeOperandD()));
+}
+
+// StoreLocal X: stores the top of stack into local X, leaving the value
+// on the stack.
+void BytecodeFlowGraphBuilder::BuildStoreLocal() {
+ LoadStackSlots(1);
+ const Operand local_index = DecodeOperandX();
+ StoreLocal(local_index);
+}
+
+// PopLocal X: like StoreLocal, but also pops the stored value.
+void BytecodeFlowGraphBuilder::BuildPopLocal() {
+ BuildStoreLocal();
+ code_ += B->Drop();
+}
+
+// Push X: pushes the value of local X onto the stack.
+void BytecodeFlowGraphBuilder::BuildPush() {
+ const Operand local_index = DecodeOperandX();
+ LoadLocal(local_index);
+}
+
+// IndirectStaticCall A, D: static call with A arguments. The ICData is
+// popped from the stack (pushed by a preceding PushConstant); the call
+// target is taken from the ICData and the arguments descriptor from
+// constant pool entry D.
+void BytecodeFlowGraphBuilder::BuildIndirectStaticCall() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ const ICData& icdata = ICData::Cast(PopConstant().value());
+
+ const Function& target = Function::ZoneHandle(Z, icdata.GetTargetAt(0));
+ const ArgumentsDescriptor arg_desc(
+ Array::Cast(ConstantAt(DecodeOperandD()).value()));
+ intptr_t argc = DecodeOperandA().value();
+ // Deopt ids were pre-assigned in pool order; see
+ // ProcessICDataInObjectPool.
+ ASSERT(ic_data_array_->At(icdata.deopt_id())->Original() == icdata.raw());
+
+ ArgumentArray arguments = GetArguments(argc);
+
+ // TODO(alexmarkov): pass ICData::kSuper for super calls
+ // (need to distinguish them in bytecode).
+ StaticCallInstr* call = new (Z) StaticCallInstr(
+ position_, target, arg_desc.TypeArgsLen(),
+ Array::ZoneHandle(Z, arg_desc.GetArgumentNames()), arguments,
+ *ic_data_array_, icdata.deopt_id(), ICData::kStatic);
+
+ // TODO(alexmarkov): add type info
+ // SetResultTypeForStaticCall(call, target, argument_count, result_type);
+
+ code_ <<= call;
+ B->Push(call);
+}
+
+// InstanceCall A, D: instance call with A arguments; the ICData (which
+// carries the selector and arguments descriptor) is constant pool entry D.
+void BytecodeFlowGraphBuilder::BuildInstanceCall() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ const ICData& icdata = ICData::Cast(ConstantAt(DecodeOperandD()).value());
+ // Deopt ids were pre-assigned in pool order; see
+ // ProcessICDataInObjectPool.
+ ASSERT(ic_data_array_->At(icdata.deopt_id())->Original() == icdata.raw());
+
+ const intptr_t argc = DecodeOperandA().value();
+ const ArgumentsDescriptor arg_desc(
+ Array::Handle(Z, icdata.arguments_descriptor()));
+
+ const String& name = String::ZoneHandle(Z, icdata.target_name());
+ // Recognize operators (+, ==, ...) so later passes can specialize them.
+ const Token::Kind token_kind =
+ MethodTokenRecognizer::RecognizeTokenKind(name);
+
+ const ArgumentArray arguments = GetArguments(argc);
+
+ // TODO(alexmarkov): store interface_target in bytecode and pass it here.
+
+ InstanceCallInstr* call = new (Z) InstanceCallInstr(
+ position_, name, token_kind, arguments, arg_desc.TypeArgsLen(),
+ Array::ZoneHandle(Z, arg_desc.GetArgumentNames()), icdata.NumArgsTested(),
+ *ic_data_array_, icdata.deopt_id());
+
+ ASSERT(call->ic_data() != nullptr);
+ ASSERT(call->ic_data()->Original() == icdata.raw());
+
+ // TODO(alexmarkov): add type info - call->SetResultType()
+
+ code_ <<= call;
+ B->Push(call);
+}
+
+// NativeCall: never reached when compiling — native methods are routed to
+// the default flow graph builder (see BuildGraph, which bails out for
+// natives).
+void BytecodeFlowGraphBuilder::BuildNativeCall() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ // Default flow graph builder is used to compile native methods.
+ UNREACHABLE();
+}
+
+// Allocate D: allocates an instance of the class in constant pool entry D
+// (no type arguments; argument list is empty) and pushes it.
+void BytecodeFlowGraphBuilder::BuildAllocate() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ const Class& klass = Class::Cast(ConstantAt(DecodeOperandD()).value());
+
+ const ArgumentArray arguments =
+ new (Z) ZoneGrowableArray<PushArgumentInstr*>(Z, 0);
+
+ AllocateObjectInstr* allocate =
+ new (Z) AllocateObjectInstr(position_, klass, arguments);
+
+ code_ <<= allocate;
+ B->Push(allocate);
+}
+
+// AllocateT: allocates an instance of a generic class. The class is popped
+// as a constant; the single stack argument is passed to the allocation
+// (the instantiated type arguments).
+void BytecodeFlowGraphBuilder::BuildAllocateT() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ const Class& klass = Class::Cast(PopConstant().value());
+ const ArgumentArray arguments = GetArguments(1);
+
+ AllocateObjectInstr* allocate =
+ new (Z) AllocateObjectInstr(position_, klass, arguments);
+
+ code_ <<= allocate;
+ B->Push(allocate);
+}
+
+// AllocateContext D: allocates a Context with D variable slots and pushes
+// it.
+void BytecodeFlowGraphBuilder::BuildAllocateContext() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ code_ += B->AllocateContext(DecodeOperandD().value());
+}
+
+// CloneContext: pops a context, clones it, and pushes the clone.
+void BytecodeFlowGraphBuilder::BuildCloneContext() {
+ LoadStackSlots(1);
+ // TODO(alexmarkov): Pass context_size and use it in compiled mode.
+ CloneContextInstr* clone_instruction = new (Z) CloneContextInstr(
+ TokenPosition::kNoSource, Pop(), CloneContextInstr::kUnknownContextSize,
+ B->GetNextDeoptId());
+ code_ <<= clone_instruction;
+ B->Push(clone_instruction);
+}
+
+// CreateArrayTOS: pops two stack slots (consumed by CreateArray) and
+// pushes the new array.
+void BytecodeFlowGraphBuilder::BuildCreateArrayTOS() {
+ LoadStackSlots(2);
+ code_ += B->CreateArray();
+}
+
+// StoreFieldTOS D: pops (instance, value) and stores value into the field
+// described by constant pool entries D (offset as Smi) and D+1 (Field).
+void BytecodeFlowGraphBuilder::BuildStoreFieldTOS() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ LoadStackSlots(2);
+ Operand cp_index = DecodeOperandD();
+
+ const Field& field = Field::Cast(ConstantAt(cp_index, 1).value());
+ // Pool entry D holds the field offset in words; it must agree with the
+ // Field object in entry D+1.
+ ASSERT(Smi::Cast(ConstantAt(cp_index).value()).Value() * kWordSize ==
+ field.Offset());
+
+ if (field.Owner() == isolate()->object_store()->closure_class()) {
+ // Stores to _Closure fields are lower-level.
+ // TODO(alexmarkov): use NativeFieldDesc
+ code_ += B->StoreInstanceField(position_, field.Offset());
+ } else {
+ // The rest of the StoreFieldTOS are for field initializers.
+ // TODO(alexmarkov): Consider adding a flag to StoreFieldTOS or even
+ // adding a separate bytecode instruction.
+ code_ += B->StoreInstanceFieldGuarded(field,
+ /* is_initialization_store = */ true);
+ }
+}
+
+// LoadFieldTOS D: pops an instance and pushes the value of the field
+// described by constant pool entries D (offset as Smi) and D+1 (Field).
+void BytecodeFlowGraphBuilder::BuildLoadFieldTOS() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ LoadStackSlots(1);
+ Operand cp_index = DecodeOperandD();
+
+ const Field& field = Field::Cast(ConstantAt(cp_index, 1).value());
+ // Pool entry D holds the field offset in words; it must agree with the
+ // Field object in entry D+1.
+ ASSERT(Smi::Cast(ConstantAt(cp_index).value()).Value() * kWordSize ==
+ field.Offset());
+
+ if (field.Owner() == isolate()->object_store()->closure_class()) {
+ // Loads from _Closure fields are lower-level.
+ // TODO(alexmarkov): use NativeFieldDesc
+ code_ += B->LoadField(field.Offset());
+ } else {
+ code_ += B->LoadField(field);
+ }
+}
+
+// StoreContextParent: pops (context, value) and stores value into the
+// context's parent slot.
+void BytecodeFlowGraphBuilder::BuildStoreContextParent() {
+ LoadStackSlots(2);
+
+ // TODO(alexmarkov): use NativeFieldDesc
+ code_ += B->StoreInstanceField(position_, Context::parent_offset());
+}
+
+// LoadContextParent: pops a context and pushes its parent context.
+void BytecodeFlowGraphBuilder::BuildLoadContextParent() {
+ LoadStackSlots(1);
+
+ // TODO(alexmarkov): use NativeFieldDesc
+ code_ += B->LoadField(Context::parent_offset());
+}
+
+// StoreContextVar D: pops (context, value) and stores value into context
+// variable slot D.
+void BytecodeFlowGraphBuilder::BuildStoreContextVar() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ LoadStackSlots(2);
+ Operand var_index = DecodeOperandD();
+
+ // TODO(alexmarkov): use NativeFieldDesc
+ code_ += B->StoreInstanceField(position_,
+ Context::variable_offset(var_index.value()));
+}
+
+// LoadContextVar D: pops a context and pushes the value of its variable
+// slot D.
+void BytecodeFlowGraphBuilder::BuildLoadContextVar() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ LoadStackSlots(1);
+ Operand var_index = DecodeOperandD();
+
+ // TODO(alexmarkov): use NativeFieldDesc
+ code_ += B->LoadField(Context::variable_offset(var_index.value()));
+}
+
+// LoadTypeArgumentsField D: pops an instance and pushes its type-arguments
+// vector; the field offset (in words) is the Smi in constant pool entry D.
+void BytecodeFlowGraphBuilder::BuildLoadTypeArgumentsField() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ LoadStackSlots(1);
+ const intptr_t offset =
+ Smi::Cast(ConstantAt(DecodeOperandD()).value()).Value() * kWordSize;
+
+ code_ +=
+ B->LoadNativeField(NativeFieldDesc::GetTypeArgumentsField(Z, offset));
+}
+
+// StoreStaticTOS D: pops a value and stores it into the static Field in
+// constant pool entry D.
+void BytecodeFlowGraphBuilder::BuildStoreStaticTOS() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ LoadStackSlots(1);
+ Operand cp_index = DecodeOperandD();
+
+ const Field& field = Field::Cast(ConstantAt(cp_index).value());
+
+ code_ += B->StoreStaticField(position_, field);
+}
+
+// PushStatic D: replaces the Field object on top of stack with the value
+// of that static field.
+void BytecodeFlowGraphBuilder::BuildPushStatic() {
+ // Note: Field object is both pushed into the stack and
+ // available in constant pool entry D.
+ // TODO(alexmarkov): clean this up. If we stop pushing field object
+ // explicitly, we might need the following code to get it from constant
+ // pool: PushConstant(ConstantAt(DecodeOperandD()));
+
+ code_ += B->LoadStaticField();
+}
+
+// StoreIndexedTOS: pops (array, index, value) and performs an indexed
+// store into an _List (kArrayCid); the result is dropped.
+void BytecodeFlowGraphBuilder::BuildStoreIndexedTOS() {
+ LoadStackSlots(3);
+ code_ += B->StoreIndexed(kArrayCid);
+ code_ += B->Drop();
+}
+
+// BooleanNegateTOS: replaces the boolean on top of stack with its negation.
+void BytecodeFlowGraphBuilder::BuildBooleanNegateTOS() {
+ LoadStackSlots(1);
+ code_ += B->BooleanNegate();
+}
+
+// InstantiateType D: pops instantiator and function type argument vectors
+// (2 slots) and pushes the type from constant pool entry D instantiated
+// with them.
+void BytecodeFlowGraphBuilder::BuildInstantiateType() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ const AbstractType& type =
+ AbstractType::Cast(ConstantAt(DecodeOperandD()).value());
+
+ LoadStackSlots(2);
+ code_ += B->InstantiateType(type);
+}
+
+// InstantiateTypeArgumentsTOS D: pops instantiator and function type
+// argument vectors (2 slots) and pushes the TypeArguments from constant
+// pool entry D instantiated with them.
+void BytecodeFlowGraphBuilder::BuildInstantiateTypeArgumentsTOS() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ const TypeArguments& type_args =
+ TypeArguments::Cast(ConstantAt(DecodeOperandD()).value());
+
+ LoadStackSlots(2);
+ code_ += B->InstantiateTypeArguments(type_args);
+}
+
+// AssertBoolean: checks that the value on top of stack is a boolean.
+void BytecodeFlowGraphBuilder::BuildAssertBoolean() {
+ LoadStackSlots(1);
+ code_ += B->AssertBool(position_);
+}
+
+// AssertAssignable: type check. Stack (top first): destination name
+// (constant), destination type (constant), function type args,
+// instantiator type args, value. The checked value stays on the stack.
+void BytecodeFlowGraphBuilder::BuildAssertAssignable() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ LoadStackSlots(5);
+
+ const String& dst_name = String::Cast(PopConstant().value());
+ const AbstractType& dst_type = AbstractType::Cast(PopConstant().value());
+
+ Value* function_type_args = Pop();
+ Value* instantiator_type_args = Pop();
+ Value* value = Pop();
+
+ AssertAssignableInstr* instr = new (Z) AssertAssignableInstr(
+ position_, value, instantiator_type_args, function_type_args, dst_type,
+ dst_name, B->GetNextDeoptId());
+
+ code_ <<= instr;
+
+ // The (checked) value is the result of the instruction.
+ B->Push(instr);
+}
+
+// AssertSubtype: checks sub_type <: super_type. Stack (top first):
+// destination name, super type, sub type (all constants), function type
+// args, instantiator type args. Produces no value.
+void BytecodeFlowGraphBuilder::BuildAssertSubtype() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ LoadStackSlots(5);
+
+ const String& dst_name = String::Cast(PopConstant().value());
+ const AbstractType& super_type = AbstractType::Cast(PopConstant().value());
+ const AbstractType& sub_type = AbstractType::Cast(PopConstant().value());
+ Value* function_type_args = Pop();
+ Value* instantiator_type_args = Pop();
+
+ AssertSubtypeInstr* instr = new (Z)
+ AssertSubtypeInstr(position_, instantiator_type_args, function_type_args,
+ sub_type, super_type, dst_name, B->GetNextDeoptId());
+ code_ <<= instr;
+}
+
+// Jump T: unconditional branch to pc_ + T. The join block was created in
+// CollectControlFlow; the current expression stack state is recorded for
+// the target and then cleared, since the fragment is now closed.
+void BytecodeFlowGraphBuilder::BuildJump() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ const intptr_t target_pc = pc_ + DecodeOperandT().value();
+ JoinEntryInstr* join = jump_targets_.Lookup(target_pc);
+ ASSERT(join != nullptr);
+ code_ += B->Goto(join);
+ PropagateStackState(target_pc);
+ B->stack_ = nullptr;
+}
+
+// JumpIfNoAsserts T: compile-time-conditional jump — taken (as a plain
+// Jump) only when assertions are disabled for this isolate; otherwise
+// falls through so the assert code is compiled in.
+void BytecodeFlowGraphBuilder::BuildJumpIfNoAsserts() {
+ if (!isolate()->asserts()) {
+ BuildJump();
+ }
+}
+
+// JumpIfNotZeroTypeArgs T: branches to pc_ + T if the caller passed a
+// non-zero number of function type arguments (read from the arguments
+// descriptor); otherwise falls through.
+void BytecodeFlowGraphBuilder::BuildJumpIfNotZeroTypeArgs() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ TargetEntryInstr *is_zero, *is_not_zero;
+ code_ += B->LoadArgDescriptor();
+ code_ += B->LoadNativeField(NativeFieldDesc::Get(
+ NativeFieldDesc::kArgumentsDescriptor_type_args_len));
+ code_ += B->IntConstant(0);
+ code_ += B->BranchIfEqual(&is_zero, &is_not_zero);
+
+ const intptr_t target_pc = pc_ + DecodeOperandT().value();
+ JoinEntryInstr* join = jump_targets_.Lookup(target_pc);
+ ASSERT(join != nullptr);
+ Fragment(is_not_zero) += B->Goto(join);
+ PropagateStackState(target_pc);
+
+ // Continue building code on the fall-through (zero type args) path.
+ code_ = Fragment(code_.entry, is_zero);
+}
+
+// Common implementation of If{Eq,Ne}StrictTOS: pops two values, compares
+// them with strict (identity) equality, and fuses the comparison with the
+// immediately following Jump bytecode — the jump is taken when the
+// comparison result matches cmp_kind, otherwise execution falls through.
+void BytecodeFlowGraphBuilder::BuildIfStrictCompare(Token::Kind cmp_kind) {
+ ASSERT((cmp_kind == Token::kEQ) || (cmp_kind == Token::kNE));
+
+ // TODO(alexmarkov): revise If* bytecodes to include Jump
+ // (and maybe comparison to true/false)
+
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ LoadStackSlots(2);
+
+ TargetEntryInstr* eq_branch = nullptr;
+ TargetEntryInstr* ne_branch = nullptr;
+ code_ += B->BranchIfStrictEqual(&eq_branch, &ne_branch);
+
+ TargetEntryInstr* then_entry =
+ (cmp_kind == Token::kEQ) ? eq_branch : ne_branch;
+ TargetEntryInstr* else_entry =
+ (cmp_kind == Token::kEQ) ? ne_branch : eq_branch;
+
+ // The next bytecode instruction should be a Jump.
+ // Consume it here; it must not itself be a jump target.
+ ++pc_;
+ bytecode_instr_ = InstructionAt(pc_, KernelBytecode::kJump);
+ ASSERT(jump_targets_.Lookup(pc_) == nullptr);
+
+ const intptr_t target_pc = pc_ + DecodeOperandT().value();
+ JoinEntryInstr* join = jump_targets_.Lookup(target_pc);
+ ASSERT(join != nullptr);
+
+ code_ = Fragment(then_entry);
+ code_ += B->Goto(join);
+ PropagateStackState(target_pc);
+
+ // Continue building code on the not-taken path.
+ code_ = Fragment(else_entry);
+}
+
+// IfEqStrictTOS: jump (via following Jump bytecode) if TOS values are
+// identical.
+void BytecodeFlowGraphBuilder::BuildIfEqStrictTOS() {
+ BuildIfStrictCompare(Token::kEQ);
+}
+
+// IfNeStrictTOS: jump (via following Jump bytecode) if TOS values are not
+// identical.
+void BytecodeFlowGraphBuilder::BuildIfNeStrictTOS() {
+ BuildIfStrictCompare(Token::kNE);
+}
+
+// IfEqNull A: jump (via following Jump bytecode) if local A is null —
+// implemented by pushing the local and null, then reusing IfEqStrictTOS.
+void BytecodeFlowGraphBuilder::BuildIfEqNull() {
+ LoadLocal(DecodeOperandA());
+ code_ += B->NullConstant();
+ BuildIfEqStrictTOS();
+}
+
+// Drop1: pops and discards the top of stack.
+void BytecodeFlowGraphBuilder::BuildDrop1() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ // AdjustSP(-1);
+ } else {
+ code_ += B->Drop();
+ }
+}
+
+// ReturnTOS: returns the value on top of stack, closing the fragment.
+void BytecodeFlowGraphBuilder::BuildReturnTOS() {
+ LoadStackSlots(1);
+ ASSERT(code_.is_open());
+ code_ += B->Return(position_);
+}
+
+// Trap: emits a Stop instruction that halts execution if ever reached.
+void BytecodeFlowGraphBuilder::BuildTrap() {
+ code_ += Fragment(new (Z) StopInstr("Bytecode Trap instruction")).closed();
+}
+
+// Throw A: A == 0 is a plain throw (pops the exception); A != 0 is a
+// rethrow (pops exception and stack trace). Either way the fragment is
+// closed and the expression stack is emptied.
+void BytecodeFlowGraphBuilder::BuildThrow() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ if (DecodeOperandA().value() == 0) {
+ // throw
+ LoadStackSlots(1);
+ code_ += B->PushArgument();
+ code_ += B->ThrowException(position_);
+ } else {
+ // rethrow
+ LoadStackSlots(2);
+ GetArguments(2);
+ code_ += Fragment(new (Z) ReThrowInstr(position_,
+ CatchClauseNode::kInvalidTryIndex,
+ B->GetNextDeoptId()))
+ .closed();
+ }
+
+ ASSERT(code_.is_closed());
+
+ // Empty stack as closed fragment should not leave any values on the stack.
+ while (B->stack_ != nullptr) {
+ B->Pop();
+ }
+}
+
+// MoveSpecial A, D: copies a special variable (D selects the exception or
+// stack trace variable of a catch block) into local A.
+void BytecodeFlowGraphBuilder::BuildMoveSpecial() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ LocalVariable* special_var = nullptr;
+ switch (DecodeOperandD().value()) {
+ // TODO(alexmarkov): Move these constants to constants_kbc.h
+ case KernelBytecode::kExceptionSpecialIndex:
+ // exception_var_/stacktrace_var_ are created in CollectControlFlow
+ // when the function has exception handlers.
+ ASSERT(exception_var_ != nullptr);
+ special_var = exception_var_;
+ break;
+ case KernelBytecode::kStackTraceSpecialIndex:
+ ASSERT(stacktrace_var_ != nullptr);
+ special_var = stacktrace_var_;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ code_ += B->LoadLocal(special_var);
+ StoreLocal(DecodeOperandA());
+ code_ += B->Drop();
+}
+
+// SetFrame: resets the interpreter's stack pointer; a no-op when
+// compiling, but the expression stack must already be empty here.
+void BytecodeFlowGraphBuilder::BuildSetFrame() {
+ if (is_generating_interpreter()) {
+ UNIMPLEMENTED(); // TODO(alexmarkov): interpreter
+ }
+
+ // No-op in compiled code.
+ ASSERT(B->stack_ == nullptr);
+}
+
+// Returns true if object pool entry `index` holds an ICData object.
+static bool IsICDataEntry(const ObjectPool& object_pool, intptr_t index) {
+ if (object_pool.TypeAt(index) != ObjectPool::kTaggedObject) {
+ return false;
+ }
+ RawObject* entry = object_pool.ObjectAt(index);
+ return entry->IsHeapObject() && entry->IsICData();
+}
+
+// Read ICData entries in object pool, skip deopt_ids and
+// pre-populate ic_data_array_.
+//
+// Deopt ids were assigned sequentially (starting from 0) while the pool
+// was read, so walking the pool in order and calling GetNextDeoptId must
+// reproduce each ICData's deopt_id. When compiling unoptimized code
+// (ic_data_array_ is empty) the array is filled from the pool; otherwise
+// existing entries are only verified against the pool.
+void BytecodeFlowGraphBuilder::ProcessICDataInObjectPool(
+ const ObjectPool& object_pool) {
+ CompilerState& compiler_state = thread()->compiler_state();
+ ASSERT(compiler_state.deopt_id() == 0);
+
+ const intptr_t pool_length = object_pool.Length();
+ for (intptr_t i = 0; i < pool_length; ++i) {
+ if (IsICDataEntry(object_pool, i)) {
+ const ICData& icdata = ICData::CheckedHandle(Z, object_pool.ObjectAt(i));
+ const intptr_t deopt_id = compiler_state.GetNextDeoptId();
+
+ ASSERT(icdata.deopt_id() == deopt_id);
+ ASSERT(ic_data_array_->is_empty() ||
+ (ic_data_array_->At(deopt_id)->Original() == icdata.raw()));
+ }
+ }
+
+ if (ic_data_array_->is_empty()) {
+ // First compilation: index every pool ICData by its deopt id.
+ const intptr_t len = compiler_state.deopt_id();
+ ic_data_array_->EnsureLength(len, nullptr);
+ for (intptr_t i = 0; i < pool_length; ++i) {
+ if (IsICDataEntry(object_pool, i)) {
+ const ICData& icdata =
+ ICData::CheckedHandle(Z, object_pool.ObjectAt(i));
+ (*ic_data_array_)[icdata.deopt_id()] = &icdata;
+ }
+ }
+ }
+}
+
+// Returns the try index of the innermost try block covering bytecode `pc`,
+// or kInvalidTryIndex if pc is outside all try blocks. PcDescriptors
+// entries come in (start, end) pairs; inner try blocks appear after outer
+// ones, so the last matching interval wins.
+intptr_t BytecodeFlowGraphBuilder::GetTryIndex(const PcDescriptors& descriptors,
+ intptr_t pc) {
+ const uword pc_offset =
+ KernelBytecode::BytecodePcToOffset(pc, /* is_return_address = */ true);
+ PcDescriptors::Iterator iter(descriptors, RawPcDescriptors::kAnyKind);
+ intptr_t try_index = CatchClauseNode::kInvalidTryIndex;
+ while (iter.MoveNext()) {
+ const intptr_t current_try_index = iter.TryIndex();
+ const uword start_pc = iter.PcOffset();
+ // Intervals are sorted by start; once past pc there is no match.
+ if (pc_offset < start_pc) {
+ break;
+ }
+ const bool has_next = iter.MoveNext();
+ ASSERT(has_next);
+ const uword end_pc = iter.PcOffset();
+ if (start_pc <= pc_offset && pc_offset < end_pc) {
+ ASSERT(try_index < current_try_index);
+ try_index = current_try_index;
+ }
+ }
+ return try_index;
+}
+
+// Returns the join block for bytecode `pc`, creating and registering one
+// (with the correct try index) on first request.
+JoinEntryInstr* BytecodeFlowGraphBuilder::EnsureControlFlowJoin(
+ const PcDescriptors& descriptors,
+ intptr_t pc) {
+ ASSERT((0 <= pc) && (pc < bytecode_length_));
+ JoinEntryInstr* join = jump_targets_.Lookup(pc);
+ if (join == nullptr) {
+ join = B->BuildJoinEntry(GetTryIndex(descriptors, pc));
+ jump_targets_.Insert(pc, join);
+ }
+ return join;
+}
+
+// Pre-pass over the bytecode: creates join blocks for every control-flow
+// target (jump destinations and try block boundaries) and builds catch
+// block entries for all exception handlers, attaching them to graph_entry.
+void BytecodeFlowGraphBuilder::CollectControlFlow(
+ const PcDescriptors& descriptors,
+ const ExceptionHandlers& handlers,
+ GraphEntryInstr* graph_entry) {
+ // Join for every jump target.
+ for (intptr_t pc = 0; pc < bytecode_length_; ++pc) {
+ const KBCInstr instr = raw_bytecode_[pc];
+ const KernelBytecode::Opcode opcode = KernelBytecode::DecodeOpcode(instr);
+
+ if ((opcode == KernelBytecode::kJump) ||
+ (opcode == KernelBytecode::kJumpIfNoAsserts) ||
+ (opcode == KernelBytecode::kJumpIfNotZeroTypeArgs)) {
+ const intptr_t target = pc + KernelBytecode::DecodeT(instr);
+ EnsureControlFlowJoin(descriptors, target);
+ }
+ }
+
+ // Join at the start and end of every try block interval.
+ PcDescriptors::Iterator iter(descriptors, RawPcDescriptors::kAnyKind);
+ while (iter.MoveNext()) {
+ const intptr_t start_pc = KernelBytecode::OffsetToBytecodePc(
+ iter.PcOffset(), /* is_return_address = */ true);
+ EnsureControlFlowJoin(descriptors, start_pc);
+
+ const bool has_next = iter.MoveNext();
+ ASSERT(has_next);
+ const intptr_t end_pc = KernelBytecode::OffsetToBytecodePc(
+ iter.PcOffset(), /* is_return_address = */ true);
+ EnsureControlFlowJoin(descriptors, end_pc);
+ }
+
+ if (handlers.num_entries() > 0) {
+ // Functions with exception handlers are not inlined (yet).
+ B->InlineBailout("kernel::BytecodeFlowGraphBuilder::CollectControlFlow");
+
+ // Variables that receive the caught exception and stack trace;
+ // MoveSpecial copies them into bytecode locals.
+ exception_var_ = new (Z)
+ LocalVariable(TokenPosition::kNoSource, TokenPosition::kNoSource,
+ Symbols::ExceptionVar(), Object::dynamic_type());
+ stacktrace_var_ = new (Z)
+ LocalVariable(TokenPosition::kNoSource, TokenPosition::kNoSource,
+ Symbols::StackTraceVar(), Object::dynamic_type());
+ }
+
+ // One CatchBlockEntry per handler, jumping to the join at its handler pc.
+ for (intptr_t try_index = 0; try_index < handlers.num_entries();
+ ++try_index) {
+ ExceptionHandlerInfo handler_info;
+ handlers.GetHandlerInfo(try_index, &handler_info);
+
+ const intptr_t handler_pc = KernelBytecode::OffsetToBytecodePc(
+ handler_info.handler_pc_offset, /* is_return_address = */ false);
+ JoinEntryInstr* join = EnsureControlFlowJoin(descriptors, handler_pc);
+
+ const Array& handler_types =
+ Array::ZoneHandle(Z, handlers.GetHandledTypes(try_index));
+
+ CatchBlockEntryInstr* entry = new (Z) CatchBlockEntryInstr(
+ TokenPosition::kNoSource, handler_info.is_generated,
+ B->AllocateBlockId(), handler_info.outer_try_index, graph_entry,
+ handler_types, try_index, handler_info.needs_stacktrace,
+ B->GetNextDeoptId(), nullptr, nullptr, exception_var_, stacktrace_var_);
+ graph_entry->AddCatchEntry(entry);
+
+ code_ = Fragment(entry);
+ code_ += B->Goto(join);
+ }
+}
+
+// Main driver: builds a FlowGraph from the function's bytecode, or returns
+// nullptr for native methods (which use the default builder). Performs a
+// control-flow pre-pass, then translates bytecode instructions one by one,
+// stitching fragments together at join blocks.
+FlowGraph* BytecodeFlowGraphBuilder::BuildGraph() {
+ if (function().is_native()) {
+ // Use default flow graph builder for native methods.
+ return nullptr;
+ }
+
+ const Code& bytecode = Code::Handle(Z, function().Bytecode());
+
+ object_pool_ = bytecode.object_pool();
+ raw_bytecode_ = reinterpret_cast<KBCInstr*>(bytecode.EntryPoint());
+ bytecode_length_ = bytecode.Size() / sizeof(KBCInstr);
+
+ ProcessICDataInObjectPool(object_pool_);
+
+ TargetEntryInstr* normal_entry = B->BuildTargetEntry();
+ GraphEntryInstr* graph_entry =
+ new (Z) GraphEntryInstr(*parsed_function_, normal_entry, B->osr_id_);
+
+ const PcDescriptors& descriptors =
+ PcDescriptors::Handle(Z, bytecode.pc_descriptors());
+ const ExceptionHandlers& handlers =
+ ExceptionHandlers::Handle(Z, bytecode.exception_handlers());
+
+ CollectControlFlow(descriptors, handlers, graph_entry);
+
+ code_ = Fragment(normal_entry);
+
+ for (pc_ = 0; pc_ < bytecode_length_; ++pc_) {
+ bytecode_instr_ = raw_bytecode_[pc_];
+
+ JoinEntryInstr* join = jump_targets_.Lookup(pc_);
+ if (join != nullptr) {
+ // This pc is a control-flow target: close the current fragment into
+ // the join (or restore the stack state recorded by a jump here).
+ Value* stack_state = stack_states_.Lookup(pc_);
+ if (code_.is_open()) {
+ ASSERT((stack_state == nullptr) || (stack_state == B->stack_));
+ code_ += B->Goto(join);
+ } else {
+ ASSERT(B->stack_ == nullptr);
+ B->stack_ = stack_state;
+ }
+ code_ = Fragment(join);
+ B->SetCurrentTryIndex(join->try_index());
+ } else if (code_.is_closed()) {
+ // Skip unreachable bytecode instructions.
+ continue;
+ }
+
+ BuildInstruction(KernelBytecode::DecodeOpcode(bytecode_instr_));
+
+ if (code_.is_closed()) {
+ ASSERT(B->stack_ == nullptr);
+ }
+ }
+
+ // When compiling for OSR, use a depth first search to find the OSR
+ // entry and make graph entry jump to it instead of normal entry.
+ // Catch entries are always considered reachable, even if they
+ // become unreachable after OSR.
+ if (B->IsCompiledForOsr()) {
+ graph_entry->RelinkToOsrEntry(Z, B->last_used_block_id_ + 1);
+ }
+
+ FlowGraph* flow_graph = new (Z) FlowGraph(
+ *parsed_function_, graph_entry, B->last_used_block_id_, prologue_info_);
+
+ if (FLAG_print_flow_graph_from_bytecode) {
+ FlowGraphPrinter::PrintGraph("Constructed from bytecode", flow_graph);
+ }
+
+ return flow_graph;
+}
+
+} // namespace kernel
+} // namespace dart
+
+#endif // !defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.h b/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.h
new file mode 100644
index 0000000..9bf0b9c
--- /dev/null
+++ b/runtime/vm/compiler/frontend/bytecode_flow_graph_builder.h
@@ -0,0 +1,234 @@
+// Copyright (c) 2018, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_COMPILER_FRONTEND_BYTECODE_FLOW_GRAPH_BUILDER_H_
+#define RUNTIME_VM_COMPILER_FRONTEND_BYTECODE_FLOW_GRAPH_BUILDER_H_
+
+#include "vm/compiler/backend/il.h"
+#include "vm/compiler/frontend/base_flow_graph_builder.h"
+#include "vm/constants_kbc.h"
+
+#if !defined(DART_PRECOMPILED_RUNTIME)
+
+namespace dart {
+namespace kernel {
+
+// List of bytecode instructions this builder can translate; M is applied
+// to each instruction name to declare/define the Build<Name> methods.
+#define FOR_EACH_BYTECODE_IN_FLOW_GRAPH_BUILDER(M) \
+ M(Allocate) \
+ M(AllocateContext) \
+ M(AllocateT) \
+ M(AssertAssignable) \
+ M(AssertBoolean) \
+ M(AssertSubtype) \
+ M(BooleanNegateTOS) \
+ M(CheckFunctionTypeArgs) \
+ M(CheckStack) \
+ M(CloneContext) \
+ M(CreateArrayTOS) \
+ M(Drop1) \
+ M(Entry) \
+ M(EntryFixed) \
+ M(EntryOptional) \
+ M(Frame) \
+ M(IfEqNull) \
+ M(IfEqStrictTOS) \
+ M(IfNeStrictTOS) \
+ M(IndirectStaticCall) \
+ M(InstanceCall) \
+ M(InstantiateType) \
+ M(InstantiateTypeArgumentsTOS) \
+ M(Jump) \
+ M(JumpIfNoAsserts) \
+ M(JumpIfNotZeroTypeArgs) \
+ M(LoadConstant) \
+ M(LoadContextParent) \
+ M(LoadContextVar) \
+ M(LoadFieldTOS) \
+ M(LoadTypeArgumentsField) \
+ M(MoveSpecial) \
+ M(NativeCall) \
+ M(PopLocal) \
+ M(Push) \
+ M(PushConstant) \
+ M(PushStatic) \
+ M(ReturnTOS) \
+ M(SetFrame) \
+ M(StoreContextParent) \
+ M(StoreContextVar) \
+ M(StoreFieldTOS) \
+ M(StoreIndexedTOS) \
+ M(StoreLocal) \
+ M(StoreStaticTOS) \
+ M(Throw) \
+ M(Trap)
+
+// This class builds flow graph from bytecode. It is used either to compile
+// from bytecode, or generate bytecode interpreter (the latter is not
+// fully implemented yet).
+// TODO(alexmarkov): extend this class and IL to generate an interpreter in
+// addition to compiling bytecode.
+class BytecodeFlowGraphBuilder {
+ public:
+ BytecodeFlowGraphBuilder(BaseFlowGraphBuilder* flow_graph_builder,
+ ParsedFunction* parsed_function,
+ ZoneGrowableArray<const ICData*>* ic_data_array)
+ : flow_graph_builder_(flow_graph_builder),
+ zone_(flow_graph_builder->zone_),
+ is_generating_interpreter_(
+ false), // TODO(alexmarkov): pass as argument
+ parsed_function_(parsed_function),
+ ic_data_array_(ic_data_array),
+ object_pool_(ObjectPool::Handle(zone_)),
+ raw_bytecode_(nullptr),
+ bytecode_length_(0),
+ pc_(0),
+ bytecode_instr_(KernelBytecode::kTrap),
+ position_(TokenPosition::kNoSource),
+ local_vars_(zone_, 0),
+ parameters_(zone_, 0),
+ exception_var_(nullptr),
+ stacktrace_var_(nullptr),
+ prologue_info_(-1, -1),
+ throw_no_such_method_(nullptr) {}
+
+ // Builds and returns the flow graph, or nullptr for native methods.
+ FlowGraph* BuildGraph();
+
+ protected:
+ // Returns `true` if building a flow graph for a bytecode interpreter, or
+ // `false` if compiling a function from bytecode.
+ bool is_generating_interpreter() const { return is_generating_interpreter_; }
+
+ private:
+ // Operand of bytecode instruction, either intptr_t value (if compiling
+ // bytecode) or Definition (if generating interpreter).
+ class Operand {
+ public:
+ explicit Operand(Definition* definition)
+ : definition_(definition), value_(0) {
+ ASSERT(definition != nullptr);
+ }
+
+ explicit Operand(intptr_t value) : definition_(nullptr), value_(value) {}
+
+ Definition* definition() const {
+ ASSERT(definition_ != nullptr);
+ return definition_;
+ }
+
+ intptr_t value() const {
+ ASSERT(definition_ == nullptr);
+ return value_;
+ }
+
+ private:
+ Definition* definition_;
+ intptr_t value_;
+ };
+
+ // Constant from a constant pool.
+ // It is either Object (if compiling bytecode) or Definition
+ // (if generating interpreter).
+ class Constant {
+ public:
+ explicit Constant(Definition* definition)
+ : definition_(definition), value_(Object::null_object()) {
+ ASSERT(definition != nullptr);
+ }
+
+ // NOTE(review): 'zone' is unused here — confirm whether the value was
+ // intended to be zone-allocated.
+ explicit Constant(Zone* zone, const Object& value)
+ : definition_(nullptr), value_(value) {}
+
+ Definition* definition() const {
+ ASSERT(definition_ != nullptr);
+ return definition_;
+ }
+
+ const Object& value() const {
+ ASSERT(definition_ == nullptr);
+ return value_;
+ }
+
+ private:
+ Definition* definition_;
+ const Object& value_;
+ };
+
+ Operand DecodeOperandA();
+ Operand DecodeOperandB();
+ Operand DecodeOperandC();
+ Operand DecodeOperandD();
+ Operand DecodeOperandX();
+ Operand DecodeOperandT();
+ KBCInstr InstructionAt(intptr_t pc, KernelBytecode::Opcode expect_opcode);
+ Constant ConstantAt(Operand entry_index, intptr_t add_index = 0);
+ void PushConstant(Constant constant);
+ Constant PopConstant();
+ void LoadStackSlots(intptr_t num_slots);
+ void AllocateLocalVariables(Operand frame_size,
+ intptr_t num_param_locals = 0);
+ LocalVariable* AllocateParameter(intptr_t param_index,
+ VariableIndex var_index);
+ void AllocateFixedParameters();
+ LocalVariable* LocalVariableAt(intptr_t local_index);
+ void StoreLocal(Operand local_index);
+ void LoadLocal(Operand local_index);
+ Value* Pop();
+ ArgumentArray GetArguments(int count);
+ void PropagateStackState(intptr_t target_pc);
+ void BuildIfStrictCompare(Token::Kind cmp_kind);
+
+ void BuildInstruction(KernelBytecode::Opcode opcode);
+
+#define DECLARE_BUILD_METHOD(bytecode) void Build##bytecode();
+
+ FOR_EACH_BYTECODE_IN_FLOW_GRAPH_BUILDER(DECLARE_BUILD_METHOD)
+#undef DECLARE_BUILD_METHOD
+
+ void ProcessICDataInObjectPool(const ObjectPool& object_pool);
+ intptr_t GetTryIndex(const PcDescriptors& descriptors, intptr_t pc);
+ JoinEntryInstr* EnsureControlFlowJoin(const PcDescriptors& descriptors,
+ intptr_t pc);
+ void CollectControlFlow(const PcDescriptors& descriptors,
+ const ExceptionHandlers& handlers,
+ GraphEntryInstr* graph_entry);
+
+ Thread* thread() const { return flow_graph_builder_->thread_; }
+ Isolate* isolate() const { return thread()->isolate(); }
+
+ ParsedFunction* parsed_function() {
+ ASSERT(!is_generating_interpreter());
+ return parsed_function_;
+ }
+ const Function& function() { return parsed_function()->function(); }
+
+ BaseFlowGraphBuilder* flow_graph_builder_;
+ Zone* zone_;
+ bool is_generating_interpreter_;
+
+ // The following members are available only when compiling bytecode.
+
+ ParsedFunction* parsed_function_;
+ ZoneGrowableArray<const ICData*>* ic_data_array_;
+ ObjectPool& object_pool_;
+ KBCInstr* raw_bytecode_;
+ intptr_t bytecode_length_;
+ // Current bytecode pc and the (decoded) instruction at that pc.
+ intptr_t pc_;
+ KBCInstr bytecode_instr_;
+ TokenPosition position_; // TODO(alexmarkov): Set/update.
+ // IL fragment currently being emitted.
+ Fragment code_;
+ ZoneGrowableArray<LocalVariable*> local_vars_;
+ ZoneGrowableArray<LocalVariable*> parameters_;
+ // Catch-block special variables; non-null only if the function has
+ // exception handlers.
+ LocalVariable* exception_var_;
+ LocalVariable* stacktrace_var_;
+ // Join blocks for every bytecode pc that is a control-flow target.
+ IntMap<JoinEntryInstr*> jump_targets_;
+ // Expression stack state propagated to forward jump targets.
+ IntMap<Value*> stack_states_;
+ PrologueInfo prologue_info_;
+ // Shared block that throws NoSuchMethod; created lazily.
+ JoinEntryInstr* throw_no_such_method_;
+};
+
+} // namespace kernel
+} // namespace dart
+
+#endif // !defined(DART_PRECOMPILED_RUNTIME)
+#endif // RUNTIME_VM_COMPILER_FRONTEND_BYTECODE_FLOW_GRAPH_BUILDER_H_
diff --git a/runtime/vm/compiler/frontend/bytecode_reader.cc b/runtime/vm/compiler/frontend/bytecode_reader.cc
index 75bf359..bac50ed 100644
--- a/runtime/vm/compiler/frontend/bytecode_reader.cc
+++ b/runtime/vm/compiler/frontend/bytecode_reader.cc
@@ -52,7 +52,16 @@
const intptr_t obj_count = helper_->reader_.ReadListLength();
const ObjectPool& pool =
ObjectPool::Handle(helper_->zone_, ObjectPool::New(obj_count));
- ReadPoolEntries(function, function, pool, 0);
+
+ {
+ // While reading pool entries, deopt_ids are allocated for
+ // ICData objects.
+ //
+ // TODO(alexmarkov): allocate deopt_ids for closures separately
+ DeoptIdScope deopt_id_scope(H.thread(), 0);
+
+ ReadPoolEntries(function, function, pool, 0);
+ }
// Read bytecode and attach to function.
const Code& bytecode = Code::Handle(helper_->zone_, ReadBytecode(pool));
@@ -164,14 +173,16 @@
uint32_t low_bits = helper_->ReadUInt32();
int64_t value = helper_->ReadUInt32();
value = (value << 32) | low_bits;
- obj = Integer::New(value);
+ obj = Integer::New(value, Heap::kOld);
+ obj = H.Canonicalize(Integer::Cast(obj));
} break;
case ConstantPoolTag::kDouble: {
uint32_t low_bits = helper_->ReadUInt32();
uint64_t bits = helper_->ReadUInt32();
bits = (bits << 32) | low_bits;
double value = bit_cast<double, uint64_t>(bits);
- obj = Double::New(value);
+ obj = Double::New(value, Heap::kOld);
+ obj = H.Canonicalize(Double::Cast(obj));
} break;
case ConstantPoolTag::kBool:
if (helper_->ReadUInt() == 1) {
@@ -229,10 +240,11 @@
(name.raw() != Symbols::Call().raw())) {
name = Function::CreateDynamicInvocationForwarderName(name);
}
- obj = ICData::New(function, name,
- array, // Arguments descriptor.
- DeoptId::kNone, checked_argument_count,
- ICData::RebindRule::kInstance);
+ obj =
+ ICData::New(function, name,
+ array, // Arguments descriptor.
+ H.thread()->compiler_state().GetNextDeoptId(),
+ checked_argument_count, ICData::RebindRule::kInstance);
#if defined(TAG_IC_DATA)
ICData::Cast(obj).set_tag(ICData::Tag::kInstanceCall);
#endif
@@ -275,8 +287,8 @@
array ^= pool.ObjectAt(arg_desc_index);
obj = ICData::New(function, name,
array, // Arguments descriptor.
- DeoptId::kNone, num_args_checked,
- ICData::RebindRule::kStatic);
+ H.thread()->compiler_state().GetNextDeoptId(),
+ num_args_checked, ICData::RebindRule::kStatic);
ICData::Cast(obj).AddTarget(Function::Cast(elem));
#if defined(TAG_IC_DATA)
ICData::Cast(obj).set_tag(ICData::Tag::kStaticCall);
diff --git a/runtime/vm/compiler/frontend/flow_graph_builder.cc b/runtime/vm/compiler/frontend/flow_graph_builder.cc
index 9336067..3e68c2e 100644
--- a/runtime/vm/compiler/frontend/flow_graph_builder.cc
+++ b/runtime/vm/compiler/frontend/flow_graph_builder.cc
@@ -4167,8 +4167,8 @@
catch_block->token_pos(), (node->token_pos() == TokenPosition::kNoSource),
owner()->AllocateBlockId(), catch_handler_index, owner()->graph_entry(),
catch_block->handler_types(), try_handler_index,
- catch_block->exception_var(), catch_block->stacktrace_var(),
catch_block->needs_stacktrace(), owner()->GetNextDeoptId(),
+ &catch_block->exception_var(), &catch_block->stacktrace_var(),
&catch_block->exception_var(), &catch_block->stacktrace_var());
owner()->AddCatchEntry(catch_entry);
AppendFragment(catch_entry, for_catch);
@@ -4214,8 +4214,8 @@
true, // this is not a catch block from user code.
owner()->AllocateBlockId(), original_handler_index,
owner()->graph_entry(), types, catch_handler_index,
- catch_block->exception_var(), catch_block->stacktrace_var(),
catch_block->needs_stacktrace(), owner()->GetNextDeoptId(),
+ &catch_block->exception_var(), &catch_block->stacktrace_var(),
&catch_block->exception_var(), &catch_block->stacktrace_var());
owner()->AddCatchEntry(finally_entry);
AppendFragment(finally_entry, for_finally);
diff --git a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
index a330ce7..4392efd 100644
--- a/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
+++ b/runtime/vm/compiler/frontend/kernel_binary_flowgraph.cc
@@ -4,6 +4,7 @@
#include "vm/compiler/frontend/kernel_binary_flowgraph.h"
+#include "vm/compiler/frontend/bytecode_flow_graph_builder.h"
#include "vm/compiler/frontend/bytecode_reader.h"
#include "vm/compiler/frontend/flow_graph_builder.h" // For dart::FlowGraphBuilder::SimpleInstanceOfType.
#include "vm/compiler/frontend/prologue_builder.h"
@@ -1242,7 +1243,7 @@
// entry and make graph entry jump to it instead of normal entry.
// Catch entries are always considered reachable, even if they
// become unreachable after OSR.
- if (flow_graph_builder_->osr_id_ != Compiler::kNoOSRDeoptId) {
+ if (flow_graph_builder_->IsCompiledForOsr()) {
graph_entry->RelinkToOsrEntry(Z,
flow_graph_builder_->last_used_block_id_ + 1);
}
@@ -1877,7 +1878,7 @@
// entry and make graph entry jump to it instead of normal entry.
// Catch entries are always considered reachable, even if they
// become unreachable after OSR.
- if (flow_graph_builder_->osr_id_ != Compiler::kNoOSRDeoptId) {
+ if (flow_graph_builder_->IsCompiledForOsr()) {
graph_entry->RelinkToOsrEntry(Z,
flow_graph_builder_->last_used_block_id_ + 1);
}
@@ -1906,9 +1907,7 @@
SetOffset(kernel_offset);
- // TODO(regis): Clean up this logic of when to compile.
- // If the bytecode was previously loaded, we really want to compile.
- if (FLAG_enable_interpreter && !function.HasBytecode()) {
+ if (FLAG_enable_interpreter || FLAG_use_bytecode_compiler) {
// TODO(regis): For now, we skip bytecode loading for functions that were
// synthesized and that do not have bytecode. Since they inherited the
// kernel offset of a concrete function, the wrong bytecode would be loaded.
@@ -1921,10 +1920,29 @@
case RawFunction::kDynamicInvocationForwarder:
case RawFunction::kImplicitClosureFunction:
break;
+ case RawFunction::kImplicitStaticFinalGetter:
+ if (!IsFieldInitializer(function, Z)) {
+ break;
+ }
+ // Fallthrough.
default: {
- bytecode_metadata_helper_.ReadMetadata(function);
+ // TODO(regis): Clean up this logic of when to compile.
+ // If the bytecode was previously loaded, we really want to compile.
+ if (!function.HasBytecode()) {
+ bytecode_metadata_helper_.ReadMetadata(function);
+ }
if (function.HasBytecode()) {
- return NULL;
+ if (FLAG_use_bytecode_compiler) {
+ BytecodeFlowGraphBuilder bytecode_compiler(
+ flow_graph_builder_, parsed_function(),
+ &(flow_graph_builder_->ic_data_array_));
+ FlowGraph* flow_graph = bytecode_compiler.BuildGraph();
+ if (flow_graph != nullptr) {
+ return flow_graph;
+ }
+ } else {
+ return nullptr;
+ }
}
}
}
@@ -2580,7 +2598,7 @@
AlternativeReadingScope alt(&reader_);
TryFinallyBlock* const saved_block = B->try_finally_block_;
- TryCatchBlock* const saved_try_catch_block = B->try_catch_block_;
+ TryCatchBlock* const saved_try_catch_block = B->CurrentTryCatchBlock();
const intptr_t saved_depth = B->context_depth_;
const intptr_t saved_try_depth = B->try_depth_;
@@ -2601,7 +2619,7 @@
bool changed_try_index = false;
intptr_t target_try_index = B->try_finally_block_->try_index();
while (B->CurrentTryIndex() != target_try_index) {
- B->try_catch_block_ = B->try_catch_block_->outer();
+ B->SetCurrentTryCatchBlock(B->CurrentTryCatchBlock()->outer());
changed_try_index = true;
}
if (changed_try_index) {
@@ -2628,7 +2646,7 @@
}
B->try_finally_block_ = saved_block;
- B->try_catch_block_ = saved_try_catch_block;
+ B->SetCurrentTryCatchBlock(saved_try_catch_block);
B->context_depth_ = saved_depth;
B->try_depth_ = saved_try_depth;
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.cc b/runtime/vm/compiler/frontend/kernel_to_il.cc
index c81353c..fff48a9 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.cc
+++ b/runtime/vm/compiler/frontend/kernel_to_il.cc
@@ -30,7 +30,7 @@
FlowGraphBuilder::FlowGraphBuilder(
ParsedFunction* parsed_function,
- const ZoneGrowableArray<const ICData*>& ic_data_array,
+ ZoneGrowableArray<const ICData*>* ic_data_array,
ZoneGrowableArray<intptr_t>* context_level_array,
InlineExitCollector* exit_collector,
bool optimizing,
@@ -39,6 +39,7 @@
bool inlining_unchecked_entry)
: BaseFlowGraphBuilder(parsed_function,
first_block_id - 1,
+ osr_id,
context_level_array,
exit_collector,
inlining_unchecked_entry),
@@ -47,8 +48,7 @@
zone_(translation_helper_.zone()),
parsed_function_(parsed_function),
optimizing_(optimizing),
- osr_id_(osr_id),
- ic_data_array_(ic_data_array),
+ ic_data_array_(*ic_data_array),
next_function_id_(0),
try_depth_(0),
catch_depth_(0),
@@ -57,6 +57,7 @@
scopes_(NULL),
breakable_block_(NULL),
switch_block_(NULL),
+ try_catch_block_(NULL),
try_finally_block_(NULL),
catch_block_(NULL) {
const Script& script =
@@ -245,8 +246,8 @@
TokenPosition::kNoSource, // Token position of catch block.
is_synthesized, // whether catch block was synthesized by FE compiler
AllocateBlockId(), CurrentTryIndex(), graph_entry_, handler_types,
- handler_index, *exception_var, *stacktrace_var, needs_stacktrace,
- GetNextDeoptId(), raw_exception_var, raw_stacktrace_var);
+ handler_index, needs_stacktrace, GetNextDeoptId(), exception_var,
+ stacktrace_var, raw_exception_var, raw_stacktrace_var);
graph_entry_->AddCatchEntry(entry);
Fragment instructions(entry);
@@ -416,18 +417,6 @@
return Fragment(load);
}
-Fragment FlowGraphBuilder::LoadField(const Field& field) {
- LoadFieldInstr* load = new (Z) LoadFieldInstr(
- Pop(), &MayCloneField(field), AbstractType::ZoneHandle(Z, field.type()),
- TokenPosition::kNoSource, parsed_function_);
- Push(load);
- return Fragment(load);
-}
-
-Fragment FlowGraphBuilder::LoadField(intptr_t offset, intptr_t class_id) {
- return BaseFlowGraphBuilder::LoadField(offset, class_id);
-}
-
Fragment FlowGraphBuilder::LoadLocal(LocalVariable* variable) {
if (variable->is_captured()) {
Fragment instructions;
@@ -1381,6 +1370,13 @@
prologue_info);
}
+void FlowGraphBuilder::SetCurrentTryCatchBlock(TryCatchBlock* try_catch_block) {
+ try_catch_block_ = try_catch_block;
+ SetCurrentTryIndex(try_catch_block == nullptr
+ ? CatchClauseNode::kInvalidTryIndex
+ : try_catch_block->try_index());
+}
+
} // namespace kernel
} // namespace dart
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.h b/runtime/vm/compiler/frontend/kernel_to_il.h
index 9f6b524..eeecd74 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.h
+++ b/runtime/vm/compiler/frontend/kernel_to_il.h
@@ -22,13 +22,13 @@
namespace kernel {
-class BaseFlowGraphBuilder;
class StreamingFlowGraphBuilder;
struct InferredTypeMetadata;
class BreakableBlock;
class CatchBlock;
class FlowGraphBuilder;
class SwitchBlock;
+class TryCatchBlock;
class TryFinallyBlock;
struct YieldContinuation {
@@ -45,7 +45,7 @@
class FlowGraphBuilder : public BaseFlowGraphBuilder {
public:
FlowGraphBuilder(ParsedFunction* parsed_function,
- const ZoneGrowableArray<const ICData*>& ic_data_array,
+ ZoneGrowableArray<const ICData*>* ic_data_array,
ZoneGrowableArray<intptr_t>* context_level_array,
InlineExitCollector* exit_collector,
bool optimizing,
@@ -114,8 +114,6 @@
Fragment RethrowException(TokenPosition position, int catch_try_index);
Fragment LoadClassId();
- Fragment LoadField(intptr_t offset, intptr_t class_id = kDynamicCid);
- Fragment LoadField(const Field& field);
Fragment LoadLocal(LocalVariable* variable);
Fragment InitStaticField(const Field& field);
Fragment NativeCall(const String* name, const Function* function);
@@ -172,16 +170,13 @@
LocalVariable* LookupVariable(intptr_t kernel_offset);
- bool IsCompiledForOsr() { return osr_id_ != DeoptId::kNone; }
-
TranslationHelper translation_helper_;
Thread* thread_;
Zone* zone_;
ParsedFunction* parsed_function_;
const bool optimizing_;
- intptr_t osr_id_;
- const ZoneGrowableArray<const ICData*>& ic_data_array_;
+ ZoneGrowableArray<const ICData*>& ic_data_array_;
intptr_t next_function_id_;
intptr_t AllocateFunctionId() { return next_function_id_++; }
@@ -212,6 +207,10 @@
return scopes_->catch_context_variables[try_depth_];
}
+ TryCatchBlock* CurrentTryCatchBlock() const { return try_catch_block_; }
+
+ void SetCurrentTryCatchBlock(TryCatchBlock* try_catch_block);
+
// A chained list of breakable blocks. Chaining and lookup is done by the
// [BreakableBlock] class.
BreakableBlock* breakable_block_;
@@ -220,6 +219,10 @@
// [SwitchBlock] class.
SwitchBlock* switch_block_;
+ // A chained list of try-catch blocks. Chaining and lookup is done by the
+ // [TryCatchBlock] class.
+ TryCatchBlock* try_catch_block_;
+
// A chained list of try-finally blocks. Chaining and lookup is done by the
// [TryFinallyBlock] class.
TryFinallyBlock* try_finally_block_;
@@ -235,6 +238,7 @@
friend class ConstantEvaluator;
friend class StreamingFlowGraphBuilder;
friend class SwitchBlock;
+ friend class TryCatchBlock;
friend class TryFinallyBlock;
DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
@@ -321,6 +325,30 @@
intptr_t try_index_;
};
+class TryCatchBlock {
+ public:
+ explicit TryCatchBlock(FlowGraphBuilder* builder,
+ intptr_t try_handler_index = -1)
+ : builder_(builder),
+ outer_(builder->CurrentTryCatchBlock()),
+ try_index_(try_handler_index == -1 ? builder->AllocateTryIndex()
+ : try_handler_index) {
+ builder->SetCurrentTryCatchBlock(this);
+ }
+
+ ~TryCatchBlock() { builder_->SetCurrentTryCatchBlock(outer_); }
+
+ intptr_t try_index() { return try_index_; }
+ TryCatchBlock* outer() const { return outer_; }
+
+ private:
+ FlowGraphBuilder* const builder_;
+ TryCatchBlock* const outer_;
+ intptr_t const try_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(TryCatchBlock);
+};
+
class TryFinallyBlock {
public:
TryFinallyBlock(FlowGraphBuilder* builder, intptr_t finalizer_kernel_offset)
diff --git a/runtime/vm/compiler/frontend/prologue_builder.cc b/runtime/vm/compiler/frontend/prologue_builder.cc
index 88490a4..a63ae30 100644
--- a/runtime/vm/compiler/frontend/prologue_builder.cc
+++ b/runtime/vm/compiler/frontend/prologue_builder.cc
@@ -34,8 +34,6 @@
BlockEntryInstr* PrologueBuilder::BuildPrologue(BlockEntryInstr* entry,
PrologueInfo* prologue_info) {
- const bool strong = FLAG_strong;
-
// We always have to build the graph, but we only link it sometimes.
const bool link = !is_inlining_ && !compiling_for_osr_;
@@ -52,14 +50,15 @@
nsm = BuildThrowNoSuchMethod();
}
if (check_arguments) {
- Fragment f = BuildTypeArgumentsLengthCheck(strong, nsm, expect_type_args);
+ Fragment f = BuildTypeArgumentsLengthCheck(nsm, expect_type_args);
if (link) prologue += f;
}
if (load_optional_arguments) {
- Fragment f = BuildOptionalParameterHandling(strong, nsm);
+ Fragment f = BuildOptionalParameterHandling(
+ nsm, parsed_function_->expression_temp_var());
if (link) prologue += f;
} else if (check_arguments) {
- Fragment f = BuildFixedParameterLengthChecks(strong, nsm);
+ Fragment f = BuildFixedParameterLengthChecks(nsm);
if (link) prologue += f;
}
if (function_.IsClosureFunction()) {
@@ -87,8 +86,7 @@
}
}
-Fragment PrologueBuilder::BuildTypeArgumentsLengthCheck(bool strong,
- JoinEntryInstr* nsm,
+Fragment PrologueBuilder::BuildTypeArgumentsLengthCheck(JoinEntryInstr* nsm,
bool expect_type_args) {
Fragment check_type_args;
JoinEntryInstr* done = BuildJoinEntry();
@@ -130,8 +128,9 @@
return Fragment(check_type_args.entry, done);
}
-Fragment PrologueBuilder::BuildOptionalParameterHandling(bool strong,
- JoinEntryInstr* nsm) {
+Fragment PrologueBuilder::BuildOptionalParameterHandling(
+ JoinEntryInstr* nsm,
+ LocalVariable* temp_var) {
Fragment copy_args_prologue;
const int num_fixed_params = function_.num_fixed_parameters();
const int num_opt_pos_params = function_.NumOptionalPositionalParameters();
@@ -244,13 +243,12 @@
ArgumentsDescriptor::first_named_entry_offset() - Array::data_offset();
// Start by alphabetically sorting the names of the optional parameters.
- LocalVariable** opt_param = new LocalVariable*[num_opt_named_params];
- int* opt_param_position = new int[num_opt_named_params];
- SortOptionalNamedParametersInto(opt_param, opt_param_position,
- num_fixed_params, num_params);
+ int* opt_param_position = Z->Alloc<int>(num_opt_named_params);
+ SortOptionalNamedParametersInto(opt_param_position, num_fixed_params,
+ num_params);
- LocalVariable* optional_count_vars_processed =
- parsed_function_->expression_temp_var();
+ ASSERT(temp_var != nullptr);
+ LocalVariable* optional_count_vars_processed = temp_var;
copy_args_prologue += IntConstant(0);
copy_args_prologue +=
StoreLocalRaw(TokenPosition::kNoSource, optional_count_vars_processed);
@@ -274,8 +272,10 @@
copy_args_prologue += LoadIndexed(/* index_scale = */ kWordSize);
// first name in sorted list of all names
- ASSERT(opt_param[i]->name().IsSymbol());
- copy_args_prologue += Constant(opt_param[i]->name());
+ const String& param_name = String::ZoneHandle(
+ Z, function_.ParameterNameAt(opt_param_position[i]));
+ ASSERT(param_name.IsSymbol());
+ copy_args_prologue += Constant(param_name);
// Compare the two names: Note that the ArgumentDescriptor array always
// terminates with a "null" name (i.e. kNullCid), which will prevent us
@@ -337,9 +337,6 @@
copy_args_prologue += Drop(); // tuple_diff
}
- delete[] opt_param;
- delete[] opt_param_position;
-
// If there are more arguments from the caller we haven't processed, go
// NSM.
TargetEntryInstr *done, *unknown_named_arg_passed;
@@ -361,8 +358,7 @@
return copy_args_prologue;
}
-Fragment PrologueBuilder::BuildFixedParameterLengthChecks(bool strong,
- JoinEntryInstr* nsm) {
+Fragment PrologueBuilder::BuildFixedParameterLengthChecks(JoinEntryInstr* nsm) {
Fragment check_args;
JoinEntryInstr* done = BuildJoinEntry();
@@ -448,24 +444,21 @@
return handling;
}
-void PrologueBuilder::SortOptionalNamedParametersInto(LocalVariable** opt_param,
- int* opt_param_position,
+void PrologueBuilder::SortOptionalNamedParametersInto(int* opt_param_position,
int num_fixed_params,
int num_params) {
- LocalScope* scope = parsed_function_->node_sequence()->scope();
+ String& name = String::Handle(Z);
+ String& name_i = String::Handle(Z);
for (int pos = num_fixed_params; pos < num_params; pos++) {
- LocalVariable* parameter = scope->VariableAt(pos);
- const String& opt_param_name = parameter->name();
+ name = function_.ParameterNameAt(pos);
int i = pos - num_fixed_params;
while (--i >= 0) {
- LocalVariable* param_i = opt_param[i];
- const intptr_t result = opt_param_name.CompareTo(param_i->name());
+ name_i = function_.ParameterNameAt(opt_param_position[i]);
+ const intptr_t result = name.CompareTo(name_i);
ASSERT(result != 0);
if (result > 0) break;
- opt_param[i + 1] = opt_param[i];
opt_param_position[i + 1] = opt_param_position[i];
}
- opt_param[i + 1] = parameter;
opt_param_position[i + 1] = pos;
}
}
diff --git a/runtime/vm/compiler/frontend/prologue_builder.h b/runtime/vm/compiler/frontend/prologue_builder.h
index 30623d8..3f58e13 100644
--- a/runtime/vm/compiler/frontend/prologue_builder.h
+++ b/runtime/vm/compiler/frontend/prologue_builder.h
@@ -47,19 +47,19 @@
BlockEntryInstr* BuildPrologue(BlockEntryInstr* entry,
PrologueInfo* prologue_info);
+ Fragment BuildOptionalParameterHandling(JoinEntryInstr* nsm,
+ LocalVariable* temp_var);
+
static bool HasEmptyPrologue(const Function& function);
static bool PrologueSkippableOnUncheckedEntry(const Function& function);
intptr_t last_used_block_id() const { return last_used_block_id_; }
private:
- Fragment BuildTypeArgumentsLengthCheck(bool strong,
- JoinEntryInstr* nsm,
+ Fragment BuildTypeArgumentsLengthCheck(JoinEntryInstr* nsm,
bool expect_type_args);
- Fragment BuildOptionalParameterHandling(bool strong, JoinEntryInstr* nsm);
-
- Fragment BuildFixedParameterLengthChecks(bool strong, JoinEntryInstr* nsm);
+ Fragment BuildFixedParameterLengthChecks(JoinEntryInstr* nsm);
Fragment BuildClosureContextHandling();
@@ -79,8 +79,7 @@
return Instance::null_instance();
}
- void SortOptionalNamedParametersInto(LocalVariable** opt_param,
- int* opt_param_position,
+ void SortOptionalNamedParametersInto(int* opt_param_position,
int num_fixed_params,
int num_params);
diff --git a/runtime/vm/compiler/jit/compiler.cc b/runtime/vm/compiler/jit/compiler.cc
index e84b28d..50ff623 100644
--- a/runtime/vm/compiler/jit/compiler.cc
+++ b/runtime/vm/compiler/jit/compiler.cc
@@ -140,6 +140,7 @@
bool UseKernelFrontEndFor(ParsedFunction* parsed_function) {
const Function& function = parsed_function->function();
return (function.kernel_offset() > 0) ||
+ (FLAG_use_bytecode_compiler && function.HasBytecode()) ||
(function.kind() == RawFunction::kNoSuchMethodDispatcher) ||
(function.kind() == RawFunction::kInvokeFieldDispatcher);
}
@@ -154,7 +155,7 @@
FlowGraph* DartCompilationPipeline::BuildFlowGraph(
Zone* zone,
ParsedFunction* parsed_function,
- const ZoneGrowableArray<const ICData*>& ic_data_array,
+ ZoneGrowableArray<const ICData*>* ic_data_array,
intptr_t osr_id,
bool optimized) {
if (UseKernelFrontEndFor(parsed_function)) {
@@ -167,7 +168,7 @@
parsed_function->function().HasBytecode()));
return graph;
}
- FlowGraphBuilder builder(*parsed_function, ic_data_array,
+ FlowGraphBuilder builder(*parsed_function, *ic_data_array,
/* not building var desc */ NULL,
/* not inlining */ NULL, osr_id);
@@ -207,13 +208,13 @@
FlowGraph* IrregexpCompilationPipeline::BuildFlowGraph(
Zone* zone,
ParsedFunction* parsed_function,
- const ZoneGrowableArray<const ICData*>& ic_data_array,
+ ZoneGrowableArray<const ICData*>* ic_data_array,
intptr_t osr_id,
bool optimized) {
// Compile to the dart IR.
RegExpEngine::CompilationResult result =
RegExpEngine::CompileIR(parsed_function->regexp_compile_data(),
- parsed_function, ic_data_array, osr_id);
+ parsed_function, *ic_data_array, osr_id);
backtrack_goto_ = result.backtrack_goto;
// Allocate variables now that we know the number of locals.
@@ -826,7 +827,7 @@
NOT_IN_PRODUCT(TimelineDurationScope tds(thread(), compiler_timeline,
"BuildFlowGraph"));
flow_graph = pipeline->BuildFlowGraph(
- zone, parsed_function(), *ic_data_array, osr_id(), optimized());
+ zone, parsed_function(), ic_data_array, osr_id(), optimized());
}
// TODO(regis): Revisit.
@@ -1377,7 +1378,7 @@
} else {
parsed_function->EnsureKernelScopes();
kernel::FlowGraphBuilder builder(
- parsed_function, *ic_data_array, context_level_array,
+ parsed_function, ic_data_array, context_level_array,
/* not inlining */ NULL, false, Compiler::kNoOSRDeoptId);
builder.BuildGraph();
}
diff --git a/runtime/vm/compiler/jit/compiler.h b/runtime/vm/compiler/jit/compiler.h
index d938450..1636109 100644
--- a/runtime/vm/compiler/jit/compiler.h
+++ b/runtime/vm/compiler/jit/compiler.h
@@ -38,7 +38,7 @@
virtual FlowGraph* BuildFlowGraph(
Zone* zone,
ParsedFunction* parsed_function,
- const ZoneGrowableArray<const ICData*>& ic_data_array,
+ ZoneGrowableArray<const ICData*>* ic_data_array,
intptr_t osr_id,
bool optimized) = 0;
virtual void FinalizeCompilation(FlowGraph* flow_graph) = 0;
@@ -47,32 +47,30 @@
class DartCompilationPipeline : public CompilationPipeline {
public:
- virtual void ParseFunction(ParsedFunction* parsed_function);
+ void ParseFunction(ParsedFunction* parsed_function) override;
- virtual FlowGraph* BuildFlowGraph(
- Zone* zone,
- ParsedFunction* parsed_function,
- const ZoneGrowableArray<const ICData*>& ic_data_array,
- intptr_t osr_id,
- bool optimized);
+ FlowGraph* BuildFlowGraph(Zone* zone,
+ ParsedFunction* parsed_function,
+ ZoneGrowableArray<const ICData*>* ic_data_array,
+ intptr_t osr_id,
+ bool optimized) override;
- virtual void FinalizeCompilation(FlowGraph* flow_graph);
+ void FinalizeCompilation(FlowGraph* flow_graph) override;
};
class IrregexpCompilationPipeline : public CompilationPipeline {
public:
IrregexpCompilationPipeline() : backtrack_goto_(NULL) {}
- virtual void ParseFunction(ParsedFunction* parsed_function);
+ void ParseFunction(ParsedFunction* parsed_function) override;
- virtual FlowGraph* BuildFlowGraph(
- Zone* zone,
- ParsedFunction* parsed_function,
- const ZoneGrowableArray<const ICData*>& ic_data_array,
- intptr_t osr_id,
- bool optimized);
+ FlowGraph* BuildFlowGraph(Zone* zone,
+ ParsedFunction* parsed_function,
+ ZoneGrowableArray<const ICData*>* ic_data_array,
+ intptr_t osr_id,
+ bool optimized) override;
- virtual void FinalizeCompilation(FlowGraph* flow_graph);
+ void FinalizeCompilation(FlowGraph* flow_graph) override;
private:
IndirectGotoInstr* backtrack_goto_;
diff --git a/runtime/vm/constants_kbc.h b/runtime/vm/constants_kbc.h
index a7b8922..8142e11 100644
--- a/runtime/vm/constants_kbc.h
+++ b/runtime/vm/constants_kbc.h
@@ -588,7 +588,7 @@
// If B is not 0 then EntryOptional bytecode is followed by B LoadConstant
// bytecodes specifying default values for optional arguments.
//
-// If C is not 0 then EntryOptional is followed by 2 * B LoadConstant
+// If C is not 0 then EntryOptional is followed by 2 * C LoadConstant
// bytecodes.
// Bytecode at 2 * i specifies name of the i-th named argument and at
// 2 * i + 1 default value. rA part of the LoadConstant bytecode specifies
@@ -597,7 +597,8 @@
// prologues are implemented on other architectures.
//
// Note: Unlike Entry bytecode EntryOptional does not setup the frame for
-// local variables this is done by a separate bytecode Frame.
+// local variables; this is done by a separate bytecode Frame, which should
+// follow EntryOptional and its LoadConstant instructions.
//
// - EntryOptimized rD
//
@@ -1037,6 +1038,12 @@
return names[DecodeOpcode(instr)];
}
+ enum SpecialIndex {
+ kExceptionSpecialIndex,
+ kStackTraceSpecialIndex,
+ kSpecialIndexCount
+ };
+
static const intptr_t kOpShift = 0;
static const intptr_t kAShift = 8;
static const intptr_t kAMask = 0xFF;
@@ -1048,6 +1055,7 @@
static const intptr_t kDMask = 0xFFFF;
static const intptr_t kYShift = 24;
static const intptr_t kYMask = 0xFF;
+ static const intptr_t kTShift = 8;
static KBCInstr Encode(Opcode op, uintptr_t a, uintptr_t b, uintptr_t c) {
ASSERT((a & kAMask) == a);
@@ -1083,10 +1091,22 @@
return (bc >> kBShift) & kBMask;
}
+ DART_FORCE_INLINE static uint8_t DecodeC(KBCInstr bc) {
+ return (bc >> kCShift) & kCMask;
+ }
+
DART_FORCE_INLINE static uint16_t DecodeD(KBCInstr bc) {
return (bc >> kDShift) & kDMask;
}
+ DART_FORCE_INLINE static int16_t DecodeX(KBCInstr bc) {
+ return static_cast<int16_t>((bc >> kDShift) & kDMask);
+ }
+
+ DART_FORCE_INLINE static int32_t DecodeT(KBCInstr bc) {
+ return static_cast<int32_t>(bc) >> kTShift;
+ }
+
DART_FORCE_INLINE static Opcode DecodeOpcode(KBCInstr bc) {
return static_cast<Opcode>(bc & 0xFF);
}
diff --git a/runtime/vm/dart.cc b/runtime/vm/dart.cc
index d2dcdb2..18bb9b2 100644
--- a/runtime/vm/dart.cc
+++ b/runtime/vm/dart.cc
@@ -136,6 +136,12 @@
}
#endif
+ if (FLAG_use_bytecode_compiler) {
+ // Interpreter is not able to trigger compilation yet.
+ // TODO(alexmarkov): Revise
+ FLAG_enable_interpreter = false;
+ }
+
if (FLAG_enable_interpreter) {
#if defined(USING_SIMULATOR) || defined(TARGET_ARCH_DBC)
return strdup(
diff --git a/runtime/vm/dart_entry.cc b/runtime/vm/dart_entry.cc
index 435fb26..a49749d 100644
--- a/runtime/vm/dart_entry.cc
+++ b/runtime/vm/dart_entry.cc
@@ -339,6 +339,26 @@
return NameAt(index) == other.raw();
}
+RawArray* ArgumentsDescriptor::GetArgumentNames() const {
+ const intptr_t num_named_args = NamedCount();
+ if (num_named_args == 0) {
+ return Array::null();
+ }
+
+ Zone* zone = Thread::Current()->zone();
+ const Array& names =
+ Array::Handle(zone, Array::New(num_named_args, Heap::kOld));
+ String& name = String::Handle(zone);
+ const intptr_t num_pos_args = PositionalCount();
+ for (intptr_t i = 0; i < num_named_args; ++i) {
+ const intptr_t index = PositionAt(i) - num_pos_args;
+ name = NameAt(i);
+ ASSERT(names.At(index) == Object::null());
+ names.SetAt(index, name);
+ }
+ return names.raw();
+}
+
intptr_t ArgumentsDescriptor::type_args_len_offset() {
return Array::element_offset(kTypeArgsLenIndex);
}
diff --git a/runtime/vm/dart_entry.h b/runtime/vm/dart_entry.h
index 0898a93..cfcd404 100644
--- a/runtime/vm/dart_entry.h
+++ b/runtime/vm/dart_entry.h
@@ -45,6 +45,8 @@
RawString* NameAt(intptr_t i) const;
intptr_t PositionAt(intptr_t i) const;
bool MatchesNameAt(intptr_t i, const String& other) const;
+ // Returns array of argument names in the arguments order.
+ RawArray* GetArgumentNames() const;
// Generated code support.
static intptr_t type_args_len_offset();
diff --git a/runtime/vm/flag_list.h b/runtime/vm/flag_list.h
index 4704064..8ce163e 100644
--- a/runtime/vm/flag_list.h
+++ b/runtime/vm/flag_list.h
@@ -185,6 +185,7 @@
D(trace_zones, bool, false, "Traces allocation sizes in the zone.") \
P(truncating_left_shift, bool, true, \
"Optimize left shift to truncate if possible") \
+ C(use_bytecode_compiler, false, false, bool, false, "Compile from bytecode") \
P(use_compactor, bool, false, "Compact the heap during old-space GC.") \
P(use_cha_deopt, bool, true, \
"Use class hierarchy analysis even if it can cause deoptimization.") \
diff --git a/runtime/vm/interpreter.cc b/runtime/vm/interpreter.cc
index 63a9372..466b216 100644
--- a/runtime/vm/interpreter.cc
+++ b/runtime/vm/interpreter.cc
@@ -1524,7 +1524,7 @@
exit_fp); \
} \
ASSERT(reinterpret_cast<uword>(fp_) < stack_limit()); \
- return special_[kExceptionSpecialIndex]; \
+ return special_[KernelBytecode::kExceptionSpecialIndex]; \
} \
goto DispatchAfterException; \
} while (0)
@@ -1544,7 +1544,7 @@
thread->set_top_exit_frame_info(exit_fp); \
thread->set_top_resource(top_resource); \
thread->set_vm_tag(vm_tag); \
- return special_[kExceptionSpecialIndex]; \
+ return special_[KernelBytecode::kExceptionSpecialIndex]; \
} \
goto DispatchAfterException; \
} while (0)
@@ -4837,8 +4837,8 @@
ASSERT(raw_exception != Object::null());
thread->set_active_exception(Object::null_object());
thread->set_active_stacktrace(Object::null_object());
- special_[kExceptionSpecialIndex] = raw_exception;
- special_[kStackTraceSpecialIndex] = raw_stacktrace;
+ special_[KernelBytecode::kExceptionSpecialIndex] = raw_exception;
+ special_[KernelBytecode::kStackTraceSpecialIndex] = raw_stacktrace;
pc_ = thread->resume_pc();
} else {
pc_ = pc;
diff --git a/runtime/vm/interpreter.h b/runtime/vm/interpreter.h
index c49c3b5..3d98670 100644
--- a/runtime/vm/interpreter.h
+++ b/runtime/vm/interpreter.h
@@ -91,12 +91,6 @@
return intrinsics_[id] != NULL;
}
- enum SpecialIndex {
- kExceptionSpecialIndex,
- kStackTraceSpecialIndex,
- kSpecialIndexCount
- };
-
void VisitObjectPointers(ObjectPointerVisitor* visitor);
private:
@@ -113,7 +107,7 @@
RawObjectPool* pp_; // Pool Pointer.
RawArray* argdesc_; // Arguments Descriptor: used to pass information between
// call instruction and the function entry.
- RawObject* special_[kSpecialIndexCount];
+ RawObject* special_[KernelBytecode::kSpecialIndexCount];
static IntrinsicHandler intrinsics_[kIntrinsicCount];
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 434de7b..98ecf52 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -5971,8 +5971,10 @@
// We should not have loaded the bytecode if the function had code.
ASSERT(!HasCode());
- // Set the code entry_point to InterpretCall stub.
- SetInstructions(Code::Handle(StubCode::InterpretCall_entry()->code()));
+ if (!FLAG_use_bytecode_compiler) {
+ // Set the code entry_point to InterpretCall stub.
+ SetInstructions(Code::Handle(StubCode::InterpretCall_entry()->code()));
+ }
}
bool Function::HasBytecode() const {
@@ -6002,9 +6004,7 @@
ASSERT(Thread::Current()->IsMutatorThread());
StorePointer(&raw_ptr()->unoptimized_code_, Code::null());
- if (FLAG_enable_interpreter) {
- StorePointer(&raw_ptr()->bytecode_, Code::null());
- }
+ StorePointer(&raw_ptr()->bytecode_, Code::null());
SetInstructions(Code::Handle(StubCode::LazyCompile_entry()->code()));
#endif // defined(DART_PRECOMPILED_RUNTIME)
diff --git a/runtime/vm/parser.cc b/runtime/vm/parser.cc
index 1ec727e..0e7670e 100644
--- a/runtime/vm/parser.cc
+++ b/runtime/vm/parser.cc
@@ -337,7 +337,8 @@
// parameters array, which can be used to access the raw parameters (i.e. not
// the potentially variables which are in the context)
- for (intptr_t param = 0; param < function().NumParameters(); ++param) {
+ raw_parameters_ = new (Z) ZoneGrowableArray<LocalVariable*>(Z, num_params);
+ for (intptr_t param = 0; param < num_params; ++param) {
LocalVariable* raw_parameter = scope->VariableAt(param);
if (raw_parameter->is_captured()) {
String& tmp = String::ZoneHandle(Z);
@@ -366,7 +367,7 @@
VariableIndex(function().NumParameters() - param));
}
}
- raw_parameters_.Add(raw_parameter);
+ raw_parameters_->Add(raw_parameter);
}
if (function_type_arguments_ != NULL) {
LocalVariable* raw_type_args_parameter = function_type_arguments_;
@@ -445,6 +446,12 @@
num_stack_locals_ = num_stack_locals;
}
+void ParsedFunction::AllocateBytecodeVariables(intptr_t num_stack_locals) {
+ ASSERT(!function().IsIrregexpFunction());
+ first_parameter_index_ = VariableIndex(function().num_fixed_parameters());
+ num_stack_locals_ = num_stack_locals;
+}
+
struct Parser::Block : public ZoneAllocated {
Block(Block* outer_block, LocalScope* local_scope, SequenceNode* seq)
: parent(outer_block), scope(local_scope), statements(seq) {
diff --git a/runtime/vm/parser.h b/runtime/vm/parser.h
index 2383ce2..712e5c3 100644
--- a/runtime/vm/parser.h
+++ b/runtime/vm/parser.h
@@ -200,6 +200,7 @@
void AllocateVariables();
void AllocateIrregexpVariables(intptr_t num_stack_locals);
+ void AllocateBytecodeVariables(intptr_t num_stack_locals);
void record_await() { have_seen_await_expr_ = true; }
bool have_seen_await() const { return have_seen_await_expr_; }
@@ -229,8 +230,16 @@
return raw_type_arguments_var_;
}
+ void SetRawTypeArgumentsVariable(LocalVariable* raw_type_arguments_var) {
+ raw_type_arguments_var_ = raw_type_arguments_var;
+ }
+
+ void SetRawParameters(ZoneGrowableArray<LocalVariable*>* raw_parameters) {
+ raw_parameters_ = raw_parameters;
+ }
+
LocalVariable* RawParameterVariable(intptr_t i) const {
- return raw_parameters_[i];
+ return raw_parameters_->At(i);
}
private:
@@ -252,7 +261,7 @@
ZoneGrowableArray<const Instance*>* default_parameter_values_;
LocalVariable* raw_type_arguments_var_;
- ZoneGrowableArray<LocalVariable*> raw_parameters_;
+ ZoneGrowableArray<LocalVariable*>* raw_parameters_ = nullptr;
VariableIndex first_parameter_index_;
int num_stack_locals_;
diff --git a/runtime/vm/stack_frame_kbc.h b/runtime/vm/stack_frame_kbc.h
index 8a6063e..8bfe809 100644
--- a/runtime/vm/stack_frame_kbc.h
+++ b/runtime/vm/stack_frame_kbc.h
@@ -46,6 +46,7 @@
static const int kKBCCallerSpSlotFromFp = -kKBCDartFrameFixedSize - 1;
static const int kKBCPcMarkerSlotFromFp = -3;
static const int kKBCFunctionSlotFromFp = -4;
+static const int kKBCParamEndSlotFromFp = 4;
// Entry and exit frame layout.
static const int kKBCEntrySavedSlots = 3;