[vm/ffi] Don't make force-optimized code masquerade as unoptimized code.
Instead, we set a bit on force-optimized code so that deoptimization knows to skip it and exception
handling will execute catch-entry moves.
In addition, we don't throw away deoptimization info for such force-optimized code, so that exception
handling works.
Bug: dartbug.com/37311
Change-Id: If5bcff41a98c0053c581648a9ee22b3302ceea04
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/106740
Commit-Queue: Samir Jindel <sjindel@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
diff --git a/runtime/vm/compiler/backend/flow_graph_checker.cc b/runtime/vm/compiler/backend/flow_graph_checker.cc
index 4660d82..7d20887 100644
--- a/runtime/vm/compiler/backend/flow_graph_checker.cc
+++ b/runtime/vm/compiler/backend/flow_graph_checker.cc
@@ -351,6 +351,23 @@
// ASSERT(constant->GetBlock() == flow_graph_->graph_entry());
}
+void FlowGraphChecker::VisitInstanceCall(InstanceCallInstr* instr) {
+ const Function& function = flow_graph_->function();
+
+ // Force-optimized functions may not have instance calls inside them because
+ // we do not reset ICData for these.
+ ASSERT(!function.ForceOptimize());
+}
+
+void FlowGraphChecker::VisitPolymorphicInstanceCall(
+ PolymorphicInstanceCallInstr* instr) {
+ const Function& function = flow_graph_->function();
+
+ // Force-optimized functions may not have instance calls inside them because
+ // we do not reset ICData for these.
+ ASSERT(!function.ForceOptimize());
+}
+
void FlowGraphChecker::VisitPhi(PhiInstr* phi) {
// Make sure the definition of each input value of a Phi dominates
// the corresponding incoming edge, as defined by order.
diff --git a/runtime/vm/compiler/backend/flow_graph_checker.h b/runtime/vm/compiler/backend/flow_graph_checker.h
index 088ad3e..63a77bd 100644
--- a/runtime/vm/compiler/backend/flow_graph_checker.h
+++ b/runtime/vm/compiler/backend/flow_graph_checker.h
@@ -56,6 +56,9 @@
void VisitIndirectGoto(IndirectGotoInstr* jmp) override;
void VisitBranch(BranchInstr* branch) override;
void VisitRedefinition(RedefinitionInstr* def) override;
+ void VisitInstanceCall(InstanceCallInstr* instr) override;
+ void VisitPolymorphicInstanceCall(
+ PolymorphicInstanceCallInstr* instr) override;
FlowGraph* const flow_graph_;
BlockEntryInstr* current_block_;
diff --git a/runtime/vm/compiler/jit/compiler.cc b/runtime/vm/compiler/jit/compiler.cc
index 66e293f..2e76f5d 100644
--- a/runtime/vm/compiler/jit/compiler.cc
+++ b/runtime/vm/compiler/jit/compiler.cc
@@ -378,9 +378,7 @@
// CreateDeoptInfo uses the object pool and needs to be done before
// FinalizeCode.
Array& deopt_info_array = Array::Handle(zone, Object::empty_array().raw());
- if (!function.ForceOptimize()) {
- deopt_info_array = graph_compiler->CreateDeoptInfo(assembler);
- }
+ deopt_info_array = graph_compiler->CreateDeoptInfo(assembler);
// Allocates instruction object. Since this occurs only at safepoint,
// there can be no concurrent access to the instruction page.
@@ -408,9 +406,8 @@
if (function.ForceOptimize()) {
ASSERT(optimized() && thread()->IsMutatorThread());
- code.set_is_optimized(false);
+ code.set_is_force_optimized(true);
function.AttachCode(code);
- function.set_unoptimized_code(code);
function.SetWasCompiled(true);
} else if (optimized()) {
// Installs code while at safepoint.
@@ -947,6 +944,7 @@
RawError* Compiler::EnsureUnoptimizedCode(Thread* thread,
const Function& function) {
+ ASSERT(!function.ForceOptimize());
if (function.unoptimized_code() != Object::null()) {
return Error::null();
}
diff --git a/runtime/vm/debugger.cc b/runtime/vm/debugger.cc
index af1ca5c..89f8442 100644
--- a/runtime/vm/debugger.cc
+++ b/runtime/vm/debugger.cc
@@ -304,6 +304,8 @@
}
#endif
if (bytecode_.IsNull()) {
+    // Force-optimized functions should not be debuggable.
+ ASSERT(!function_.ForceOptimize());
function_.EnsureHasCompiledUnoptimizedCode();
code_ = function_.unoptimized_code();
}
diff --git a/runtime/vm/deopt_instructions.cc b/runtime/vm/deopt_instructions.cc
index 1a71bf6..b28c397 100644
--- a/runtime/vm/deopt_instructions.cc
+++ b/runtime/vm/deopt_instructions.cc
@@ -1165,6 +1165,7 @@
deopt_instr =
new (zone()) DeoptUint32Instr(ToCpuRegisterSource(source_loc));
break;
+ case kUnboxedFloat:
case kUnboxedDouble:
deopt_instr = new (zone()) DeoptDoubleInstr(
ToFpuRegisterSource(source_loc, Location::kDoubleStackSlot));
diff --git a/runtime/vm/isolate_reload.cc b/runtime/vm/isolate_reload.cc
index eb4aacb..540ce11 100644
--- a/runtime/vm/isolate_reload.cc
+++ b/runtime/vm/isolate_reload.cc
@@ -922,7 +922,11 @@
if (frame->IsDartFrame() && !frame->is_interpreted()) {
func = frame->LookupDartFunction();
ASSERT(!func.IsNull());
- func.EnsureHasCompiledUnoptimizedCode();
+ // Force-optimized functions don't need unoptimized code because their
+ // optimized code cannot deopt.
+ if (!func.ForceOptimize()) {
+ func.EnsureHasCompiledUnoptimizedCode();
+ }
}
}
}
@@ -1889,7 +1893,7 @@
bytecode.ResetICDatas(zone);
} else {
code = frame->LookupDartCode();
- if (code.is_optimized()) {
+ if (code.is_optimized() && !code.is_force_optimized()) {
// If this code is optimized, we need to reset the ICs in the
// corresponding unoptimized code, which will be executed when the stack
// unwinds to the optimized code.
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 4e5f849..6fe3c0c 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -5747,6 +5747,7 @@
}
void Function::EnsureHasCompiledUnoptimizedCode() const {
+ ASSERT(!ForceOptimize());
Thread* thread = Thread::Current();
ASSERT(thread->IsMutatorThread());
DEBUG_ASSERT(thread->TopErrorHandlerIsExitFrame());
@@ -8095,7 +8096,7 @@
}
// Compiling in unoptimized mode should never fail if there are no errors.
ASSERT(HasCode());
- ASSERT(unoptimized_code() == result.raw());
+ ASSERT(ForceOptimize() || unoptimized_code() == result.raw());
return CurrentCode();
}
@@ -14480,6 +14481,10 @@
set_state_bits(OptimizedBit::update(value, raw_ptr()->state_bits_));
}
+void Code::set_is_force_optimized(bool value) const {
+ set_state_bits(ForceOptimizedBit::update(value, raw_ptr()->state_bits_));
+}
+
void Code::set_is_alive(bool value) const {
set_state_bits(AliveBit::update(value, raw_ptr()->state_bits_));
}
@@ -14753,6 +14758,7 @@
result ^= raw;
result.set_pointer_offsets_length(pointer_offsets_length);
result.set_is_optimized(false);
+ result.set_is_force_optimized(false);
result.set_is_alive(false);
NOT_IN_PRODUCT(result.set_comments(Comments::New(0)));
NOT_IN_PRODUCT(result.set_compile_timestamp(0));
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index 58f52b3..05c750e 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -5200,6 +5200,14 @@
return Code::OptimizedBit::decode(code->ptr()->state_bits_);
}
+ bool is_force_optimized() const {
+ return ForceOptimizedBit::decode(raw_ptr()->state_bits_);
+ }
+ void set_is_force_optimized(bool value) const;
+ static bool IsForceOptimized(RawCode* code) {
+ return Code::ForceOptimizedBit::decode(code->ptr()->state_bits_);
+ }
+
bool is_alive() const { return AliveBit::decode(raw_ptr()->state_bits_); }
void set_is_alive(bool value) const;
@@ -5553,12 +5561,19 @@
friend class RawCode;
enum {
kOptimizedBit = 0,
- kAliveBit = 1,
- kPtrOffBit = 2,
- kPtrOffSize = 30,
+ kForceOptimizedBit = 1,
+ kAliveBit = 2,
+ kPtrOffBit = 3,
+ kPtrOffSize = 29,
};
class OptimizedBit : public BitField<int32_t, bool, kOptimizedBit, 1> {};
+
+ // Force-optimized is true if the Code was generated for a function with
+ // Function::ForceOptimize().
+ class ForceOptimizedBit
+ : public BitField<int32_t, bool, kForceOptimizedBit, 1> {};
+
class AliveBit : public BitField<int32_t, bool, kAliveBit, 1> {};
class PtrOffBits
: public BitField<int32_t, intptr_t, kPtrOffBit, kPtrOffSize> {};
diff --git a/runtime/vm/runtime_entry.cc b/runtime/vm/runtime_entry.cc
index c886658..809dcdc 100644
--- a/runtime/vm/runtime_entry.cc
+++ b/runtime/vm/runtime_entry.cc
@@ -2132,7 +2132,7 @@
for (intptr_t i = 0; i < num_frames; i++) {
ActivationFrame* frame = stack->FrameAt(i);
#ifndef DART_PRECOMPILED_RUNTIME
- if (!frame->IsInterpreted()) {
+ if (!frame->IsInterpreted() && !frame->function().ForceOptimize()) {
// Ensure that we have unoptimized code.
frame->function().EnsureHasCompiledUnoptimizedCode();
}
@@ -2510,6 +2510,11 @@
void DeoptimizeAt(const Code& optimized_code, StackFrame* frame) {
ASSERT(optimized_code.is_optimized());
+
+ // Force-optimized code is optimized code which cannot deoptimize and doesn't
+ // have unoptimized code to fall back to.
+ ASSERT(!optimized_code.is_force_optimized());
+
Thread* thread = Thread::Current();
Zone* zone = thread->zone();
const Function& function = Function::Handle(zone, optimized_code.function());
@@ -2590,7 +2595,8 @@
while (frame != NULL) {
if (!frame->is_interpreted()) {
optimized_code = frame->LookupDartCode();
- if (optimized_code.is_optimized()) {
+ if (optimized_code.is_optimized() &&
+ !optimized_code.is_force_optimized()) {
DeoptimizeAt(optimized_code, frame);
}
}
diff --git a/runtime/vm/source_report.cc b/runtime/vm/source_report.cc
index febc594..db396d9 100644
--- a/runtime/vm/source_report.cc
+++ b/runtime/vm/source_report.cc
@@ -88,6 +88,9 @@
}
}
+ // These don't have unoptimized code and are only used for synthetic stubs.
+ if (func.ForceOptimize()) return true;
+
switch (func.kind()) {
case RawFunction::kRegularFunction:
case RawFunction::kClosureFunction:
diff --git a/tests/ffi/function_test.dart b/tests/ffi/function_test.dart
index 2865d30..b8e62e8 100644
--- a/tests/ffi/function_test.dart
+++ b/tests/ffi/function_test.dart
@@ -38,6 +38,7 @@
testFloatRounding();
testVoidReturn();
testNoArgs();
+ testException();
}
}
@@ -430,3 +431,15 @@
double result = inventFloatValue();
Expect.approxEquals(1337.0, result);
}
+
+// Throw an exception from within the trampoline and collect a stack trace
+// that includes its frame.
+void testException() {
+ try {
+ sumPlus42(null, null);
+ } catch (e, s) {
+ print("$e, $s");
+ return;
+ }
+ throw "Didn't throw!";
+}