[vm/compiler] Introduce pass specific ids for IL instructions.
We already have some scratch space in the Instruction for
storing either the place id for the load optimizer or the
lifetime position for the register allocator. Some other
passes also reuse the same scratch space.
This change removes pass-specific getters/setters from
the Instruction and adds a tag to the stored id, to make
sure that different passes don't get confused by
each other's ids.
Change-Id: Ie88b6c9cf60604d3ef5d289c23363f18bdb0d014
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/143524
Reviewed-by: Alexander Markov <alexmarkov@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index 9ee1ce5..a0816ba 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -17,6 +17,7 @@
#include "vm/compiler/backend/compile_type.h"
#include "vm/compiler/backend/locations.h"
#include "vm/compiler/backend/slot.h"
+#include "vm/compiler/compiler_pass.h"
#include "vm/compiler/compiler_state.h"
#include "vm/compiler/ffi/marshaller.h"
#include "vm/compiler/ffi/native_calling_convention.h"
@@ -772,7 +773,6 @@
explicit Instruction(intptr_t deopt_id = DeoptId::kNone)
: deopt_id_(deopt_id),
- lifetime_position_(kNoPlaceId),
previous_(NULL),
next_(NULL),
env_(NULL),
@@ -967,8 +967,21 @@
void RemoveEnvironment();
void ReplaceInEnvironment(Definition* current, Definition* replacement);
- intptr_t lifetime_position() const { return lifetime_position_; }
- void set_lifetime_position(intptr_t pos) { lifetime_position_ = pos; }
+ // Different compiler passes can assign pass specific ids to the instruction.
+ // Only one id can be stored at a time.
+ intptr_t GetPassSpecificId(CompilerPass::Id pass) const {
+ return (PassSpecificId::DecodePass(pass_specific_id_) == pass)
+ ? PassSpecificId::DecodeId(pass_specific_id_)
+ : PassSpecificId::kNoId;
+ }
+ void SetPassSpecificId(CompilerPass::Id pass, intptr_t id) {
+ pass_specific_id_ = PassSpecificId::Encode(pass, id);
+ }
+ bool HasPassSpecificId(CompilerPass::Id pass) const {
+ return (PassSpecificId::DecodePass(pass_specific_id_) == pass) &&
+ (PassSpecificId::DecodeId(pass_specific_id_) !=
+ PassSpecificId::kNoId);
+ }
bool HasUnmatchedInputRepresentations() const;
@@ -1043,11 +1056,6 @@
// Get the block entry for this instruction.
virtual BlockEntryInstr* GetBlock();
- // Place identifiers used by the load optimization pass.
- intptr_t place_id() const { return place_id_; }
- void set_place_id(intptr_t place_id) { place_id_ = place_id; }
- bool HasPlaceId() const { return place_id_ != kNoPlaceId; }
-
intptr_t inlining_id() const { return inlining_id_; }
void set_inlining_id(intptr_t value) {
ASSERT(value >= 0);
@@ -1145,13 +1153,28 @@
virtual void RawSetInputAt(intptr_t i, Value* value) = 0;
- enum { kNoPlaceId = -1 };
+ class PassSpecificId {
+ public:
+ static intptr_t Encode(CompilerPass::Id pass, intptr_t id) {
+ return (id << kPassBits) | pass;
+ }
+
+ static CompilerPass::Id DecodePass(intptr_t value) {
+ return static_cast<CompilerPass::Id>(value & Utils::NBitMask(kPassBits));
+ }
+
+ static intptr_t DecodeId(intptr_t value) { return (value >> kPassBits); }
+
+ static constexpr intptr_t kNoId = -1;
+
+ private:
+ static constexpr intptr_t kPassBits = 8;
+ static_assert(CompilerPass::kNumPasses <= (1 << kPassBits),
+ "Pass Id does not fit into the bit field");
+ };
intptr_t deopt_id_;
- union {
- intptr_t lifetime_position_; // Position used by register allocator.
- intptr_t place_id_;
- };
+ intptr_t pass_specific_id_ = PassSpecificId::kNoId;
Instruction* previous_;
Instruction* next_;
Environment* env_;
diff --git a/runtime/vm/compiler/backend/il_deserializer.cc b/runtime/vm/compiler/backend/il_deserializer.cc
index fe184e9..abc152e 100644
--- a/runtime/vm/compiler/backend/il_deserializer.cc
+++ b/runtime/vm/compiler/backend/il_deserializer.cc
@@ -798,10 +798,6 @@
if (inst == nullptr) return nullptr;
if (env != nullptr) env->DeepCopyTo(zone(), inst);
- if (auto const lifetime_sexp =
- CheckInteger(list->ExtraLookupValue("lifetime_position"))) {
- inst->set_lifetime_position(lifetime_sexp->value());
- }
return inst;
}
diff --git a/runtime/vm/compiler/backend/il_printer.cc b/runtime/vm/compiler/backend/il_printer.cc
index ab4c572..e036600 100644
--- a/runtime/vm/compiler/backend/il_printer.cc
+++ b/runtime/vm/compiler/backend/il_printer.cc
@@ -6,6 +6,7 @@
#include "vm/compiler/api/print_filter.h"
#include "vm/compiler/backend/il.h"
+#include "vm/compiler/backend/linearscan.h"
#include "vm/compiler/backend/range_analysis.h"
#include "vm/compiler/ffi/native_calling_convention.h"
#include "vm/os.h"
@@ -74,8 +75,8 @@
if (print_locations && (instr->HasLocs())) {
instr->locs()->PrintTo(&f);
}
- if (instr->lifetime_position() != -1) {
- THR_Print("%3" Pd ": ", instr->lifetime_position());
+ if (FlowGraphAllocator::HasLifetimePosition(instr)) {
+ THR_Print("%3" Pd ": ", FlowGraphAllocator::GetLifetimePosition(instr));
}
if (!instr->IsBlockEntry()) THR_Print(" ");
THR_Print("%s", str);
diff --git a/runtime/vm/compiler/backend/il_serializer.cc b/runtime/vm/compiler/backend/il_serializer.cc
index d448073..cfab7bb 100644
--- a/runtime/vm/compiler/backend/il_serializer.cc
+++ b/runtime/vm/compiler/backend/il_serializer.cc
@@ -833,9 +833,6 @@
if (!token_pos().IsNoSource()) {
s->AddExtraInteger(sexp, "token_pos", token_pos().value());
}
- if (lifetime_position() != kNoPlaceId) {
- s->AddExtraInteger(sexp, "lifetime_position", lifetime_position());
- }
}
SExpression* Range::ToSExpression(FlowGraphSerializer* s) {
diff --git a/runtime/vm/compiler/backend/linearscan.cc b/runtime/vm/compiler/backend/linearscan.cc
index 21c2bfe..2aa6944 100644
--- a/runtime/vm/compiler/backend/linearscan.cc
+++ b/runtime/vm/compiler/backend/linearscan.cc
@@ -680,15 +680,15 @@
range->set_assigned_location(loc);
AssignSafepoints(defn, range);
range->finger()->Initialize(range);
- SplitInitialDefinitionAt(range, block->lifetime_position() + 1);
+ SplitInitialDefinitionAt(range, GetLifetimePosition(block) + 1);
ConvertAllUses(range);
// We have exception/stacktrace in a register and need to
// ensure this register is not available for register allocation during
// the [CatchBlockEntry] to ensure it's not overwritten.
if (loc.IsRegister()) {
- BlockLocation(loc, block->lifetime_position(),
- block->lifetime_position() + 1);
+ BlockLocation(loc, GetLifetimePosition(block),
+ GetLifetimePosition(block) + 1);
}
return;
}
@@ -743,12 +743,12 @@
range->set_assigned_location(loc);
if (loc.IsRegister()) {
AssignSafepoints(defn, range);
- if (range->End() > (block->lifetime_position() + 2)) {
- SplitInitialDefinitionAt(range, block->lifetime_position() + 2);
+ if (range->End() > (GetLifetimePosition(block) + 2)) {
+ SplitInitialDefinitionAt(range, GetLifetimePosition(block) + 2);
}
ConvertAllUses(range);
- BlockLocation(loc, block->lifetime_position(),
- block->lifetime_position() + 2);
+ BlockLocation(loc, GetLifetimePosition(block),
+ GetLifetimePosition(block) + 2);
return;
}
} else {
@@ -835,7 +835,7 @@
ParallelMoveInstr* parallel_move = goto_instr->parallel_move();
// All uses are recorded at the position of parallel move preceding goto.
- const intptr_t pos = goto_instr->lifetime_position();
+ const intptr_t pos = GetLifetimePosition(goto_instr);
JoinEntryInstr* join = goto_instr->successor();
ASSERT(join != NULL);
@@ -973,7 +973,7 @@
}
const intptr_t block_start_pos = block->start_pos();
- const intptr_t use_pos = current->lifetime_position() + 1;
+ const intptr_t use_pos = GetLifetimePosition(current) + 1;
Location* locations = flow_graph_.zone()->Alloc<Location>(env->Length());
@@ -1304,7 +1304,7 @@
}
}
- const intptr_t pos = current->lifetime_position();
+ const intptr_t pos = GetLifetimePosition(current);
ASSERT(IsInstructionStartPosition(pos));
ASSERT(locs->input_count() == current->InputCount());
@@ -1522,11 +1522,12 @@
ASSERT(pos > 0);
Instruction* prev = instr->previous();
ParallelMoveInstr* move = prev->AsParallelMove();
- if ((move == NULL) || (move->lifetime_position() != pos)) {
+ if ((move == NULL) ||
+ (FlowGraphAllocator::GetLifetimePosition(move) != pos)) {
move = new ParallelMoveInstr();
prev->LinkTo(move);
move->LinkTo(instr);
- move->set_lifetime_position(pos);
+ FlowGraphAllocator::SetLifetimePosition(move, pos);
}
return move;
}
@@ -1534,7 +1535,8 @@
static ParallelMoveInstr* CreateParallelMoveAfter(Instruction* instr,
intptr_t pos) {
Instruction* next = instr->next();
- if (next->IsParallelMove() && (next->lifetime_position() == pos)) {
+ if (next->IsParallelMove() &&
+ (FlowGraphAllocator::GetLifetimePosition(next) == pos)) {
return next->AsParallelMove();
}
return CreateParallelMoveBefore(next, pos);
@@ -1556,7 +1558,7 @@
instructions_.Add(block);
block_entries_.Add(block);
block->set_start_pos(pos);
- block->set_lifetime_position(pos);
+ SetLifetimePosition(block, pos);
pos += 2;
for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
@@ -1565,7 +1567,7 @@
if (!current->IsParallelMove()) {
instructions_.Add(current);
block_entries_.Add(block);
- current->set_lifetime_position(pos);
+ SetLifetimePosition(current, pos);
pos += 2;
}
}
@@ -1830,7 +1832,7 @@
BlockEntryInstr* split_block_entry = BlockEntryAt(to);
ASSERT(split_block_entry == InstructionAt(to)->GetBlock());
- if (from < split_block_entry->lifetime_position()) {
+ if (from < GetLifetimePosition(split_block_entry)) {
// Interval [from, to) spans multiple blocks.
// If the last block is inside a loop, prefer splitting at the outermost
@@ -1861,16 +1863,16 @@
}
}
while ((loop_info != nullptr) &&
- (from < loop_info->header()->lifetime_position())) {
+ (from < GetLifetimePosition(loop_info->header()))) {
split_block_entry = loop_info->header();
loop_info = loop_info->outer();
TRACE_ALLOC(THR_Print(" move back to loop header B%" Pd " at %" Pd "\n",
split_block_entry->block_id(),
- split_block_entry->lifetime_position()));
+ GetLifetimePosition(split_block_entry)));
}
// Split at block's start.
- split_pos = split_block_entry->lifetime_position();
+ split_pos = GetLifetimePosition(split_block_entry);
} else {
// Interval [from, to) is contained inside a single block.
@@ -2585,7 +2587,7 @@
continue;
}
- const intptr_t pos = safepoint_instr->lifetime_position();
+ const intptr_t pos = GetLifetimePosition(safepoint_instr);
if (range->End() <= pos) break;
if (range->Contains(pos)) {
diff --git a/runtime/vm/compiler/backend/linearscan.h b/runtime/vm/compiler/backend/linearscan.h
index 2fc72ef..740e489 100644
--- a/runtime/vm/compiler/backend/linearscan.h
+++ b/runtime/vm/compiler/backend/linearscan.h
@@ -67,6 +67,20 @@
// Map a virtual register number to its live range.
LiveRange* GetLiveRange(intptr_t vreg);
+ DART_FORCE_INLINE static void SetLifetimePosition(Instruction* instr,
+ intptr_t pos) {
+ instr->SetPassSpecificId(CompilerPass::kAllocateRegisters, pos);
+ }
+
+ DART_FORCE_INLINE static bool HasLifetimePosition(Instruction* instr) {
+ return instr->HasPassSpecificId(CompilerPass::kAllocateRegisters);
+ }
+
+ DART_FORCE_INLINE static intptr_t GetLifetimePosition(
+ const Instruction* instr) {
+ return instr->GetPassSpecificId(CompilerPass::kAllocateRegisters);
+ }
+
private:
void CollectRepresentations();
diff --git a/runtime/vm/compiler/backend/redundancy_elimination.cc b/runtime/vm/compiler/backend/redundancy_elimination.cc
index f363a15..45c4b24 100644
--- a/runtime/vm/compiler/backend/redundancy_elimination.cc
+++ b/runtime/vm/compiler/backend/redundancy_elimination.cc
@@ -1243,6 +1243,18 @@
return phi_moves;
}
+DART_FORCE_INLINE static void SetPlaceId(Instruction* instr, intptr_t id) {
+ instr->SetPassSpecificId(CompilerPass::kCSE, id);
+}
+
+DART_FORCE_INLINE static intptr_t GetPlaceId(const Instruction* instr) {
+ return instr->GetPassSpecificId(CompilerPass::kCSE);
+}
+
+DART_FORCE_INLINE static bool HasPlaceId(const Instruction* instr) {
+ return instr->HasPassSpecificId(CompilerPass::kCSE);
+}
+
enum CSEMode { kOptimizeLoads, kOptimizeStores };
static AliasedSet* NumberPlaces(
@@ -1280,7 +1292,7 @@
}
}
- instr->set_place_id(result->id());
+ SetPlaceId(instr, result->id());
}
}
@@ -1307,8 +1319,8 @@
intptr_t loop_header_index,
Instruction* instr) {
return IsLoadEliminationCandidate(instr) && (sets != NULL) &&
- instr->HasPlaceId() && ((*sets)[loop_header_index] != NULL) &&
- (*sets)[loop_header_index]->Contains(instr->place_id());
+         HasPlaceId(instr) && ((*sets)[loop_header_index] != NULL) &&
+         (*sets)[loop_header_index]->Contains(GetPlaceId(instr));
}
LICM::LICM(FlowGraph* flow_graph) : flow_graph_(flow_graph) {
@@ -1657,7 +1669,7 @@
// instruction that still points to the old place with a more
// generic alias.
const intptr_t old_alias_id = aliased_set_->LookupAliasId(
- aliased_set_->places()[instr->place_id()]->ToAlias());
+ aliased_set_->places()[GetPlaceId(instr)]->ToAlias());
killed = aliased_set_->GetKilledSet(old_alias_id);
}
@@ -1676,7 +1688,7 @@
if (FLAG_trace_optimization) {
THR_Print("Removing redundant store to place %" Pd
" in block B%" Pd "\n",
- instr->place_id(), block->block_id());
+ GetPlaceId(instr), block->block_id());
}
instr_it.RemoveCurrentFromGraph();
continue;
@@ -1709,8 +1721,8 @@
// load forwarding.
const Place* canonical = aliased_set_->LookupCanonical(&place);
if ((canonical != NULL) &&
- (canonical->id() != instr->AsDefinition()->place_id())) {
- instr->AsDefinition()->set_place_id(canonical->id());
+ (canonical->id() != GetPlaceId(instr->AsDefinition()))) {
+ SetPlaceId(instr->AsDefinition(), canonical->id());
}
}
@@ -1749,11 +1761,11 @@
intptr_t place_id = 0;
if (auto load = use->instruction()->AsLoadField()) {
slot = &load->slot();
- place_id = load->place_id();
+ place_id = GetPlaceId(load);
} else if (auto store =
use->instruction()->AsStoreInstanceField()) {
slot = &store->slot();
- place_id = store->place_id();
+ place_id = GetPlaceId(store);
}
if (slot != nullptr) {
@@ -1787,7 +1799,7 @@
continue;
}
- const intptr_t place_id = defn->place_id();
+ const intptr_t place_id = GetPlaceId(defn);
if (gen->Contains(place_id)) {
// This is a locally redundant load.
ASSERT((out_values != NULL) && ((*out_values)[place_id] != NULL));
@@ -1963,7 +1975,7 @@
(in_[preorder_number]->Contains(place_id))) {
PhiInstr* phi = new (Z)
PhiInstr(block->AsJoinEntry(), block->PredecessorCount());
- phi->set_place_id(place_id);
+ SetPlaceId(phi, place_id);
pending_phis.Add(phi);
in_value = phi;
}
@@ -2101,14 +2113,14 @@
// Incoming values are different. Phi is required to merge.
PhiInstr* phi =
new (Z) PhiInstr(block->AsJoinEntry(), block->PredecessorCount());
- phi->set_place_id(place_id);
+ SetPlaceId(phi, place_id);
FillPhiInputs(phi);
return phi;
}
void FillPhiInputs(PhiInstr* phi) {
BlockEntryInstr* block = phi->GetBlock();
- const intptr_t place_id = phi->place_id();
+ const intptr_t place_id = GetPlaceId(phi);
for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
BlockEntryInstr* pred = block->PredecessorAt(i);
@@ -2152,9 +2164,9 @@
for (intptr_t i = 0; i < loads->length(); i++) {
Definition* load = (*loads)[i];
- if (!in->Contains(load->place_id())) continue; // No incoming value.
+ if (!in->Contains(GetPlaceId(load))) continue; // No incoming value.
- Definition* replacement = MergeIncomingValues(block, load->place_id());
+ Definition* replacement = MergeIncomingValues(block, GetPlaceId(load));
ASSERT(replacement != NULL);
// Sets of outgoing values are not linked into use lists so
@@ -2604,17 +2616,17 @@
// Handle stores.
if (is_store) {
- if (kill->Contains(instr->place_id())) {
- if (!live_in->Contains(instr->place_id()) &&
+ if (kill->Contains(GetPlaceId(instr))) {
+ if (!live_in->Contains(GetPlaceId(instr)) &&
CanEliminateStore(instr)) {
if (FLAG_trace_optimization) {
THR_Print("Removing dead store to place %" Pd " in block B%" Pd
"\n",
- instr->place_id(), block->block_id());
+ GetPlaceId(instr), block->block_id());
}
instr_it.RemoveCurrentFromGraph();
}
- } else if (!live_in->Contains(instr->place_id())) {
+ } else if (!live_in->Contains(GetPlaceId(instr))) {
// Mark this store as down-ward exposed: They are the only
// candidates for the global store elimination.
if (exposed_stores == NULL) {
@@ -2626,8 +2638,8 @@
exposed_stores->Add(instr);
}
// Interfering stores kill only loads from the same place.
- kill->Add(instr->place_id());
- live_in->Remove(instr->place_id());
+ kill->Add(GetPlaceId(instr));
+ live_in->Remove(GetPlaceId(instr));
continue;
}
@@ -2688,11 +2700,11 @@
}
// Eliminate a downward exposed store if the corresponding place is not
// in live-out.
- if (!live_out->Contains(instr->place_id()) &&
+ if (!live_out->Contains(GetPlaceId(instr)) &&
CanEliminateStore(instr)) {
if (FLAG_trace_optimization) {
THR_Print("Removing dead store to place %" Pd " block B%" Pd "\n",
- instr->place_id(), block->block_id());
+ GetPlaceId(instr), block->block_id());
}
instr->RemoveFromGraph(/* ignored */ false);
}
@@ -3314,6 +3326,18 @@
EliminateDeadParameters();
}
+ static intptr_t GetParameterId(const Instruction* instr) {
+ return instr->GetPassSpecificId(CompilerPass::kTryCatchOptimization);
+ }
+
+ static void SetParameterId(Instruction* instr, intptr_t id) {
+ instr->SetPassSpecificId(CompilerPass::kTryCatchOptimization, id);
+ }
+
+ static bool HasParameterId(Instruction* instr) {
+ return instr->HasPassSpecificId(CompilerPass::kTryCatchOptimization);
+ }
+
// Assign sequential ids to each ParameterInstr in each CatchEntryBlock.
// Collect reverse mapping from try indexes to corresponding catches.
void NumberCatchEntryParameters() {
@@ -3322,7 +3346,7 @@
*catch_entry->initial_definitions();
for (auto idef : idefs) {
if (idef->IsParameter()) {
- idef->set_place_id(parameter_info_.length());
+ SetParameterId(idef, parameter_info_.length());
parameter_info_.Add(new ParameterInfo(idef->AsParameter()));
}
}
@@ -3361,14 +3385,14 @@
// already present in the list.
bool found = false;
for (auto other_defn :
- parameter_info_[param->place_id()]->incoming) {
+ parameter_info_[GetParameterId(param)]->incoming) {
if (other_defn == defn) {
found = true;
break;
}
}
if (!found) {
- parameter_info_[param->place_id()]->incoming.Add(defn);
+ parameter_info_[GetParameterId(param)]->incoming.Add(defn);
}
}
}
@@ -3408,7 +3432,7 @@
while (!worklist_.IsEmpty()) {
Definition* defn = worklist_.RemoveLast();
if (ParameterInstr* param = defn->AsParameter()) {
- auto s = parameter_info_[param->place_id()];
+ auto s = parameter_info_[GetParameterId(param)];
for (auto input : s->incoming) {
MarkLive(input);
}
@@ -3429,8 +3453,8 @@
worklist_.Add(phi);
}
} else if (ParameterInstr* param = defn->AsParameter()) {
- if (param->place_id() != -1) {
- auto input_s = parameter_info_[param->place_id()];
+ if (HasParameterId(param)) {
+ auto input_s = parameter_info_[GetParameterId(param)];
if (!input_s->alive) {
input_s->alive = true;
worklist_.Add(param);
@@ -3464,7 +3488,7 @@
for (intptr_t env_idx = 0; env_idx < idefs.length(); ++env_idx) {
if (ParameterInstr* param = idefs[env_idx]->AsParameter()) {
- if (!parameter_info_[param->place_id()]->alive) {
+ if (!parameter_info_[GetParameterId(param)]->alive) {
env->ValueAt(env_idx)->BindToEnvironment(
flow_graph_->constant_null());
}
diff --git a/runtime/vm/compiler/compiler_pass.h b/runtime/vm/compiler/compiler_pass.h
index 0f84e7f..ffda7a6 100644
--- a/runtime/vm/compiler/compiler_pass.h
+++ b/runtime/vm/compiler/compiler_pass.h
@@ -117,7 +117,7 @@
};
#define ADD_ONE(name) +1
- static const intptr_t kNumPasses = 0 COMPILER_PASS_LIST(ADD_ONE);
+ static constexpr intptr_t kNumPasses = 0 COMPILER_PASS_LIST(ADD_ONE);
#undef ADD_ONE
CompilerPass(Id id, const char* name) : name_(name), flags_(0) {