[vm/compiler] Convert _TypedList get and set methods to normal methods.
Previously, they were implemented as native methods with special
replacements in the inliner.
Instead, create force-compiled versions of the original inliner
replacements and use those in place of the native methods, unless the
flow graph compiler doesn't support unboxing the requested element type.
In that case, the force-compiled version just calls a native method,
and we only keep the native methods that might be needed (that is,
for double/SIMD element access).
Also, revert the change in 26911a6176ed84, since the _getX/_setX
methods are now appropriately inlined instead of failing to inline
because they were native methods.
TEST=vm/dart/typed_list_index_checkbound_il_test
Cq-Include-Trybots: luci.dart.try:vm-aot-linux-debug-x64-try,vm-aot-linux-release-x64-try,vm-linux-debug-x64-try,vm-aot-linux-release-simarm_x64-try,vm-linux-release-simarm-try,vm-ffi-qemu-linux-release-arm-try
Change-Id: I4840883d1fc12b36a450803da339406bec149044
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/330786
Commit-Queue: Tess Strickland <sstrickl@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
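
(For illustration only, not part of this CL: a small standalone Dart program
that exercises the converted paths. The variable names and the comments about
which _TypedList method is reached are informal, based on the patched
typed_data_patch.dart below.)

  import 'dart:typed_data';

  void main() {
    final doubles = Float64List(4);
    // Conceptually bottoms out in _TypedList._setFloat64 after the caller's
    // bounds check; with this CL that body inlines to an unboxed StoreIndexed
    // (or calls _nativeSetFloat64 if unboxed doubles are unsupported).
    doubles[0] = 3.14;
    final bytes = ByteData.sublistView(doubles);
    // Conceptually reaches _TypedList._getFloat64 on the backing store.
    print(bytes.getFloat64(0, Endian.host)); // 3.14
  }
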
diff --git a/runtime/lib/typed_data.cc b/runtime/lib/typed_data.cc
index e28724a..f06f90c 100644
--- a/runtime/lib/typed_data.cc
+++ b/runtime/lib/typed_data.cc
@@ -15,20 +15,6 @@
// TypedData.
-// Checks to see if offsetInBytes + num_bytes is in the range.
-static void RangeCheck(intptr_t offset_in_bytes,
- intptr_t access_size,
- intptr_t length_in_bytes,
- intptr_t element_size_in_bytes) {
- if (!Utils::RangeCheck(offset_in_bytes, access_size, length_in_bytes)) {
- const intptr_t index =
- (offset_in_bytes + access_size) / element_size_in_bytes;
- const intptr_t length = length_in_bytes / element_size_in_bytes;
- Exceptions::ThrowRangeError("index", Integer::Handle(Integer::New(index)),
- 0, length);
- }
-}
-
DEFINE_NATIVE_ENTRY(TypedDataBase_length, 0, 1) {
GET_NON_NULL_NATIVE_ARGUMENT(TypedDataBase, array, arguments->NativeArgAt(0));
return Smi::New(array.Length());
@@ -130,55 +116,40 @@
return Object::null();
}
-#define TYPED_DATA_GETTER(getter, object, ctor, access_size) \
+// The native getter and setter functions defined here are only called if
+// unboxing doubles or SIMD values is not supported by the flow graph compiler,
+// and the provided offsets have already been range checked by the calling code.
+
+#define TYPED_DATA_GETTER(getter, object, ctor) \
DEFINE_NATIVE_ENTRY(TypedData_##getter, 0, 2) { \
GET_NON_NULL_NATIVE_ARGUMENT(TypedDataBase, array, \
arguments->NativeArgAt(0)); \
GET_NON_NULL_NATIVE_ARGUMENT(Smi, offsetInBytes, \
arguments->NativeArgAt(1)); \
- RangeCheck(offsetInBytes.Value(), access_size, array.LengthInBytes(), \
- access_size); \
return object::ctor(array.getter(offsetInBytes.Value())); \
}
-#define TYPED_DATA_SETTER(setter, object, get_object_value, access_size, \
- access_type) \
+#define TYPED_DATA_SETTER(setter, object, get_object_value, access_type) \
DEFINE_NATIVE_ENTRY(TypedData_##setter, 0, 3) { \
GET_NON_NULL_NATIVE_ARGUMENT(TypedDataBase, array, \
arguments->NativeArgAt(0)); \
GET_NON_NULL_NATIVE_ARGUMENT(Smi, offsetInBytes, \
arguments->NativeArgAt(1)); \
GET_NON_NULL_NATIVE_ARGUMENT(object, value, arguments->NativeArgAt(2)); \
- RangeCheck(offsetInBytes.Value(), access_size, array.LengthInBytes(), \
- access_size); \
array.setter(offsetInBytes.Value(), \
static_cast<access_type>(value.get_object_value())); \
return Object::null(); \
}
#define TYPED_DATA_NATIVES(type_name, object, ctor, get_object_value, \
- access_size, access_type) \
- TYPED_DATA_GETTER(Get##type_name, object, ctor, access_size) \
- TYPED_DATA_SETTER(Set##type_name, object, get_object_value, access_size, \
- access_type)
+ access_type) \
+ TYPED_DATA_GETTER(Get##type_name, object, ctor) \
+ TYPED_DATA_SETTER(Set##type_name, object, get_object_value, access_type)
-TYPED_DATA_NATIVES(Int8, Integer, New, AsTruncatedUint32Value, 1, int8_t)
-TYPED_DATA_NATIVES(Uint8, Integer, New, AsTruncatedUint32Value, 1, uint8_t)
-TYPED_DATA_NATIVES(Int16, Integer, New, AsTruncatedUint32Value, 2, int16_t)
-TYPED_DATA_NATIVES(Uint16, Integer, New, AsTruncatedUint32Value, 2, uint16_t)
-TYPED_DATA_NATIVES(Int32, Integer, New, AsTruncatedUint32Value, 4, int32_t)
-TYPED_DATA_NATIVES(Uint32, Integer, New, AsTruncatedUint32Value, 4, uint32_t)
-TYPED_DATA_NATIVES(Int64, Integer, New, AsTruncatedInt64Value, 8, int64_t)
-TYPED_DATA_NATIVES(Uint64,
- Integer,
- NewFromUint64,
- AsTruncatedInt64Value,
- 8,
- uint64_t)
-TYPED_DATA_NATIVES(Float32, Double, New, value, 4, float)
-TYPED_DATA_NATIVES(Float64, Double, New, value, 8, double)
-TYPED_DATA_NATIVES(Float32x4, Float32x4, New, value, 16, simd128_value_t)
-TYPED_DATA_NATIVES(Int32x4, Int32x4, New, value, 16, simd128_value_t)
-TYPED_DATA_NATIVES(Float64x2, Float64x2, New, value, 16, simd128_value_t)
+TYPED_DATA_NATIVES(Float32, Double, New, value, float)
+TYPED_DATA_NATIVES(Float64, Double, New, value, double)
+TYPED_DATA_NATIVES(Float32x4, Float32x4, New, value, simd128_value_t)
+TYPED_DATA_NATIVES(Int32x4, Int32x4, New, value, simd128_value_t)
+TYPED_DATA_NATIVES(Float64x2, Float64x2, New, value, simd128_value_t)
} // namespace dart
diff --git a/runtime/tests/vm/dart/typed_list_index_checkbound_il_test.dart b/runtime/tests/vm/dart/typed_list_index_checkbound_il_test.dart
index 794dabf..05e1bab 100644
--- a/runtime/tests/vm/dart/typed_list_index_checkbound_il_test.dart
+++ b/runtime/tests/vm/dart/typed_list_index_checkbound_il_test.dart
@@ -24,10 +24,6 @@
int retrieveFromExternal(Int8List src, int n) => src[n];
void matchIL$retrieveFromView(FlowGraph graph) {
- // TODO(https://github.com/flutter/flutter/issues/138689): Once the regression
- // for doing the replacement with the GenericCheckBound instruction is fixed
- // on 32-bit archs, remove this.
- if (is32BitConfiguration) return;
graph.match([
match.block('Graph'),
match.block('Function', [
@@ -60,10 +56,6 @@
}
void matchIL$retrieveFromBase(FlowGraph graph) {
- // TODO(https://github.com/flutter/flutter/issues/138689): Once the regression
- // for doing the replacement with the GenericCheckBound instruction is fixed
- // on 32-bit archs, remove this.
- if (is32BitConfiguration) return;
graph.match([
match.block('Graph'),
match.block('Function', [
@@ -86,10 +78,6 @@
}
void matchIL$retrieveFromExternal(FlowGraph graph) {
- // TODO(https://github.com/flutter/flutter/issues/138689): Once the regression
- // for doing the replacement with the GenericCheckBound instruction is fixed
- // on 32-bit archs, remove this.
- if (is32BitConfiguration) return;
graph.match([
match.block('Graph'),
match.block('Function', [
diff --git a/runtime/vm/bootstrap_natives.h b/runtime/vm/bootstrap_natives.h
index 11be9be..2e4dba9 100644
--- a/runtime/vm/bootstrap_natives.h
+++ b/runtime/vm/bootstrap_natives.h
@@ -160,22 +160,6 @@
V(Timeline_reportTaskEvent, 5) \
V(TypedDataBase_length, 1) \
V(TypedDataBase_setClampedRange, 5) \
- V(TypedData_GetInt8, 2) \
- V(TypedData_SetInt8, 3) \
- V(TypedData_GetUint8, 2) \
- V(TypedData_SetUint8, 3) \
- V(TypedData_GetInt16, 2) \
- V(TypedData_SetInt16, 3) \
- V(TypedData_GetUint16, 2) \
- V(TypedData_SetUint16, 3) \
- V(TypedData_GetInt32, 2) \
- V(TypedData_SetInt32, 3) \
- V(TypedData_GetUint32, 2) \
- V(TypedData_SetUint32, 3) \
- V(TypedData_GetInt64, 2) \
- V(TypedData_SetInt64, 3) \
- V(TypedData_GetUint64, 2) \
- V(TypedData_SetUint64, 3) \
V(TypedData_GetFloat32, 2) \
V(TypedData_SetFloat32, 3) \
V(TypedData_GetFloat64, 2) \
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 92a356b..8f5030c 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -6777,6 +6777,16 @@
Definition* LoadIndexedInstr::Canonicalize(FlowGraph* flow_graph) {
auto Z = flow_graph->zone();
+ if (auto* const untag_payload = array()->definition()->AsLoadField()) {
+ // If loading from an internal typed data object, remove the load of
+ // PointerBase.data, as LoadIndexed knows how to load from a tagged
+ // internal typed data object directly and the LoadField may interfere with
+ // possible allocation sinking.
+ if (untag_payload->slot().IsIdentical(Slot::PointerBase_data()) &&
+ IsTypedDataClassId(untag_payload->instance()->Type()->ToCid())) {
+ array()->BindTo(untag_payload->instance()->definition());
+ }
+ }
if (auto box = index()->definition()->AsBoxInt64()) {
    // TODO(dartbug.com/39432): Make LoadIndexed fully support unboxed indices.
if (!box->ComputeCanDeoptimize() && compiler::target::kWordSize == 8) {
@@ -6832,6 +6842,16 @@
Instruction* StoreIndexedInstr::Canonicalize(FlowGraph* flow_graph) {
auto Z = flow_graph->zone();
+ if (auto* const untag_payload = array()->definition()->AsLoadField()) {
+  // If storing to an internal typed data object, remove the load of
+  // PointerBase.data, as StoreIndexed knows how to store to a tagged
+  // internal typed data object directly and the LoadField may interfere with
+ // possible allocation sinking.
+ if (untag_payload->slot().IsIdentical(Slot::PointerBase_data()) &&
+ IsTypedDataClassId(untag_payload->instance()->Type()->ToCid())) {
+ array()->BindTo(untag_payload->instance()->definition());
+ }
+ }
if (auto box = index()->definition()->AsBoxInt64()) {
    // TODO(dartbug.com/39432): Make StoreIndexed fully support unboxed indices.
if (!box->ComputeCanDeoptimize() && compiler::target::kWordSize == 8) {
diff --git a/runtime/vm/compiler/backend/inliner.cc b/runtime/vm/compiler/backend/inliner.cc
index 0816a14..dc1a56f 100644
--- a/runtime/vm/compiler/backend/inliner.cc
+++ b/runtime/vm/compiler/backend/inliner.cc
@@ -2757,13 +2757,6 @@
Instruction** last,
Definition** result,
const String& symbol) {
-#if defined(TARGET_ARCH_IS_32_BIT)
- // TODO(https://github.com/flutter/flutter/issues/138689): We only convert
- // the index check to a GenericCheckBound instruction on 64-bit architectures,
- // where the inputs are always unboxed. Once the regressions on 32-bit
- // architectures has been identified and fixed, remove the #ifdef.
- return false;
-#else
*entry =
new (Z) FunctionEntryInstr(graph_entry, flow_graph->allocate_block_id(),
call->GetBlock()->try_index(), DeoptId::kNone);
@@ -2790,7 +2783,6 @@
*last = cursor;
*result = index;
return true;
-#endif
}
static intptr_t PrepareInlineIndexedOp(FlowGraph* flow_graph,
@@ -3214,236 +3206,6 @@
return true;
}
-// Emits preparatory code for a typed getter/setter.
-// Handles three cases:
-// (1) dynamic: generates load untagged (internal or external)
-// (2) external: generates load untagged
-// (3) internal: no code required.
-static void PrepareInlineByteArrayBaseOp(FlowGraph* flow_graph,
- Instruction* call,
- Definition* receiver,
- intptr_t array_cid,
- Definition** array,
- Instruction** cursor) {
- if (array_cid == kDynamicCid || IsExternalTypedDataClassId(array_cid)) {
- // Internal or External typed data: load the untagged base address.
- auto const loads_inner_pointer =
- IsExternalTypedDataClassId(array_cid)
- ? InnerPointerAccess::kCannotBeInnerPointer
- : InnerPointerAccess::kMayBeInnerPointer;
- auto* const elements =
- new (Z) LoadFieldInstr(new (Z) Value(*array), Slot::PointerBase_data(),
- loads_inner_pointer, call->source());
- *cursor =
- flow_graph->AppendTo(*cursor, elements, nullptr, FlowGraph::kValue);
- *array = elements;
- } else {
- // Internal typed data: no action.
- ASSERT(IsTypedDataClassId(array_cid));
- }
-}
-
-static bool InlineByteArrayBaseLoad(FlowGraph* flow_graph,
- Definition* call,
- Definition* receiver,
- intptr_t array_cid,
- intptr_t view_cid,
- GraphEntryInstr* graph_entry,
- FunctionEntryInstr** entry,
- Instruction** last,
- Definition** result) {
- ASSERT(array_cid != kIllegalCid);
-
- // Dynamic calls are polymorphic due to:
- // (A) extra bounds check computations (length stored in receiver),
- // (B) external/internal typed data in receiver.
- // Both issues are resolved in the inlined code.
- // All getters that go through InlineByteArrayBaseLoad() have explicit
- // bounds checks in all their clients in the library, so we can omit yet
- // another inlined bounds check.
- if (array_cid == kDynamicCid) {
- ASSERT(call->IsStaticCall());
- }
-
- Definition* array = receiver;
- Definition* index = call->ArgumentAt(1);
- *entry =
- new (Z) FunctionEntryInstr(graph_entry, flow_graph->allocate_block_id(),
- call->GetBlock()->try_index(), DeoptId::kNone);
- (*entry)->InheritDeoptTarget(Z, call);
- Instruction* cursor = *entry;
-
- // Generates a template for the load, either a dynamic conditional
- // that dispatches on external and internal storage, or a single
- // case that deals with either external or internal storage.
- PrepareInlineByteArrayBaseOp(flow_graph, call, receiver, array_cid, &array,
- &cursor);
-
- // Fill out the generated template with loads.
- // Load from either external or internal.
- LoadIndexedInstr* load = new (Z) LoadIndexedInstr(
- new (Z) Value(array), new (Z) Value(index),
- /*index_unboxed=*/false, /*index_scale=*/1, view_cid, kUnalignedAccess,
- DeoptId::kNone, call->source(), ResultType(call));
- flow_graph->AppendTo(
- cursor, load, call->deopt_id() != DeoptId::kNone ? call->env() : nullptr,
- FlowGraph::kValue);
- cursor = *last = load;
-
- if (view_cid == kTypedDataFloat32ArrayCid) {
- *last = new (Z) FloatToDoubleInstr(new (Z) Value((*last)->AsDefinition()),
- DeoptId::kNone);
- flow_graph->AppendTo(cursor, *last, nullptr, FlowGraph::kValue);
- }
- *result = (*last)->AsDefinition();
- return true;
-}
-
-static StoreIndexedInstr* NewStore(FlowGraph* flow_graph,
- Instruction* call,
- Definition* array,
- Definition* index,
- Definition* stored_value,
- intptr_t view_cid) {
- return new (Z) StoreIndexedInstr(
- new (Z) Value(array), new (Z) Value(index), new (Z) Value(stored_value),
- kNoStoreBarrier, /*index_unboxed=*/false,
- /*index_scale=*/1, view_cid, kUnalignedAccess, call->deopt_id(),
- call->source());
-}
-
-static bool InlineByteArrayBaseStore(FlowGraph* flow_graph,
- const Function& target,
- Instruction* call,
- Definition* receiver,
- intptr_t array_cid,
- intptr_t view_cid,
- GraphEntryInstr* graph_entry,
- FunctionEntryInstr** entry,
- Instruction** last,
- Definition** result) {
- ASSERT(array_cid != kIllegalCid);
-
- // Dynamic calls are polymorphic due to:
- // (A) extra bounds check computations (length stored in receiver),
- // (B) external/internal typed data in receiver.
- // Both issues are resolved in the inlined code.
- // All setters that go through InlineByteArrayBaseLoad() have explicit
- // bounds checks in all their clients in the library, so we can omit yet
- // another inlined bounds check.
- if (array_cid == kDynamicCid) {
- ASSERT(call->IsStaticCall());
- }
-
- Definition* array = receiver;
- Definition* index = call->ArgumentAt(1);
- *entry =
- new (Z) FunctionEntryInstr(graph_entry, flow_graph->allocate_block_id(),
- call->GetBlock()->try_index(), DeoptId::kNone);
- (*entry)->InheritDeoptTarget(Z, call);
- Instruction* cursor = *entry;
-
- Definition* stored_value = call->ArgumentAt(2);
-
- // We know that the incomming type matches, but we still need to handle the
- // null check.
- if (!IsCompilingForSoundNullSafety()) {
- String& name = String::ZoneHandle(Z, target.name());
- Instruction* check = new (Z) CheckNullInstr(
- new (Z) Value(stored_value), name, call->deopt_id(), call->source());
- cursor =
- flow_graph->AppendTo(cursor, check, call->env(), FlowGraph::kEffect);
- }
-
- // Handle conversions and special unboxing (to ensure unboxing instructions
- // are marked as truncating, since [SelectRepresentations] does not take care
- // of that).
- switch (view_cid) {
- case kTypedDataInt8ArrayCid:
- case kTypedDataInt16ArrayCid:
- case kTypedDataUint8ArrayCid:
- case kTypedDataUint8ClampedArrayCid:
- case kTypedDataUint16ArrayCid:
- case kExternalTypedDataUint8ArrayCid:
- case kExternalTypedDataUint8ClampedArrayCid: {
- stored_value =
- UnboxInstr::Create(kUnboxedIntPtr, new (Z) Value(stored_value),
- call->deopt_id(), Instruction::kNotSpeculative);
- stored_value->AsUnboxInteger()->mark_truncating();
- cursor = flow_graph->AppendTo(cursor, stored_value, call->env(),
- FlowGraph::kValue);
- break;
- }
-
- case kTypedDataInt64ArrayCid:
- case kTypedDataUint64ArrayCid: {
- stored_value =
- new (Z) UnboxInt64Instr(new (Z) Value(stored_value), call->deopt_id(),
- Instruction::kNotSpeculative);
- cursor = flow_graph->AppendTo(cursor, stored_value, call->env(),
- FlowGraph::kValue);
- break;
- }
-
- case kTypedDataFloat32ArrayCid:
- case kTypedDataFloat64ArrayCid: {
- stored_value =
- UnboxInstr::Create(kUnboxedDouble, new (Z) Value(stored_value),
- call->deopt_id(), Instruction::kNotSpeculative);
- cursor = flow_graph->AppendTo(cursor, stored_value, call->env(),
- FlowGraph::kValue);
- if (view_cid == kTypedDataFloat32ArrayCid) {
- stored_value = new (Z)
- DoubleToFloatInstr(new (Z) Value(stored_value), call->deopt_id());
- cursor = flow_graph->AppendTo(cursor, stored_value, call->env(),
- FlowGraph::kValue);
- }
- break;
- }
-
- case kTypedDataInt32ArrayCid: {
- stored_value = new (Z)
- UnboxInt32Instr(UnboxInt32Instr::kTruncate,
- new (Z) Value(stored_value), call->deopt_id());
- cursor = flow_graph->AppendTo(cursor, stored_value, call->env(),
- FlowGraph::kValue);
- break;
- }
- case kTypedDataUint32ArrayCid: {
- stored_value = new (Z)
- UnboxUint32Instr(new (Z) Value(stored_value), call->deopt_id());
- ASSERT(stored_value->AsUnboxInteger()->is_truncating());
- cursor = flow_graph->AppendTo(cursor, stored_value, call->env(),
- FlowGraph::kValue);
- break;
- }
- default:
- break;
- }
-
- // Generates a template for the store, either a dynamic conditional
- // that dispatches on external and internal storage, or a single
- // case that deals with either external or internal storage.
- PrepareInlineByteArrayBaseOp(flow_graph, call, receiver, array_cid, &array,
- &cursor);
-
- // Fill out the generated template with stores.
- {
- // Store on either external or internal.
- StoreIndexedInstr* store =
- NewStore(flow_graph, call, array, index, stored_value, view_cid);
- flow_graph->AppendTo(
- cursor, store,
- call->deopt_id() != DeoptId::kNone ? call->env() : nullptr,
- FlowGraph::kEffect);
- *last = store;
- }
- // We need a return value to replace uses of the original definition. However,
- // the final instruction is a use of 'void operator[]=()', so we use null.
- *result = flow_graph->constant_null();
- return true;
-}
-
// Returns the LoadIndexedInstr.
static Definition* PrepareInlineStringIndexOp(FlowGraph* flow_graph,
Instruction* call,
@@ -4579,73 +4341,6 @@
case MethodRecognizer::kUint64ArrayGetIndexed:
return InlineGetIndexed(flow_graph, can_speculate, is_dynamic_call, kind,
call, receiver, graph_entry, entry, last, result);
- case MethodRecognizer::kByteArrayBaseGetInt8:
- return InlineByteArrayBaseLoad(flow_graph, call, receiver, receiver_cid,
- kTypedDataInt8ArrayCid, graph_entry, entry,
- last, result);
- case MethodRecognizer::kByteArrayBaseGetUint8:
- return InlineByteArrayBaseLoad(flow_graph, call, receiver, receiver_cid,
- kTypedDataUint8ArrayCid, graph_entry,
- entry, last, result);
- case MethodRecognizer::kByteArrayBaseGetInt16:
- return InlineByteArrayBaseLoad(flow_graph, call, receiver, receiver_cid,
- kTypedDataInt16ArrayCid, graph_entry,
- entry, last, result);
- case MethodRecognizer::kByteArrayBaseGetUint16:
- return InlineByteArrayBaseLoad(flow_graph, call, receiver, receiver_cid,
- kTypedDataUint16ArrayCid, graph_entry,
- entry, last, result);
- case MethodRecognizer::kByteArrayBaseGetInt32:
- return InlineByteArrayBaseLoad(flow_graph, call, receiver, receiver_cid,
- kTypedDataInt32ArrayCid, graph_entry,
- entry, last, result);
- case MethodRecognizer::kByteArrayBaseGetUint32:
- return InlineByteArrayBaseLoad(flow_graph, call, receiver, receiver_cid,
- kTypedDataUint32ArrayCid, graph_entry,
- entry, last, result);
- case MethodRecognizer::kByteArrayBaseGetInt64:
- return InlineByteArrayBaseLoad(flow_graph, call, receiver, receiver_cid,
- kTypedDataInt64ArrayCid, graph_entry,
- entry, last, result);
- case MethodRecognizer::kByteArrayBaseGetUint64:
- return InlineByteArrayBaseLoad(flow_graph, call, receiver, receiver_cid,
- kTypedDataUint64ArrayCid, graph_entry,
- entry, last, result);
- case MethodRecognizer::kByteArrayBaseGetFloat32:
- if (!CanUnboxDouble()) {
- return false;
- }
- return InlineByteArrayBaseLoad(flow_graph, call, receiver, receiver_cid,
- kTypedDataFloat32ArrayCid, graph_entry,
- entry, last, result);
- case MethodRecognizer::kByteArrayBaseGetFloat64:
- if (!CanUnboxDouble()) {
- return false;
- }
- return InlineByteArrayBaseLoad(flow_graph, call, receiver, receiver_cid,
- kTypedDataFloat64ArrayCid, graph_entry,
- entry, last, result);
- case MethodRecognizer::kByteArrayBaseGetFloat32x4:
- if (!ShouldInlineSimd()) {
- return false;
- }
- return InlineByteArrayBaseLoad(flow_graph, call, receiver, receiver_cid,
- kTypedDataFloat32x4ArrayCid, graph_entry,
- entry, last, result);
- case MethodRecognizer::kByteArrayBaseGetFloat64x2:
- if (!ShouldInlineSimd()) {
- return false;
- }
- return InlineByteArrayBaseLoad(flow_graph, call, receiver, receiver_cid,
- kTypedDataFloat64x2ArrayCid, graph_entry,
- entry, last, result);
- case MethodRecognizer::kByteArrayBaseGetInt32x4:
- if (!ShouldInlineSimd()) {
- return false;
- }
- return InlineByteArrayBaseLoad(flow_graph, call, receiver, receiver_cid,
- kTypedDataInt32x4ArrayCid, graph_entry,
- entry, last, result);
case MethodRecognizer::kClassIDgetID:
return InlineLoadClassId(flow_graph, call, graph_entry, entry, last,
result);
@@ -4704,73 +4399,6 @@
return InlineSetIndexed(flow_graph, kind, target, call, receiver, source,
exactness, graph_entry, entry, last, result);
}
- case MethodRecognizer::kByteArrayBaseSetInt8:
- return InlineByteArrayBaseStore(flow_graph, target, call, receiver,
- receiver_cid, kTypedDataInt8ArrayCid,
- graph_entry, entry, last, result);
- case MethodRecognizer::kByteArrayBaseSetUint8:
- return InlineByteArrayBaseStore(flow_graph, target, call, receiver,
- receiver_cid, kTypedDataUint8ArrayCid,
- graph_entry, entry, last, result);
- case MethodRecognizer::kByteArrayBaseSetInt16:
- return InlineByteArrayBaseStore(flow_graph, target, call, receiver,
- receiver_cid, kTypedDataInt16ArrayCid,
- graph_entry, entry, last, result);
- case MethodRecognizer::kByteArrayBaseSetUint16:
- return InlineByteArrayBaseStore(flow_graph, target, call, receiver,
- receiver_cid, kTypedDataUint16ArrayCid,
- graph_entry, entry, last, result);
- case MethodRecognizer::kByteArrayBaseSetInt32:
- return InlineByteArrayBaseStore(flow_graph, target, call, receiver,
- receiver_cid, kTypedDataInt32ArrayCid,
- graph_entry, entry, last, result);
- case MethodRecognizer::kByteArrayBaseSetUint32:
- return InlineByteArrayBaseStore(flow_graph, target, call, receiver,
- receiver_cid, kTypedDataUint32ArrayCid,
- graph_entry, entry, last, result);
- case MethodRecognizer::kByteArrayBaseSetInt64:
- return InlineByteArrayBaseStore(flow_graph, target, call, receiver,
- receiver_cid, kTypedDataInt64ArrayCid,
- graph_entry, entry, last, result);
- case MethodRecognizer::kByteArrayBaseSetUint64:
- return InlineByteArrayBaseStore(flow_graph, target, call, receiver,
- receiver_cid, kTypedDataUint64ArrayCid,
- graph_entry, entry, last, result);
- case MethodRecognizer::kByteArrayBaseSetFloat32:
- if (!CanUnboxDouble()) {
- return false;
- }
- return InlineByteArrayBaseStore(flow_graph, target, call, receiver,
- receiver_cid, kTypedDataFloat32ArrayCid,
- graph_entry, entry, last, result);
- case MethodRecognizer::kByteArrayBaseSetFloat64:
- if (!CanUnboxDouble()) {
- return false;
- }
- return InlineByteArrayBaseStore(flow_graph, target, call, receiver,
- receiver_cid, kTypedDataFloat64ArrayCid,
- graph_entry, entry, last, result);
- case MethodRecognizer::kByteArrayBaseSetFloat32x4:
- if (!ShouldInlineSimd()) {
- return false;
- }
- return InlineByteArrayBaseStore(flow_graph, target, call, receiver,
- receiver_cid, kTypedDataFloat32x4ArrayCid,
- graph_entry, entry, last, result);
- case MethodRecognizer::kByteArrayBaseSetFloat64x2:
- if (!ShouldInlineSimd()) {
- return false;
- }
- return InlineByteArrayBaseStore(flow_graph, target, call, receiver,
- receiver_cid, kTypedDataFloat64x2ArrayCid,
- graph_entry, entry, last, result);
- case MethodRecognizer::kByteArrayBaseSetInt32x4:
- if (!ShouldInlineSimd()) {
- return false;
- }
- return InlineByteArrayBaseStore(flow_graph, target, call, receiver,
- receiver_cid, kTypedDataInt32x4ArrayCid,
- graph_entry, entry, last, result);
case MethodRecognizer::kOneByteStringCodeUnitAt:
case MethodRecognizer::kTwoByteStringCodeUnitAt:
case MethodRecognizer::kExternalOneByteStringCodeUnitAt:
diff --git a/runtime/vm/compiler/compiler_state.cc b/runtime/vm/compiler/compiler_state.cc
index eb9e57e..8085d2d 100644
--- a/runtime/vm/compiler/compiler_state.cc
+++ b/runtime/vm/compiler/compiler_state.cc
@@ -129,6 +129,54 @@
return *interpolate_;
}
+const Class& CompilerState::TypedListClass() {
+ if (typed_list_class_ == nullptr) {
+ Thread* thread = Thread::Current();
+ Zone* zone = thread->zone();
+
+ const Library& lib = Library::Handle(zone, Library::TypedDataLibrary());
+ const Class& cls = Class::ZoneHandle(
+ zone, lib.LookupClassAllowPrivate(Symbols::_TypedList()));
+ ASSERT(!cls.IsNull());
+ const Error& error = Error::Handle(zone, cls.EnsureIsFinalized(thread));
+    ASSERT(error.IsNull());
+ typed_list_class_ = &cls;
+ }
+ return *typed_list_class_;
+}
+
+#define DEFINE_TYPED_LIST_NATIVE_FUNCTION_GETTER(Upper, Lower) \
+ const Function& CompilerState::TypedListGet##Upper() { \
+ if (typed_list_get_##Lower##_ == nullptr) { \
+ Thread* thread = Thread::Current(); \
+ Zone* zone = thread->zone(); \
+ const auto& cls = CompilerState::TypedListClass(); \
+ typed_list_get_##Lower##_ = &Function::ZoneHandle( \
+ zone, cls.LookupFunctionAllowPrivate(Symbols::_nativeGet##Upper())); \
+ ASSERT(!typed_list_get_##Lower##_->IsNull()); \
+ } \
+ return *typed_list_get_##Lower##_; \
+ } \
+ const Function& CompilerState::TypedListSet##Upper() { \
+ if (typed_list_set_##Lower##_ == nullptr) { \
+ Thread* thread = Thread::Current(); \
+ Zone* zone = thread->zone(); \
+ const auto& cls = CompilerState::TypedListClass(); \
+ typed_list_set_##Lower##_ = &Function::ZoneHandle( \
+ zone, cls.LookupFunctionAllowPrivate(Symbols::_nativeSet##Upper())); \
+ ASSERT(!typed_list_set_##Lower##_->IsNull()); \
+ } \
+ return *typed_list_set_##Lower##_; \
+ }
+
+DEFINE_TYPED_LIST_NATIVE_FUNCTION_GETTER(Float32, float32)
+DEFINE_TYPED_LIST_NATIVE_FUNCTION_GETTER(Float64, float64)
+DEFINE_TYPED_LIST_NATIVE_FUNCTION_GETTER(Float32x4, float32x4)
+DEFINE_TYPED_LIST_NATIVE_FUNCTION_GETTER(Int32x4, int32x4)
+DEFINE_TYPED_LIST_NATIVE_FUNCTION_GETTER(Float64x2, float64x2)
+
+#undef DEFINE_TYPED_LIST_NATIVE_FUNCTION_GETTER
+
void CompilerState::ReportCrash() {
OS::PrintErr("=== Crash occurred when compiling %s in %s mode in %s pass\n",
function() != nullptr ? function()->ToFullyQualifiedCString()
diff --git a/runtime/vm/compiler/compiler_state.h b/runtime/vm/compiler/compiler_state.h
index ff3ac87..56c5e7d 100644
--- a/runtime/vm/compiler/compiler_state.h
+++ b/runtime/vm/compiler/compiler_state.h
@@ -107,6 +107,17 @@
// Returns _StringBase._interpolateSingle
const Function& StringBaseInterpolateSingle();
+ const Function& TypedListGetFloat32();
+ const Function& TypedListSetFloat32();
+ const Function& TypedListGetFloat64();
+ const Function& TypedListSetFloat64();
+ const Function& TypedListGetFloat32x4();
+ const Function& TypedListSetFloat32x4();
+ const Function& TypedListGetInt32x4();
+ const Function& TypedListSetInt32x4();
+ const Function& TypedListGetFloat64x2();
+ const Function& TypedListSetFloat64x2();
+
const Function* function() const { return function_; }
void set_function(const Function& function) { function_ = &function; }
@@ -122,6 +133,8 @@
void ReportCrash();
private:
+ const Class& TypedListClass();
+
CHA cha_;
intptr_t deopt_id_ = 0;
@@ -142,6 +155,17 @@
const Class* comparable_class_ = nullptr;
const Function* interpolate_ = nullptr;
const Function* interpolate_single_ = nullptr;
+ const Class* typed_list_class_ = nullptr;
+ const Function* typed_list_get_float32_ = nullptr;
+ const Function* typed_list_set_float32_ = nullptr;
+ const Function* typed_list_get_float64_ = nullptr;
+ const Function* typed_list_set_float64_ = nullptr;
+ const Function* typed_list_get_float32x4_ = nullptr;
+ const Function* typed_list_set_float32x4_ = nullptr;
+ const Function* typed_list_get_int32x4_ = nullptr;
+ const Function* typed_list_set_int32x4_ = nullptr;
+ const Function* typed_list_get_float64x2_ = nullptr;
+ const Function* typed_list_set_float64x2_ = nullptr;
const Function* function_ = nullptr;
const CompilerPass* pass_ = nullptr;
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.cc b/runtime/vm/compiler/frontend/kernel_to_il.cc
index fb11748..e6e3652 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.cc
+++ b/runtime/vm/compiler/frontend/kernel_to_il.cc
@@ -942,6 +942,32 @@
case MethodRecognizer::kRecord_numFields:
case MethodRecognizer::kSuspendState_clone:
case MethodRecognizer::kSuspendState_resume:
+ case MethodRecognizer::kTypedList_GetInt8:
+ case MethodRecognizer::kTypedList_SetInt8:
+ case MethodRecognizer::kTypedList_GetUint8:
+ case MethodRecognizer::kTypedList_SetUint8:
+ case MethodRecognizer::kTypedList_GetInt16:
+ case MethodRecognizer::kTypedList_SetInt16:
+ case MethodRecognizer::kTypedList_GetUint16:
+ case MethodRecognizer::kTypedList_SetUint16:
+ case MethodRecognizer::kTypedList_GetInt32:
+ case MethodRecognizer::kTypedList_SetInt32:
+ case MethodRecognizer::kTypedList_GetUint32:
+ case MethodRecognizer::kTypedList_SetUint32:
+ case MethodRecognizer::kTypedList_GetInt64:
+ case MethodRecognizer::kTypedList_SetInt64:
+ case MethodRecognizer::kTypedList_GetUint64:
+ case MethodRecognizer::kTypedList_SetUint64:
+ case MethodRecognizer::kTypedList_GetFloat32:
+ case MethodRecognizer::kTypedList_SetFloat32:
+ case MethodRecognizer::kTypedList_GetFloat64:
+ case MethodRecognizer::kTypedList_SetFloat64:
+ case MethodRecognizer::kTypedList_GetInt32x4:
+ case MethodRecognizer::kTypedList_SetInt32x4:
+ case MethodRecognizer::kTypedList_GetFloat32x4:
+ case MethodRecognizer::kTypedList_SetFloat32x4:
+ case MethodRecognizer::kTypedList_GetFloat64x2:
+ case MethodRecognizer::kTypedList_SetFloat64x2:
case MethodRecognizer::kTypedData_memMove1:
case MethodRecognizer::kTypedData_memMove2:
case MethodRecognizer::kTypedData_memMove4:
@@ -1155,8 +1181,85 @@
body += TailCall(resume_stub);
break;
}
+ case MethodRecognizer::kTypedList_GetInt8:
+ body += BuildTypedListGet(function, kTypedDataInt8ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_SetInt8:
+ body += BuildTypedListSet(function, kTypedDataInt8ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_GetUint8:
+ body += BuildTypedListGet(function, kTypedDataUint8ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_SetUint8:
+ body += BuildTypedListSet(function, kTypedDataUint8ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_GetInt16:
+ body += BuildTypedListGet(function, kTypedDataInt16ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_SetInt16:
+ body += BuildTypedListSet(function, kTypedDataInt16ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_GetUint16:
+ body += BuildTypedListGet(function, kTypedDataUint16ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_SetUint16:
+ body += BuildTypedListSet(function, kTypedDataUint16ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_GetInt32:
+ body += BuildTypedListGet(function, kTypedDataInt32ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_SetInt32:
+ body += BuildTypedListSet(function, kTypedDataInt32ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_GetUint32:
+ body += BuildTypedListGet(function, kTypedDataUint32ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_SetUint32:
+ body += BuildTypedListSet(function, kTypedDataUint32ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_GetInt64:
+ body += BuildTypedListGet(function, kTypedDataInt64ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_SetInt64:
+ body += BuildTypedListSet(function, kTypedDataInt64ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_GetUint64:
+ body += BuildTypedListGet(function, kTypedDataUint64ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_SetUint64:
+ body += BuildTypedListSet(function, kTypedDataUint64ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_GetFloat32:
+ body += BuildTypedListGet(function, kTypedDataFloat32ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_SetFloat32:
+ body += BuildTypedListSet(function, kTypedDataFloat32ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_GetFloat64:
+ body += BuildTypedListGet(function, kTypedDataFloat64ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_SetFloat64:
+ body += BuildTypedListSet(function, kTypedDataFloat64ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_GetInt32x4:
+ body += BuildTypedListGet(function, kTypedDataInt32x4ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_SetInt32x4:
+ body += BuildTypedListSet(function, kTypedDataInt32x4ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_GetFloat32x4:
+ body += BuildTypedListGet(function, kTypedDataFloat32x4ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_SetFloat32x4:
+ body += BuildTypedListSet(function, kTypedDataFloat32x4ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_GetFloat64x2:
+ body += BuildTypedListGet(function, kTypedDataFloat64x2ArrayCid);
+ break;
+ case MethodRecognizer::kTypedList_SetFloat64x2:
+ body += BuildTypedListSet(function, kTypedDataFloat64x2ArrayCid);
+ break;
case MethodRecognizer::kTypedData_memMove1:
- // Pick an appropriate typed data cid based on the element size.
body += BuildTypedDataMemMove(function, 1);
break;
case MethodRecognizer::kTypedData_memMove2:
@@ -1815,6 +1918,127 @@
return body;
}
+static bool CanUnboxElements(intptr_t view_cid) {
+ switch (view_cid) {
+ case kTypedDataFloat32ArrayCid:
+ case kTypedDataFloat64ArrayCid:
+ return FlowGraphCompiler::SupportsUnboxedDoubles();
+ case kTypedDataInt32x4ArrayCid:
+ case kTypedDataFloat32x4ArrayCid:
+ case kTypedDataFloat64x2ArrayCid:
+ return FlowGraphCompiler::SupportsUnboxedSimd128();
+ default:
+ return true;
+ }
+}
+
+static const Function& TypedListGetNativeFunction(Thread* thread,
+ intptr_t view_cid) {
+ auto& state = thread->compiler_state();
+ switch (view_cid) {
+ case kTypedDataFloat32ArrayCid:
+ return state.TypedListGetFloat32();
+ case kTypedDataFloat64ArrayCid:
+ return state.TypedListGetFloat64();
+ case kTypedDataInt32x4ArrayCid:
+ return state.TypedListGetInt32x4();
+ case kTypedDataFloat32x4ArrayCid:
+ return state.TypedListGetFloat32x4();
+ case kTypedDataFloat64x2ArrayCid:
+ return state.TypedListGetFloat64x2();
+ default:
+ UNREACHABLE();
+ return Object::null_function();
+ }
+}
+
+Fragment FlowGraphBuilder::BuildTypedListGet(const Function& function,
+ intptr_t view_cid) {
+ const intptr_t kNumParameters = 2;
+ ASSERT_EQUAL(parsed_function_->function().NumParameters(), kNumParameters);
+ // Guaranteed to be non-null since it's only called internally from other
+ // instance methods.
+ LocalVariable* arg_receiver = parsed_function_->RawParameterVariable(0);
+ // Guaranteed to be a non-null Smi due to bounds checks prior to call.
+ LocalVariable* arg_offset_in_bytes =
+ parsed_function_->RawParameterVariable(1);
+
+ Fragment body;
+ if (CanUnboxElements(view_cid)) {
+ body += LoadLocal(arg_receiver);
+ body += LoadNativeField(Slot::PointerBase_data(),
+ InnerPointerAccess::kMayBeInnerPointer);
+ body += LoadLocal(arg_offset_in_bytes);
+ body += LoadIndexed(view_cid, /*index_scale=*/1,
+ /*index_unboxed=*/false, kUnalignedAccess);
+ body += Box(LoadIndexedInstr::RepresentationOfArrayElement(view_cid));
+ } else {
+ const auto& native_function = TypedListGetNativeFunction(thread_, view_cid);
+ body += LoadLocal(arg_receiver);
+ body += LoadLocal(arg_offset_in_bytes);
+ body += StaticCall(TokenPosition::kNoSource, native_function,
+ kNumParameters, ICData::kNoRebind);
+ }
+ return body;
+}
+
+static const Function& TypedListSetNativeFunction(Thread* thread,
+ intptr_t view_cid) {
+ auto& state = thread->compiler_state();
+ switch (view_cid) {
+ case kTypedDataFloat32ArrayCid:
+ return state.TypedListSetFloat32();
+ case kTypedDataFloat64ArrayCid:
+ return state.TypedListSetFloat64();
+ case kTypedDataInt32x4ArrayCid:
+ return state.TypedListSetInt32x4();
+ case kTypedDataFloat32x4ArrayCid:
+ return state.TypedListSetFloat32x4();
+ case kTypedDataFloat64x2ArrayCid:
+ return state.TypedListSetFloat64x2();
+ default:
+ UNREACHABLE();
+ return Object::null_function();
+ }
+}
+
+Fragment FlowGraphBuilder::BuildTypedListSet(const Function& function,
+ intptr_t view_cid) {
+ const intptr_t kNumParameters = 3;
+ ASSERT_EQUAL(parsed_function_->function().NumParameters(), kNumParameters);
+ // Guaranteed to be non-null since it's only called internally from other
+ // instance methods.
+ LocalVariable* arg_receiver = parsed_function_->RawParameterVariable(0);
+ // Guaranteed to be a non-null Smi due to bounds checks prior to call.
+ LocalVariable* arg_offset_in_bytes =
+ parsed_function_->RawParameterVariable(1);
+ LocalVariable* arg_value = parsed_function_->RawParameterVariable(2);
+
+ Fragment body;
+ if (CanUnboxElements(view_cid)) {
+ body += LoadLocal(arg_receiver);
+ body += LoadNativeField(Slot::PointerBase_data(),
+ InnerPointerAccess::kMayBeInnerPointer);
+ body += LoadLocal(arg_offset_in_bytes);
+ body += LoadLocal(arg_value);
+ body +=
+ CheckNullOptimized(Symbols::Value(), CheckNullInstr::kArgumentError);
+ body += UnboxTruncate(
+ StoreIndexedInstr::RepresentationOfArrayElement(view_cid));
+ body += StoreIndexedTypedData(view_cid, /*index_scale=*/1,
+ /*index_unboxed=*/false, kUnalignedAccess);
+ body += NullConstant();
+ } else {
+ const auto& native_function = TypedListSetNativeFunction(thread_, view_cid);
+ body += LoadLocal(arg_receiver);
+ body += LoadLocal(arg_offset_in_bytes);
+ body += LoadLocal(arg_value);
+ body += StaticCall(TokenPosition::kNoSource, native_function,
+ kNumParameters, ICData::kNoRebind);
+ }
+ return body;
+}
+
Fragment FlowGraphBuilder::BuildTypedDataMemMove(const Function& function,
intptr_t element_size) {
ASSERT_EQUAL(parsed_function_->function().NumParameters(), 5);
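
(For illustration, not part of this CL: a standalone Dart program whose SIMD
element accesses reach the _getFloat32x4/_setFloat32x4 bodies built by
BuildTypedListGet/BuildTypedListSet above; when SupportsUnboxedSimd128() is
false, those bodies instead call the _nativeGetFloat32x4/_nativeSetFloat32x4
fallbacks kept in typed_data.cc.)

  import 'dart:typed_data';

  void main() {
    final lanes = Float32x4List(2);
    // Conceptually reaches _TypedList._setFloat32x4 via the list's []=.
    lanes[1] = Float32x4(1.0, 2.0, 3.0, 4.0);
    // And _TypedList._getFloat32x4 via [].
    print(lanes[1].x); // 1.0
  }
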
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.h b/runtime/vm/compiler/frontend/kernel_to_il.h
index c112a32..208ca7d 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.h
+++ b/runtime/vm/compiler/frontend/kernel_to_il.h
@@ -158,6 +158,8 @@
FlowGraph* BuildGraphOfRecognizedMethod(const Function& function);
+ Fragment BuildTypedListGet(const Function& function, intptr_t view_cid);
+ Fragment BuildTypedListSet(const Function& function, intptr_t view_cid);
Fragment BuildTypedDataMemMove(const Function& function,
intptr_t element_size);
Fragment BuildTypedDataViewFactoryConstructor(const Function& function,
diff --git a/runtime/vm/compiler/recognized_methods_list.h b/runtime/vm/compiler/recognized_methods_list.h
index 8b47dc3..b6528fe 100644
--- a/runtime/vm/compiler/recognized_methods_list.h
+++ b/runtime/vm/compiler/recognized_methods_list.h
@@ -23,32 +23,32 @@
V(_Record, get:_numFields, Record_numFields, 0x7bb37f73) \
V(_Record, get:_shape, Record_shape, 0x70d29513) \
V(_Record, _fieldAt, Record_fieldAt, 0xb48e2c93) \
- V(_TypedList, _getInt8, ByteArrayBaseGetInt8, 0x16155054) \
- V(_TypedList, _getUint8, ByteArrayBaseGetUint8, 0x1771724a) \
- V(_TypedList, _getInt16, ByteArrayBaseGetInt16, 0x2e320a6f) \
- V(_TypedList, _getUint16, ByteArrayBaseGetUint16, 0x2fb36ad9) \
- V(_TypedList, _getInt32, ByteArrayBaseGetInt32, 0x1909a12a) \
- V(_TypedList, _getUint32, ByteArrayBaseGetUint32, 0x194ee29b) \
- V(_TypedList, _getInt64, ByteArrayBaseGetInt64, 0xf652341f) \
- V(_TypedList, _getUint64, ByteArrayBaseGetUint64, 0x2c4ced79) \
- V(_TypedList, _getFloat32, ByteArrayBaseGetFloat32, 0xe8e81527) \
- V(_TypedList, _getFloat64, ByteArrayBaseGetFloat64, 0xf81baa54) \
- V(_TypedList, _getFloat32x4, ByteArrayBaseGetFloat32x4, 0xaf1e8105) \
- V(_TypedList, _getFloat64x2, ByteArrayBaseGetFloat64x2, 0x544ea0e0) \
- V(_TypedList, _getInt32x4, ByteArrayBaseGetInt32x4, 0x5564e82b) \
- V(_TypedList, _setInt8, ByteArrayBaseSetInt8, 0xe17ab7c2) \
- V(_TypedList, _setUint8, ByteArrayBaseSetUint8, 0xaf4b2b68) \
- V(_TypedList, _setInt16, ByteArrayBaseSetInt16, 0xbad7b447) \
- V(_TypedList, _setUint16, ByteArrayBaseSetUint16, 0xce13bc6f) \
- V(_TypedList, _setInt32, ByteArrayBaseSetInt32, 0xbdcc1f60) \
- V(_TypedList, _setUint32, ByteArrayBaseSetUint32, 0xb95817d2) \
- V(_TypedList, _setInt64, ByteArrayBaseSetInt64, 0xc8bec39a) \
- V(_TypedList, _setUint64, ByteArrayBaseSetUint64, 0xda38a625) \
- V(_TypedList, _setFloat32, ByteArrayBaseSetFloat32, 0x2f27a200) \
- V(_TypedList, _setFloat64, ByteArrayBaseSetFloat64, 0x234b6cf2) \
- V(_TypedList, _setFloat32x4, ByteArrayBaseSetFloat32x4, 0x38b79d7a) \
- V(_TypedList, _setFloat64x2, ByteArrayBaseSetFloat64x2, 0xbadc4b5f) \
- V(_TypedList, _setInt32x4, ByteArrayBaseSetInt32x4, 0x5cda767b) \
+ V(_TypedList, _getInt8, TypedList_GetInt8, 0x16155415) \
+ V(_TypedList, _getUint8, TypedList_GetUint8, 0x1771760b) \
+ V(_TypedList, _getInt16, TypedList_GetInt16, 0x2e320e30) \
+ V(_TypedList, _getUint16, TypedList_GetUint16, 0x2fb36e9a) \
+ V(_TypedList, _getInt32, TypedList_GetInt32, 0x1909a4eb) \
+ V(_TypedList, _getUint32, TypedList_GetUint32, 0x194ee65c) \
+ V(_TypedList, _getInt64, TypedList_GetInt64, 0xf65237e0) \
+ V(_TypedList, _getUint64, TypedList_GetUint64, 0x2c4cf13a) \
+ V(_TypedList, _getFloat32, TypedList_GetFloat32, 0xe8e818e8) \
+ V(_TypedList, _getFloat64, TypedList_GetFloat64, 0xf81bae15) \
+ V(_TypedList, _getFloat32x4, TypedList_GetFloat32x4, 0xaf1e84c6) \
+ V(_TypedList, _getFloat64x2, TypedList_GetFloat64x2, 0x544ea4a1) \
+ V(_TypedList, _getInt32x4, TypedList_GetInt32x4, 0x5564ebec) \
+ V(_TypedList, _setInt8, TypedList_SetInt8, 0xe17abb83) \
+ V(_TypedList, _setUint8, TypedList_SetUint8, 0xaf4b2f29) \
+ V(_TypedList, _setInt16, TypedList_SetInt16, 0xbad7b808) \
+ V(_TypedList, _setUint16, TypedList_SetUint16, 0xce13c030) \
+ V(_TypedList, _setInt32, TypedList_SetInt32, 0xbdcc2321) \
+ V(_TypedList, _setUint32, TypedList_SetUint32, 0xb9581b93) \
+ V(_TypedList, _setInt64, TypedList_SetInt64, 0xc8bec75b) \
+ V(_TypedList, _setUint64, TypedList_SetUint64, 0xda38a9e6) \
+ V(_TypedList, _setFloat32, TypedList_SetFloat32, 0x2f27a5c1) \
+ V(_TypedList, _setFloat64, TypedList_SetFloat64, 0x234b70b3) \
+ V(_TypedList, _setFloat32x4, TypedList_SetFloat32x4, 0x38b7a13b) \
+ V(_TypedList, _setFloat64x2, TypedList_SetFloat64x2, 0xbadc4f20) \
+ V(_TypedList, _setInt32x4, TypedList_SetInt32x4, 0x5cda7a3c) \
V(ByteData, ., ByteDataFactory, 0x45e89423) \
V(_ByteDataView, get:offsetInBytes, ByteDataViewOffsetInBytes, 0x60c0664c) \
V(_ByteDataView, get:_typedData, ByteDataViewTypedData, 0xb9c2d41a) \
@@ -505,30 +505,30 @@
// A list of core functions that internally dispatch based on received id.
#define POLYMORPHIC_TARGET_LIST(V) \
V(_StringBase, [], StringBaseCharAt, 0xd0613adf) \
- V(_TypedList, _getInt8, ByteArrayBaseGetInt8, 0x16155054) \
- V(_TypedList, _getUint8, ByteArrayBaseGetUint8, 0x1771724a) \
- V(_TypedList, _getInt16, ByteArrayBaseGetInt16, 0x2e320a6f) \
- V(_TypedList, _getUint16, ByteArrayBaseGetUint16, 0x2fb36ad9) \
- V(_TypedList, _getInt32, ByteArrayBaseGetInt32, 0x1909a12a) \
- V(_TypedList, _getUint32, ByteArrayBaseGetUint32, 0x194ee29b) \
- V(_TypedList, _getInt64, ByteArrayBaseGetInt64, 0xf652341f) \
- V(_TypedList, _getUint64, ByteArrayBaseGetUint64, 0x2c4ced79) \
- V(_TypedList, _getFloat32, ByteArrayBaseGetFloat32, 0xe8e81527) \
- V(_TypedList, _getFloat64, ByteArrayBaseGetFloat64, 0xf81baa54) \
- V(_TypedList, _getFloat32x4, ByteArrayBaseGetFloat32x4, 0xaf1e8105) \
- V(_TypedList, _getInt32x4, ByteArrayBaseGetInt32x4, 0x5564e82b) \
- V(_TypedList, _setInt8, ByteArrayBaseSetInt8, 0xe17ab7c2) \
- V(_TypedList, _setUint8, ByteArrayBaseSetInt8, 0xaf4b2b68) \
- V(_TypedList, _setInt16, ByteArrayBaseSetInt16, 0xbad7b447) \
- V(_TypedList, _setUint16, ByteArrayBaseSetInt16, 0xce13bc6f) \
- V(_TypedList, _setInt32, ByteArrayBaseSetInt32, 0xbdcc1f60) \
- V(_TypedList, _setUint32, ByteArrayBaseSetUint32, 0xb95817d2) \
- V(_TypedList, _setInt64, ByteArrayBaseSetInt64, 0xc8bec39a) \
- V(_TypedList, _setUint64, ByteArrayBaseSetUint64, 0xda38a625) \
- V(_TypedList, _setFloat32, ByteArrayBaseSetFloat32, 0x2f27a200) \
- V(_TypedList, _setFloat64, ByteArrayBaseSetFloat64, 0x234b6cf2) \
- V(_TypedList, _setFloat32x4, ByteArrayBaseSetFloat32x4, 0x38b79d7a) \
- V(_TypedList, _setInt32x4, ByteArrayBaseSetInt32x4, 0x5cda767b) \
+ V(_TypedList, _getInt8, TypedList_GetInt8, 0x16155415) \
+ V(_TypedList, _getUint8, TypedList_GetUint8, 0x1771760b) \
+ V(_TypedList, _getInt16, TypedList_GetInt16, 0x2e320e30) \
+ V(_TypedList, _getUint16, TypedList_GetUint16, 0x2fb36e9a) \
+ V(_TypedList, _getInt32, TypedList_GetInt32, 0x1909a4eb) \
+ V(_TypedList, _getUint32, TypedList_GetUint32, 0x194ee65c) \
+ V(_TypedList, _getInt64, TypedList_GetInt64, 0xf65237e0) \
+ V(_TypedList, _getUint64, TypedList_GetUint64, 0x2c4cf13a) \
+ V(_TypedList, _getFloat32, TypedList_GetFloat32, 0xe8e818e8) \
+ V(_TypedList, _getFloat64, TypedList_GetFloat64, 0xf81bae15) \
+ V(_TypedList, _getFloat32x4, TypedList_GetFloat32x4, 0xaf1e84c6) \
+ V(_TypedList, _getInt32x4, TypedList_GetInt32x4, 0x5564ebec) \
+ V(_TypedList, _setInt8, TypedList_SetInt8, 0xe17abb83) \
+ V(_TypedList, _setUint8, TypedList_SetInt8, 0xaf4b2f29) \
+ V(_TypedList, _setInt16, TypedList_SetInt16, 0xbad7b808) \
+ V(_TypedList, _setUint16, TypedList_SetInt16, 0xce13c030) \
+ V(_TypedList, _setInt32, TypedList_SetInt32, 0xbdcc2321) \
+ V(_TypedList, _setUint32, TypedList_SetUint32, 0xb9581b93) \
+ V(_TypedList, _setInt64, TypedList_SetInt64, 0xc8bec75b) \
+ V(_TypedList, _setUint64, TypedList_SetUint64, 0xda38a9e6) \
+ V(_TypedList, _setFloat32, TypedList_SetFloat32, 0x2f27a5c1) \
+ V(_TypedList, _setFloat64, TypedList_SetFloat64, 0x234b70b3) \
+ V(_TypedList, _setFloat32x4, TypedList_SetFloat32x4, 0x38b7a13b) \
+ V(_TypedList, _setInt32x4, TypedList_SetInt32x4, 0x5cda7a3c) \
V(Object, get:runtimeType, ObjectRuntimeType, 0x03733c71)
// List of recognized list factories:
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index eddb10d..0a1e958 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -9160,6 +9160,13 @@
return InVmTests(*this);
}
+bool Function::IsPreferInline() const {
+ if (!has_pragma()) return false;
+
+ return Library::FindPragma(Thread::Current(), /*only_core=*/false, *this,
+ Symbols::vm_prefer_inline());
+}
+
bool Function::IsIdempotent() const {
if (!has_pragma()) return false;
@@ -9255,6 +9262,32 @@
case MethodRecognizer::kRecord_numFields:
case MethodRecognizer::kUtf8DecoderScan:
case MethodRecognizer::kDouble_hashCode:
+ case MethodRecognizer::kTypedList_GetInt8:
+ case MethodRecognizer::kTypedList_SetInt8:
+ case MethodRecognizer::kTypedList_GetUint8:
+ case MethodRecognizer::kTypedList_SetUint8:
+ case MethodRecognizer::kTypedList_GetInt16:
+ case MethodRecognizer::kTypedList_SetInt16:
+ case MethodRecognizer::kTypedList_GetUint16:
+ case MethodRecognizer::kTypedList_SetUint16:
+ case MethodRecognizer::kTypedList_GetInt32:
+ case MethodRecognizer::kTypedList_SetInt32:
+ case MethodRecognizer::kTypedList_GetUint32:
+ case MethodRecognizer::kTypedList_SetUint32:
+ case MethodRecognizer::kTypedList_GetInt64:
+ case MethodRecognizer::kTypedList_SetInt64:
+ case MethodRecognizer::kTypedList_GetUint64:
+ case MethodRecognizer::kTypedList_SetUint64:
+ case MethodRecognizer::kTypedList_GetFloat32:
+ case MethodRecognizer::kTypedList_SetFloat32:
+ case MethodRecognizer::kTypedList_GetFloat64:
+ case MethodRecognizer::kTypedList_SetFloat64:
+ case MethodRecognizer::kTypedList_GetInt32x4:
+ case MethodRecognizer::kTypedList_SetInt32x4:
+ case MethodRecognizer::kTypedList_GetFloat32x4:
+ case MethodRecognizer::kTypedList_SetFloat32x4:
+ case MethodRecognizer::kTypedList_GetFloat64x2:
+ case MethodRecognizer::kTypedList_SetFloat64x2:
case MethodRecognizer::kTypedData_memMove1:
case MethodRecognizer::kTypedData_memMove2:
case MethodRecognizer::kTypedData_memMove4:
@@ -9293,6 +9326,7 @@
  // idempotent because if deoptimization is needed in inlined body, the
// execution of the force-optimized will be restarted at the beginning of
// the function.
+ ASSERT(!IsPreferInline() || IsIdempotent());
return IsIdempotent();
}
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index 9d898ef..2818bdd 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -3566,6 +3566,9 @@
// run.
bool ForceOptimize() const;
+ // Whether this function should be inlined if at all possible.
+ bool IsPreferInline() const;
+
// Whether this function is idempotent (i.e. calling it twice has the same
// effect as calling it once - no visible side effects).
//
diff --git a/runtime/vm/object_store.h b/runtime/vm/object_store.h
index e9d0063..cf30fbc 100644
--- a/runtime/vm/object_store.h
+++ b/runtime/vm/object_store.h
@@ -527,6 +527,7 @@
#undef DECLARE_LAZY_INIT_ASYNC_GETTER
#undef DECLARE_LAZY_INIT_ISOLATE_GETTER
#undef DECLARE_LAZY_INIT_INTERNAL_GETTER
+#undef DECLARE_LAZY_INIT_TYPED_DATA_GETTER
LibraryPtr bootstrap_library(BootstrapLibraryId index) {
switch (index) {
diff --git a/runtime/vm/symbols.h b/runtime/vm/symbols.h
index c937627..79ffa65 100644
--- a/runtime/vm/symbols.h
+++ b/runtime/vm/symbols.h
@@ -386,6 +386,7 @@
V(_Type, "_Type") \
V(_TypeParameter, "_TypeParameter") \
V(_TypeVariableMirror, "_TypeVariableMirror") \
+ V(_TypedList, "_TypedList") \
V(_TypedListBase, "_TypedListBase") \
V(_Uint16ArrayFactory, "Uint16List.") \
V(_Uint16ArrayView, "_Uint16ArrayView") \
@@ -451,6 +452,16 @@
V(_mapGet, "_mapGet") \
V(_mapKeys, "_mapKeys") \
V(_name, "_name") \
+ V(_nativeGetFloat32, "_nativeGetFloat32") \
+ V(_nativeSetFloat32, "_nativeSetFloat32") \
+ V(_nativeGetFloat64, "_nativeGetFloat64") \
+ V(_nativeSetFloat64, "_nativeSetFloat64") \
+ V(_nativeGetFloat32x4, "_nativeGetFloat32x4") \
+ V(_nativeSetFloat32x4, "_nativeSetFloat32x4") \
+ V(_nativeGetInt32x4, "_nativeGetInt32x4") \
+ V(_nativeSetInt32x4, "_nativeSetInt32x4") \
+ V(_nativeGetFloat64x2, "_nativeGetFloat64x2") \
+ V(_nativeSetFloat64x2, "_nativeSetFloat64x2") \
V(_nativeSetRange, "_nativeSetRange") \
V(_objectEquals, "_objectEquals") \
V(_objectHashCode, "_objectHashCode") \
diff --git a/sdk/lib/_internal/vm/lib/typed_data_patch.dart b/sdk/lib/_internal/vm/lib/typed_data_patch.dart
index ef8159e..50a1257 100644
--- a/sdk/lib/_internal/vm/lib/typed_data_patch.dart
+++ b/sdk/lib/_internal/vm/lib/typed_data_patch.dart
@@ -2055,104 +2055,173 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", "dart:core#_Smi")
- @pragma("vm:external-name", "TypedData_GetInt8")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external int _getInt8(int offsetInBytes);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_SetInt8")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external void _setInt8(int offsetInBytes, int value);
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", "dart:core#_Smi")
- @pragma("vm:external-name", "TypedData_GetUint8")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external int _getUint8(int offsetInBytes);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_SetUint8")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external void _setUint8(int offsetInBytes, int value);
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", "dart:core#_Smi")
- @pragma("vm:external-name", "TypedData_GetInt16")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external int _getInt16(int offsetInBytes);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_SetInt16")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external void _setInt16(int offsetInBytes, int value);
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", "dart:core#_Smi")
- @pragma("vm:external-name", "TypedData_GetUint16")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external int _getUint16(int offsetInBytes);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_SetUint16")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external void _setUint16(int offsetInBytes, int value);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_GetInt32")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external int _getInt32(int offsetInBytes);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_SetInt32")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external void _setInt32(int offsetInBytes, int value);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_GetUint32")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external int _getUint32(int offsetInBytes);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_SetUint32")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external void _setUint32(int offsetInBytes, int value);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_GetInt64")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external int _getInt64(int offsetInBytes);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_SetInt64")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external void _setInt64(int offsetInBytes, int value);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_GetUint64")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external int _getUint64(int offsetInBytes);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_SetUint64")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external void _setUint64(int offsetInBytes, int value);
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", "dart:core#_Double")
- @pragma("vm:external-name", "TypedData_GetFloat32")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external double _getFloat32(int offsetInBytes);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_SetFloat32")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external void _setFloat32(int offsetInBytes, double value);
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", "dart:core#_Double")
- @pragma("vm:external-name", "TypedData_GetFloat64")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external double _getFloat64(int offsetInBytes);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_SetFloat64")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external void _setFloat64(int offsetInBytes, double value);
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float32x4)
- @pragma("vm:external-name", "TypedData_GetFloat32x4")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external Float32x4 _getFloat32x4(int offsetInBytes);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_SetFloat32x4")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external void _setFloat32x4(int offsetInBytes, Float32x4 value);
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int32x4)
- @pragma("vm:external-name", "TypedData_GetInt32x4")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external Int32x4 _getInt32x4(int offsetInBytes);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_SetInt32x4")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external void _setInt32x4(int offsetInBytes, Int32x4 value);
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float64x2)
- @pragma("vm:external-name", "TypedData_GetFloat64x2")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external Float64x2 _getFloat64x2(int offsetInBytes);
@pragma("vm:recognized", "other")
- @pragma("vm:external-name", "TypedData_SetFloat64x2")
+ @pragma("vm:prefer-inline")
+ @pragma("vm:idempotent")
external void _setFloat64x2(int offsetInBytes, Float64x2 value);
+ // The _nativeGetX and _nativeSetX methods are only used as a fallback when
+ // unboxed double or SIMD values are unsupported by the flow graph compiler.
+
+ @pragma("vm:entry-point", "call")
+ @pragma("vm:exact-result-type", "dart:core#_Double")
+ @pragma("vm:external-name", "TypedData_GetFloat32")
+ external double _nativeGetFloat32(int offsetInBytes);
+ @pragma("vm:entry-point", "call")
+ @pragma("vm:external-name", "TypedData_SetFloat32")
+ external void _nativeSetFloat32(int offsetInBytes, double value);
+
+ @pragma("vm:entry-point", "call")
+ @pragma("vm:exact-result-type", "dart:core#_Double")
+ @pragma("vm:external-name", "TypedData_GetFloat64")
+ external double _nativeGetFloat64(int offsetInBytes);
+ @pragma("vm:entry-point", "call")
+ @pragma("vm:external-name", "TypedData_SetFloat64")
+ external void _nativeSetFloat64(int offsetInBytes, double value);
+
+ @pragma("vm:entry-point", "call")
+ @pragma("vm:exact-result-type", _Float32x4)
+ @pragma("vm:external-name", "TypedData_GetFloat32x4")
+ external Float32x4 _nativeGetFloat32x4(int offsetInBytes);
+ @pragma("vm:entry-point", "call")
+ @pragma("vm:external-name", "TypedData_SetFloat32x4")
+ external void _nativeSetFloat32x4(int offsetInBytes, Float32x4 value);
+
+ @pragma("vm:entry-point", "call")
+ @pragma("vm:exact-result-type", _Int32x4)
+ @pragma("vm:external-name", "TypedData_GetInt32x4")
+ external Int32x4 _nativeGetInt32x4(int offsetInBytes);
+ @pragma("vm:entry-point", "call")
+ @pragma("vm:external-name", "TypedData_SetInt32x4")
+ external void _nativeSetInt32x4(int offsetInBytes, Int32x4 value);
+
+ @pragma("vm:entry-point", "call")
+ @pragma("vm:exact-result-type", _Float64x2)
+ @pragma("vm:external-name", "TypedData_GetFloat64x2")
+ external Float64x2 _nativeGetFloat64x2(int offsetInBytes);
+ @pragma("vm:entry-point", "call")
+ @pragma("vm:external-name", "TypedData_SetFloat64x2")
+ external void _nativeSetFloat64x2(int offsetInBytes, Float64x2 value);
+
/**
* Stores the [CodeUnits] as UTF-16 units into this TypedData at
* positions [start]..[end] (uint16 indices).