Revert "[vm/compiler] Change MemoryCopy to also take untagged addresses."
This reverts commit 06d7a2352e1f74141da4cbd19782d6b9bb4e6e49.
Reason for revert: everything crashes on the vm-aot-linux-debug-simarm_x64 builder.
Original change's description:
> [vm/compiler] Change MemoryCopy to also take untagged addresses.
>
> This CL adds the ability to pass the payload address of the source
> and destination directly to the MemoryCopy instruction as an untagged
> value.
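>
> As a rough illustration of what the untagged form boils down to (a
> conceptual C++ sketch, not VM code; all names are illustrative): once
> both inputs are raw payload addresses, the copy is plain pointer
> arithmetic plus a memmove, with no header adjustment based on the
> receiver's class id.
>
>   #include <cstdint>
>   #include <cstring>
>
>   void MemoryCopyUntagged(uint8_t* src_payload, uint8_t* dest_payload,
>                           intptr_t src_start, intptr_t dest_start,
>                           intptr_t length, intptr_t element_size) {
>     std::memmove(dest_payload + dest_start * element_size,
>                  src_payload + src_start * element_size,
>                  length * element_size);
>   }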
>
> The new translations of the _TypedListBase._memMoveN methods use the new
> MemoryCopy constructor, retrieving the untagged value of the data field
> of both the source and destination. This way, if inlining exposes the
> allocation of the object from which the data field is being retrieved,
> then allocation sinking can remove the intermediate allocation if there
> are no escaping uses of the object.
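>
> A hedged sketch of that translation (the zone `Z` and the surrounding
> builder state are assumed from context; the constructor signatures
> match the il.h changes in this CL): load the untagged payload address
> of both objects from the PointerBase.data slot and pass the results to
> the element_size-based MemoryCopyInstr overload.
>
>   // Sketch only: 'src', 'dest', 'src_start', 'dest_start', 'length'
>   // and 'element_size' come from the surrounding builder context.
>   Definition* src_data = new (Z) LoadFieldInstr(
>       new (Z) Value(src), Slot::PointerBase_data(),
>       InnerPointerAccess::kMayBeInnerPointer, InstructionSource());
>   Definition* dest_data = new (Z) LoadFieldInstr(
>       new (Z) Value(dest), Slot::PointerBase_data(),
>       InnerPointerAccess::kMayBeInnerPointer, InstructionSource());
>   MemoryCopyInstr* copy = new (Z) MemoryCopyInstr(
>       element_size, new (Z) Value(src_data), new (Z) Value(dest_data),
>       new (Z) Value(src_start), new (Z) Value(dest_start),
>       new (Z) Value(length), /*unboxed_inputs=*/true);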
>
> Since Pointer.asTypedList allocates such ExternalTypedData objects,
> this CL ensures that method is inlined whenever possible, which removes
> the intermediate allocation if the only use of the TypedData object
> is to call setRange for memory copying purposes.
>
> This CL also separates unboxed native slots into two groups: those
> that contain untagged addresses and those that do not. The former
> group now has the kUntagged representation, which mimics the old
> use of LoadUntagged for the PointerBase data field and also ensures
> that untagged addresses are first explicitly converted to an unboxed
> integer before any arithmetic is performed on them, and explicitly
> converted back to untagged before being stored in a slot that
> contains untagged addresses.
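>
> The discipline is analogous to this C++ round-trip (a loose analogy,
> not VM code): the address is opaque until explicitly converted to an
> integer, and must be explicitly converted back before reuse.
>
>   #include <cstdint>
>
>   // untagged -> unboxed integer, arithmetic, then back to untagged.
>   void* BumpUntagged(void* untagged_addr, intptr_t offset_in_bytes) {
>     uintptr_t value = reinterpret_cast<uintptr_t>(untagged_addr);
>     value += offset_in_bytes;
>     return reinterpret_cast<void*>(value);
>   }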
>
> When an unboxed native slot that contains untagged addresses is
> defined, the definition also includes a boolean that states whether
> addresses that may be moved by the GC can be stored in the slot.
> The redundancy eliminator uses this to decide whether it is safe to
> eliminate a duplicate load, replace a load with the value originally
> stored in the slot, or lift a load out of a loop.
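>
> In effect the eliminator applies a safety predicate along these lines
> (a sketch; the helper name is assumed, but Slot::representation() and
> may_contain_inner_pointer() are from this CL):
>
>   // A repeated load of an untagged slot may only be reused across a
>   // potential GC safepoint if the slot cannot hold a moveable address.
>   bool CanForwardUntaggedLoad(const Slot& slot, bool crosses_safepoint) {
>     if (slot.representation() != kUntagged) return true;
>     return !crosses_safepoint || !slot.may_contain_inner_pointer();
>   }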
>
> In particular, the PointerBase data field may contain GC-moveable
> addresses, but only for internal TypedData objects and views, not
> for external TypedData objects or Pointers. To allow load optimizations
> involving the latter, the LoadField and StoreField instructions now
> take an InnerPointerAccess flag describing whether the loaded or
> stored address is guaranteed not to be a GC-moveable inner pointer,
> overriding the information from the slot argument.
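>
> A minimal sketch of building such a load, mirroring the construction
> used in il_test.cc below: for a known Pointer receiver the address can
> never be an inner pointer, so kCannotBeInnerPointer applies and the
> load stays eligible for forwarding and hoisting.
>
>   auto pointer_value = Value(pointer);  // 'pointer' is a Pointer definition.
>   auto* const load = new (zone) LoadFieldInstr(
>       &pointer_value, Slot::PointerBase_data(),
>       InnerPointerAccess::kCannotBeInnerPointer, InstructionSource());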
>
> Notable benchmark changes on x64 (similar for other archs unless noted):
>
> JIT:
> * FfiMemory.PointerPointer: 250.7%
> * FfiStructCopy.Copy1Bytes: -26.73% (only x64)
> * FfiStructCopy.Copy32Bytes: -25.18% (only x64)
> * MemoryCopy.64.setRange.Pointer.Uint8: 19.36%
> * MemoryCopy.64.setRange.Pointer.Double: 18.96%
> * MemoryCopy.8.setRange.Pointer.Double: 17.59%
> * MemoryCopy.8.setRange.Pointer.Uint8: 19.46%
>
> AOT:
> * FfiMemory.PointerPointer: 323.5%
> * FfiStruct.FieldLoadStore: 483.3%
> * FileIO_readwrite_64kb: 15.39%
> * FileIO_readwrite_512kb (Intel Xeon): 46.22%
> * MemoryCopy.512.setRange.Pointer.Uint8: 35.20%
> * MemoryCopy.64.setRange.Pointer.Uint8: 55.40%
> * MemoryCopy.512.setRange.Pointer.Double: 29.45%
> * MemoryCopy.64.setRange.Pointer.Double: 60.37%
> * MemoryCopy.8.setRange.Pointer.Double: 59.54%
> * MemoryCopy.8.setRange.Pointer.Uint8: 55.40%
> * FfiStructCopy.Copy32Bytes: 398.3%
> * FfiStructCopy.Copy1Bytes: 1233%
>
> TEST=vm/dart/address_local_pointer, vm/dart/pointer_as_typed_list
>
> Issue: https://github.com/dart-lang/sdk/issues/42072
> Fixes: https://github.com/dart-lang/sdk/issues/53124
>
> Cq-Include-Trybots: luci.dart.try:vm-ffi-qemu-linux-release-arm-try,vm-eager-optimization-linux-release-x64-try,vm-linux-release-x64-try,vm-linux-debug-x64-try,vm-aot-linux-release-x64-try,vm-aot-linux-debug-x64-try
> Change-Id: I563e0bfac5b1ac6cf1111649934067c12891b631
> Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/324820
> Reviewed-by: Alexander Markov <alexmarkov@google.com>
> Commit-Queue: Tess Strickland <sstrickl@google.com>
> Reviewed-by: Martin Kustermann <kustermann@google.com>
Issue: https://github.com/dart-lang/sdk/issues/42072
Change-Id: I7c31434e01108487de69a32154bbefd1538c6f0f
Cq-Include-Trybots: luci.dart.try:vm-ffi-qemu-linux-release-arm-try,vm-eager-optimization-linux-release-x64-try,vm-linux-release-x64-try,vm-linux-debug-x64-try,vm-aot-linux-release-x64-try,vm-aot-linux-debug-x64-try
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/330523
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Commit-Queue: Alexander Aprelev <aam@google.com>
Auto-Submit: Alexander Markov <alexmarkov@google.com>
Reviewed-by: Alexander Aprelev <aam@google.com>
diff --git a/pkg/vm/lib/testing/il_matchers.dart b/pkg/vm/lib/testing/il_matchers.dart
index e95e5c1..3d256ab 100644
--- a/pkg/vm/lib/testing/il_matchers.dart
+++ b/pkg/vm/lib/testing/il_matchers.dart
@@ -183,10 +183,6 @@
void bind(String name, Map<String, dynamic> instrOrBlock) {
final id = instrOrBlock['v'] ?? instrOrBlock['b'];
- if (id == null) {
- throw 'Instruction is not a definition or a block: ${instrOrBlock['o']}';
- }
-
if (nameToId.containsKey(name)) {
if (nameToId[name] != id) {
throw 'Binding mismatch for $name: got ${nameToId[name]} and $id';
diff --git a/runtime/lib/typed_data.cc b/runtime/lib/typed_data.cc
index e28724a..88bda41 100644
--- a/runtime/lib/typed_data.cc
+++ b/runtime/lib/typed_data.cc
@@ -29,6 +29,24 @@
}
}
+static void AlignmentCheck(intptr_t offset_in_bytes, intptr_t element_size) {
+ if ((offset_in_bytes % element_size) != 0) {
+ const auto& error = String::Handle(String::NewFormatted(
+ "Offset in bytes (%" Pd ") must be a multiple of %" Pd "",
+ offset_in_bytes, element_size));
+ Exceptions::ThrowArgumentError(error);
+ }
+}
+
+// Checks to see if a length will not result in an OOM error.
+static void LengthCheck(intptr_t len, intptr_t max) {
+ if (len < 0 || len > max) {
+ const String& error = String::Handle(String::NewFormatted(
+ "Length (%" Pd ") of object must be in range [0..%" Pd "]", len, max));
+ Exceptions::ThrowArgumentError(error);
+ }
+}
+
DEFINE_NATIVE_ENTRY(TypedDataBase_length, 0, 1) {
GET_NON_NULL_NATIVE_ARGUMENT(TypedDataBase, array, arguments->NativeArgAt(0));
return Smi::New(array.Length());
@@ -130,6 +148,67 @@
return Object::null();
}
+// Native methods for typed data allocation are recognized and implemented
+// in FlowGraphBuilder::BuildGraphOfRecognizedMethod.
+// These bodies exist only to assert that they are not used.
+#define TYPED_DATA_NEW(name) \
+ DEFINE_NATIVE_ENTRY(TypedData_##name##_new, 0, 2) { \
+ UNREACHABLE(); \
+ return Object::null(); \
+ }
+
+#define TYPED_DATA_NEW_NATIVE(name) TYPED_DATA_NEW(name)
+
+CLASS_LIST_TYPED_DATA(TYPED_DATA_NEW_NATIVE)
+#undef TYPED_DATA_NEW_NATIVE
+#undef TYPED_DATA_NEW
+
+// We check the length parameter against a possible maximum length for the
+// array based on available physical addressable memory on the system.
+//
+// More specifically
+//
+// TypedData::MaxElements(cid) is equal to (kSmiMax / ElementSizeInBytes(cid))
+//
+// which ensures that the number of bytes the array holds is guaranteed to fit
+// into a _Smi.
+//
+// Argument 0 is type arguments and is ignored.
+static InstancePtr NewTypedDataView(intptr_t cid,
+ intptr_t element_size,
+ Zone* zone,
+ NativeArguments* arguments) {
+ GET_NON_NULL_NATIVE_ARGUMENT(TypedDataBase, typed_data,
+ arguments->NativeArgAt(1));
+ GET_NON_NULL_NATIVE_ARGUMENT(Smi, offset, arguments->NativeArgAt(2));
+ GET_NON_NULL_NATIVE_ARGUMENT(Smi, len, arguments->NativeArgAt(3));
+ const intptr_t backing_length = typed_data.LengthInBytes();
+ const intptr_t offset_in_bytes = offset.Value();
+ const intptr_t length = len.Value();
+ AlignmentCheck(offset_in_bytes, element_size);
+ LengthCheck(offset_in_bytes + length * element_size, backing_length);
+ return TypedDataView::New(cid, typed_data, offset_in_bytes, length);
+}
+
+#define TYPED_DATA_VIEW_NEW(native_name, cid) \
+ DEFINE_NATIVE_ENTRY(native_name, 0, 4) { \
+ return NewTypedDataView(cid, TypedDataBase::ElementSizeInBytes(cid), zone, \
+ arguments); \
+ }
+
+#define TYPED_DATA_NEW_NATIVE(name) \
+ TYPED_DATA_VIEW_NEW(TypedDataView_##name##View_new, \
+ kTypedData##name##ViewCid) \
+ TYPED_DATA_VIEW_NEW(TypedDataView_Unmodifiable##name##View_new, \
+ kUnmodifiableTypedData##name##ViewCid)
+
+CLASS_LIST_TYPED_DATA(TYPED_DATA_NEW_NATIVE)
+TYPED_DATA_VIEW_NEW(TypedDataView_ByteDataView_new, kByteDataViewCid)
+TYPED_DATA_VIEW_NEW(TypedDataView_UnmodifiableByteDataView_new,
+ kUnmodifiableByteDataViewCid)
+#undef TYPED_DATA_NEW_NATIVE
+#undef TYPED_DATA_VIEW_NEW
+
#define TYPED_DATA_GETTER(getter, object, ctor, access_size) \
DEFINE_NATIVE_ENTRY(TypedData_##getter, 0, 2) { \
GET_NON_NULL_NATIVE_ARGUMENT(TypedDataBase, array, \
diff --git a/runtime/tests/vm/dart/address_local_pointer_il_test.dart b/runtime/tests/vm/dart/address_local_pointer_il_test.dart
deleted file mode 100644
index 46e8f7b..0000000
--- a/runtime/tests/vm/dart/address_local_pointer_il_test.dart
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2023, the Dart project authors. Please see the AUTHORS file
-// for details. All rights reserved. Use of this source code is governed by a
-// BSD-style license that can be found in the LICENSE file.
-
-// Verify that returning the address of a locally created Pointer that doesn't
-// escape just returns the address used to create the Pointer without actually
-// creating it. (See https://github.com/dart-lang/sdk/issues/53124.)
-
-import 'dart:ffi';
-
-import 'package:expect/expect.dart';
-import 'package:ffi/ffi.dart';
-import 'package:vm/testing/il_matchers.dart';
-
-@pragma('vm:never-inline')
-@pragma('vm:testing:print-flow-graph')
-int identity(int address) => Pointer<Void>.fromAddress(address).address;
-
-void matchIL$identity(FlowGraph graph) {
- graph.dump();
- graph.match([
- match.block('Graph'),
- match.block('Function', [
- 'address' << match.Parameter(index: 0),
- match.Return('address'),
- ]),
- ]);
-}
-
-void main(List<String> args) {
- final n = args.isEmpty ? 100 : int.parse(args.first);
- Expect.equals(n, identity(n));
-}
diff --git a/runtime/tests/vm/dart/pointer_as_typed_list_il_test.dart b/runtime/tests/vm/dart/pointer_as_typed_list_il_test.dart
deleted file mode 100644
index 5b56770..0000000
--- a/runtime/tests/vm/dart/pointer_as_typed_list_il_test.dart
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) 2023, the Dart project authors. Please see the AUTHORS file
-// for details. All rights reserved. Use of this source code is governed by a
-// BSD-style license that can be found in the LICENSE file.
-
-// Verify that we don't generate intermediate external TypedData views when
-// using setRange to copy between Pointers.
-
-import 'dart:ffi';
-
-import 'package:expect/expect.dart';
-import 'package:ffi/ffi.dart';
-import 'package:vm/testing/il_matchers.dart';
-
-@pragma('vm:never-inline')
-@pragma('vm:testing:print-flow-graph')
-void copyPointerContents(Pointer<Uint8> dest, Pointer<Uint8> src, int n) {
- dest.asTypedList(n).setRange(0, n, src.asTypedList(n));
-}
-
-void matchIL$copyPointerContents(FlowGraph graph) {
- graph.dump();
- // Since we only call it with n == 100, the third argument will get optimized
- // away. The element_size starts as 1, but canonicalization will turn it into
- // 4, the length to 100 / 4 == 25, and the starting offsets to 0 / 4 == 0.
- //
- // We could change the definition of n in main to:
- //
- // final n = args.isEmpty ? 100 : int.parse(args.first);
- //
- // but then we'd have to wade through the generated bounds checks here.
- graph.match([
- match.block('Graph', [
- 'cnull' << match.Constant(value: null),
- 'c0' << match.Constant(value: 0),
- 'c25' << match.Constant(value: 25),
- ]),
- match.block('Function', [
- 'dest' << match.Parameter(index: 0),
- 'src' << match.Parameter(index: 1),
- 'dest.data' << match.LoadField('dest', slot: 'PointerBase.data'),
- 'src.data' << match.LoadField('src', slot: 'PointerBase.data'),
- match.MemoryCopy('src.data', 'dest.data', 'c0', 'c0', 'c25',
- element_size: 4),
- match.Return('cnull'),
- ]),
- ]);
-}
-
-void main(List<String> args) {
- final n = 100;
- final src = malloc<Uint8>(n);
- for (int i = 0; i < n; i++) {
- src[i] = n - i;
- }
- final dest = calloc<Uint8>(n);
- copyPointerContents(dest, src, n);
- Expect.listEquals(src.asTypedList(n), dest.asTypedList(n));
-}
diff --git a/runtime/tests/vm/dart/typed_data_aot_regress43534_il_test.dart b/runtime/tests/vm/dart/typed_data_aot_regress43534_il_test.dart
index bf4b664..50fe78c 100644
--- a/runtime/tests/vm/dart/typed_data_aot_regress43534_il_test.dart
+++ b/runtime/tests/vm/dart/typed_data_aot_regress43534_il_test.dart
@@ -26,10 +26,9 @@
graph.match([
match.block('Graph'),
match.block('Function', [
- 'list' << match.Parameter(index: 1),
- match.LoadField('list', slot: 'TypedDataBase.length'),
+ match.LoadField(),
match.GenericCheckBound(),
- match.LoadField('list', slot: 'PointerBase.data'),
+ match.LoadUntagged(),
match.LoadIndexed(),
]),
]);
diff --git a/runtime/tests/vm/dart_2/typed_data_aot_regress43534_il_test.dart b/runtime/tests/vm/dart_2/typed_data_aot_regress43534_il_test.dart
index bf4b664..50fe78c 100644
--- a/runtime/tests/vm/dart_2/typed_data_aot_regress43534_il_test.dart
+++ b/runtime/tests/vm/dart_2/typed_data_aot_regress43534_il_test.dart
@@ -26,10 +26,9 @@
graph.match([
match.block('Graph'),
match.block('Function', [
- 'list' << match.Parameter(index: 1),
- match.LoadField('list', slot: 'TypedDataBase.length'),
+ match.LoadField(),
match.GenericCheckBound(),
- match.LoadField('list', slot: 'PointerBase.data'),
+ match.LoadUntagged(),
match.LoadIndexed(),
]),
]);
diff --git a/runtime/tools/ffi/sdk_lib_ffi_generator.dart b/runtime/tools/ffi/sdk_lib_ffi_generator.dart
index 41a6c3f..7989f58 100644
--- a/runtime/tools/ffi/sdk_lib_ffi_generator.dart
+++ b/runtime/tools/ffi/sdk_lib_ffi_generator.dart
@@ -258,7 +258,6 @@
? ""
: """
@patch
- @pragma("vm:prefer-inline")
$typedListType asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
diff --git a/runtime/vm/bootstrap_natives.h b/runtime/vm/bootstrap_natives.h
index b9e42ce..e58de2f 100644
--- a/runtime/vm/bootstrap_natives.h
+++ b/runtime/vm/bootstrap_natives.h
@@ -158,6 +158,20 @@
V(Timeline_getTraceClock, 0) \
V(Timeline_isDartStreamEnabled, 0) \
V(Timeline_reportTaskEvent, 5) \
+ V(TypedData_Int8Array_new, 2) \
+ V(TypedData_Uint8Array_new, 2) \
+ V(TypedData_Uint8ClampedArray_new, 2) \
+ V(TypedData_Int16Array_new, 2) \
+ V(TypedData_Uint16Array_new, 2) \
+ V(TypedData_Int32Array_new, 2) \
+ V(TypedData_Uint32Array_new, 2) \
+ V(TypedData_Int64Array_new, 2) \
+ V(TypedData_Uint64Array_new, 2) \
+ V(TypedData_Float32Array_new, 2) \
+ V(TypedData_Float64Array_new, 2) \
+ V(TypedData_Float32x4Array_new, 2) \
+ V(TypedData_Int32x4Array_new, 2) \
+ V(TypedData_Float64x2Array_new, 2) \
V(TypedDataBase_length, 1) \
V(TypedDataBase_setClampedRange, 5) \
V(TypedData_GetInt8, 2) \
@@ -186,8 +200,38 @@
V(TypedData_SetInt32x4, 3) \
V(TypedData_GetFloat64x2, 2) \
V(TypedData_SetFloat64x2, 3) \
+ V(TypedDataView_ByteDataView_new, 4) \
+ V(TypedDataView_Int8ArrayView_new, 4) \
+ V(TypedDataView_Uint8ArrayView_new, 4) \
+ V(TypedDataView_Uint8ClampedArrayView_new, 4) \
+ V(TypedDataView_Int16ArrayView_new, 4) \
+ V(TypedDataView_Uint16ArrayView_new, 4) \
+ V(TypedDataView_Int32ArrayView_new, 4) \
+ V(TypedDataView_Uint32ArrayView_new, 4) \
+ V(TypedDataView_Int64ArrayView_new, 4) \
+ V(TypedDataView_Uint64ArrayView_new, 4) \
+ V(TypedDataView_Float32ArrayView_new, 4) \
+ V(TypedDataView_Float64ArrayView_new, 4) \
+ V(TypedDataView_Float32x4ArrayView_new, 4) \
+ V(TypedDataView_Int32x4ArrayView_new, 4) \
+ V(TypedDataView_Float64x2ArrayView_new, 4) \
V(TypedDataView_offsetInBytes, 1) \
V(TypedDataView_typedData, 1) \
+ V(TypedDataView_UnmodifiableByteDataView_new, 4) \
+ V(TypedDataView_UnmodifiableInt8ArrayView_new, 4) \
+ V(TypedDataView_UnmodifiableUint8ArrayView_new, 4) \
+ V(TypedDataView_UnmodifiableUint8ClampedArrayView_new, 4) \
+ V(TypedDataView_UnmodifiableInt16ArrayView_new, 4) \
+ V(TypedDataView_UnmodifiableUint16ArrayView_new, 4) \
+ V(TypedDataView_UnmodifiableInt32ArrayView_new, 4) \
+ V(TypedDataView_UnmodifiableUint32ArrayView_new, 4) \
+ V(TypedDataView_UnmodifiableInt64ArrayView_new, 4) \
+ V(TypedDataView_UnmodifiableUint64ArrayView_new, 4) \
+ V(TypedDataView_UnmodifiableFloat32ArrayView_new, 4) \
+ V(TypedDataView_UnmodifiableFloat64ArrayView_new, 4) \
+ V(TypedDataView_UnmodifiableFloat32x4ArrayView_new, 4) \
+ V(TypedDataView_UnmodifiableInt32x4ArrayView_new, 4) \
+ V(TypedDataView_UnmodifiableFloat64x2ArrayView_new, 4) \
V(Float32x4_fromDoubles, 4) \
V(Float32x4_splat, 1) \
V(Float32x4_fromInt32x4Bits, 2) \
diff --git a/runtime/vm/compiler/backend/compile_type.h b/runtime/vm/compiler/backend/compile_type.h
index 47a8312..b812552 100644
--- a/runtime/vm/compiler/backend/compile_type.h
+++ b/runtime/vm/compiler/backend/compile_type.h
@@ -191,9 +191,6 @@
// Create non-nullable String type.
static CompileType String();
- // Create non-nullable Object type.
- static CompileType Object();
-
// Perform a join operation over the type lattice.
void Union(CompileType* other);
diff --git a/runtime/vm/compiler/backend/flow_graph.cc b/runtime/vm/compiler/backend/flow_graph.cc
index e1117fe..29019cf 100644
--- a/runtime/vm/compiler/backend/flow_graph.cc
+++ b/runtime/vm/compiler/backend/flow_graph.cc
@@ -2082,15 +2082,16 @@
: worklist_(flow_graph, 10) {}
void Process(PhiInstr* phi) {
- auto new_representation = kTagged;
+ Representation unboxed = phi->representation();
+
switch (phi->Type()->ToCid()) {
case kDoubleCid:
if (CanUnboxDouble()) {
- new_representation = DetermineIfAnyIncomingUnboxedFloats(phi)
- ? kUnboxedFloat
- : kUnboxedDouble;
+ // Could be UnboxedDouble or UnboxedFloat
+ unboxed = DetermineIfAnyIncomingUnboxedFloats(phi) ? kUnboxedFloat
+ : kUnboxedDouble;
#if defined(DEBUG)
- if (new_representation == kUnboxedFloat) {
+ if (unboxed == kUnboxedFloat) {
for (auto input : phi->inputs()) {
ASSERT(input->representation() != kUnboxedDouble);
}
@@ -2100,90 +2101,78 @@
break;
case kFloat32x4Cid:
if (ShouldInlineSimd()) {
- new_representation = kUnboxedFloat32x4;
+ unboxed = kUnboxedFloat32x4;
}
break;
case kInt32x4Cid:
if (ShouldInlineSimd()) {
- new_representation = kUnboxedInt32x4;
+ unboxed = kUnboxedInt32x4;
}
break;
case kFloat64x2Cid:
if (ShouldInlineSimd()) {
- new_representation = kUnboxedFloat64x2;
+ unboxed = kUnboxedFloat64x2;
}
break;
}
- // If all the inputs are untagged or all the inputs are compatible unboxed
- // integers, leave the Phi unboxed.
- if (new_representation == kTagged && phi->Type()->IsInt()) {
+ // If all the inputs are unboxed, leave the Phi unboxed.
+ if ((unboxed == kTagged) && phi->Type()->IsInt()) {
+ bool should_unbox = true;
+ Representation new_representation = kTagged;
for (auto input : phi->inputs()) {
if (input == phi) continue;
- if (input->representation() != kUntagged &&
- !IsUnboxedInteger(input->representation())) {
- new_representation = kTagged; // Reset to a boxed phi.
+ if (!IsUnboxedInteger(input->representation())) {
+ should_unbox = false;
break;
}
if (new_representation == kTagged) {
new_representation = input->representation();
- } else if (new_representation == kUntagged) {
- // Don't allow mixing of untagged and unboxed values.
- ASSERT_EQUAL(input->representation(), kUntagged);
} else if (new_representation != input->representation()) {
- // Don't allow mixing of untagged and unboxed values.
- ASSERT(IsUnboxedInteger(input->representation()));
- // Don't allow implicit conversion between signed and unsigned
- // representations of the same size, since that loses information.
- // This means the value sizes must be different if they are different
- // unboxed integer representations.
- ASSERT(RepresentationUtils::ValueSize(new_representation) !=
- RepresentationUtils::ValueSize(input->representation()));
- // Take the larger representation. If the larger representation is
- // unsigned, then the smaller must be as well.
- if (RepresentationUtils::ValueSize(new_representation) <
- RepresentationUtils::ValueSize(input->representation())) {
- ASSERT(!RepresentationUtils::IsUnsigned(input->representation()) ||
- RepresentationUtils::IsUnsigned(new_representation));
- new_representation = input->representation();
- } else {
- ASSERT(!RepresentationUtils::IsUnsigned(new_representation) ||
- RepresentationUtils::IsUnsigned(input->representation()));
- }
+ new_representation = kNoRepresentation;
}
}
- // Decide if it is worth to unbox an boxed integer phi.
- if (new_representation == kTagged && !phi->Type()->can_be_sentinel()) {
-#if defined(TARGET_ARCH_IS_64_BIT)
- // In AOT mode on 64-bit platforms always unbox integer typed phis
- // (similar to how we treat doubles and other boxed numeric types).
- // In JIT mode only unbox phis which are not fully known to be Smi.
- if (is_aot_ || phi->Type()->ToCid() != kSmiCid) {
- new_representation = kUnboxedInt64;
- }
-#else
- // If we are on a 32-bit platform check if there are unboxed values
- // flowing into the phi and the phi value itself is flowing into an
- // unboxed operation prefer to keep it unboxed.
- // We use this heuristic instead of eagerly unboxing all the phis
- // because we are concerned about the code size and register pressure.
- const bool has_unboxed_incoming_value = HasUnboxedIncomingValue(phi);
- const bool flows_into_unboxed_use = FlowsIntoUnboxedUse(phi);
-
- if (has_unboxed_incoming_value && flows_into_unboxed_use) {
- new_representation =
- RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt32)
- ? kUnboxedInt32
- : kUnboxedInt64;
- }
-#endif
+ if (should_unbox) {
+ unboxed =
+ new_representation != kNoRepresentation ? new_representation
+ : RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt32)
+ ? kUnboxedInt32
+ : kUnboxedInt64;
}
}
- phi->set_representation(new_representation);
+ // Decide if it is worth to unbox an integer phi.
+ if ((unboxed == kTagged) && phi->Type()->IsInt() &&
+ !phi->Type()->can_be_sentinel()) {
+#if defined(TARGET_ARCH_IS_64_BIT)
+ // In AOT mode on 64-bit platforms always unbox integer typed phis
+ // (similar to how we treat doubles and other boxed numeric types).
+ // In JIT mode only unbox phis which are not fully known to be Smi.
+ if (is_aot_ || phi->Type()->ToCid() != kSmiCid) {
+ unboxed = kUnboxedInt64;
+ }
+#else
+ // If we are on a 32-bit platform check if there are unboxed values
+ // flowing into the phi and the phi value itself is flowing into an
+ // unboxed operation prefer to keep it unboxed.
+ // We use this heuristic instead of eagerly unboxing all the phis
+ // because we are concerned about the code size and register pressure.
+ const bool has_unboxed_incoming_value = HasUnboxedIncomingValue(phi);
+ const bool flows_into_unboxed_use = FlowsIntoUnboxedUse(phi);
+
+ if (has_unboxed_incoming_value && flows_into_unboxed_use) {
+ unboxed =
+ RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt32)
+ ? kUnboxedInt32
+ : kUnboxedInt64;
+ }
+#endif
+ }
+
+ phi->set_representation(unboxed);
}
private:
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index 782e499..8537ae2 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -990,15 +990,6 @@
value()->BindsToConstantNull()) {
return nullptr;
}
-
- if (slot().kind() == Slot::Kind::kPointerBase_data &&
- stores_inner_pointer() == InnerPointerAccess::kMayBeInnerPointer) {
- const intptr_t cid = instance()->Type()->ToNullableCid();
- // Pointers and ExternalTypedData objects never contain inner pointers.
- if (cid == kPointerCid || IsExternalTypedDataClassId(cid)) {
- set_stores_inner_pointer(InnerPointerAccess::kCannotBeInnerPointer);
- }
- }
return this;
}
@@ -2741,24 +2732,6 @@
return true;
}
-bool LoadFieldInstr::MayCreateUntaggedAlias() const {
- // If the load is guaranteed to never retrieve a GC-moveable address,
- // then the returned address can't alias the (GC-moveable) instance.
- if (loads_inner_pointer() != InnerPointerAccess::kMayBeInnerPointer) {
- return false;
- }
- if (slot().IsIdentical(Slot::PointerBase_data())) {
- // If we know statically that the instance is a Pointer, typed data view,
- // or external typed data, then the data field doesn't alias the instance.
- const intptr_t cid = instance()->Type()->ToNullableCid();
- if (cid == kPointerCid) return false;
- if (IsTypedDataViewClassId(cid)) return false;
- if (IsUnmodifiableTypedDataViewClassId(cid)) return false;
- if (IsExternalTypedDataClassId(cid)) return false;
- }
- return true;
-}
-
bool LoadFieldInstr::Evaluate(const Object& instance, Object* result) {
return TryEvaluateLoad(instance, slot(), result);
}
@@ -2900,16 +2873,6 @@
}
}
break;
- case Slot::Kind::kPointerBase_data:
- ASSERT(!calls_initializer());
- if (loads_inner_pointer() == InnerPointerAccess::kMayBeInnerPointer) {
- const intptr_t cid = instance()->Type()->ToNullableCid();
- // Pointers and ExternalTypedData objects never contain inner pointers.
- if (cid == kPointerCid || IsExternalTypedDataClassId(cid)) {
- set_loads_inner_pointer(InnerPointerAccess::kCannotBeInnerPointer);
- }
- }
- break;
default:
break;
}
@@ -2924,7 +2887,7 @@
}
}
- if (instance()->definition()->IsAllocateObject() && IsImmutableLoad()) {
+ if (instance()->definition()->IsAllocateObject() && slot().is_immutable()) {
StoreFieldInstr* initializing_store = nullptr;
for (auto use : instance()->definition()->input_uses()) {
if (auto store = use->instruction()->AsStoreField()) {
@@ -3367,14 +3330,11 @@
const auto intermediate_rep = first_converter->representation();
// Only eliminate intermediate conversion if it does not change the value.
auto src_defn = first_converter->value()->definition();
- if (intermediate_rep == kUntagged) {
- // Both conversions are no-ops, as the other representations must be
- // either kUnboxedIntPtr or kUnboxedFfiIntPtr.
- } else if (!Range::Fits(src_defn->range(), intermediate_rep)) {
+ if (!Range::Fits(src_defn->range(), intermediate_rep)) {
return this;
}
- // Otherwise it is safe to discard any other conversions from and then back
+ // Otherwise it is safe to discard any other conversions from and then back
// to the same integer type.
if (first_converter->from() == to()) {
return src_defn;
@@ -4490,40 +4450,20 @@
}
}
-LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
- bool opt) const {
- const intptr_t kNumInputs = 1;
- return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
- LocationSummary::kNoCall);
-}
-
-void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register obj = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
- if (object()->definition()->representation() == kUntagged) {
- __ LoadFromOffset(result, obj, offset());
- } else {
- ASSERT(object()->definition()->representation() == kTagged);
- __ LoadFieldFromOffset(result, obj, offset());
- }
-}
-
LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
LocationSummary* locs = nullptr;
- auto const rep = slot().representation();
- if (rep != kTagged) {
+ if (slot().representation() != kTagged) {
ASSERT(!calls_initializer());
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
- if (rep == kUntagged) {
- locs->set_out(0, Location::RequiresRegister());
- } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
- const size_t value_size = RepresentationUtils::ValueSize(rep);
+ if (RepresentationUtils::IsUnboxedInteger(slot().representation())) {
+ const size_t value_size =
+ RepresentationUtils::ValueSize(slot().representation());
if (value_size <= compiler::target::kWordSize) {
locs->set_out(0, Location::RequiresRegister());
} else {
@@ -4569,18 +4509,15 @@
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register instance_reg = locs()->in(0).reg();
- auto const rep = slot().representation();
- if (rep != kTagged) {
- if (rep == kUntagged) {
- const Register result = locs()->out(0).reg();
- __ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
- RepresentationUtils::OperandSize(rep));
- } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
- const size_t value_size = RepresentationUtils::ValueSize(rep);
+ if (representation() != kTagged) {
+ if (RepresentationUtils::IsUnboxedInteger(representation())) {
+ const size_t value_size =
+ RepresentationUtils::ValueSize(representation());
if (value_size <= compiler::target::kWordSize) {
const Register result = locs()->out(0).reg();
- __ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
- RepresentationUtils::OperandSize(rep));
+ __ LoadFieldFromOffset(
+ result, instance_reg, OffsetInBytes(),
+ RepresentationUtils::OperandSize(representation()));
} else {
auto const result_pair = locs()->out(0).AsPairLocation();
const Register result_lo = result_pair->At(0).reg();
@@ -6878,8 +6815,6 @@
void MemoryCopyInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register src_reg = locs()->in(kSrcPos).reg();
const Register dest_reg = locs()->in(kDestPos).reg();
- const Representation src_rep = RequiredInputRepresentation(kSrcPos);
- const Representation dest_rep = RequiredInputRepresentation(kDestPos);
const Location& src_start_loc = locs()->in(kSrcStartPos);
const Location& dest_start_loc = locs()->in(kDestStartPos);
const Location& length_loc = locs()->in(kLengthPos);
@@ -6895,9 +6830,8 @@
// The zero constant case should be handled via canonicalization.
ASSERT(!constant_length || num_elements > 0);
- EmitComputeStartPointer(compiler, src_cid_, src_reg, src_rep, src_start_loc);
- EmitComputeStartPointer(compiler, dest_cid_, dest_reg, dest_rep,
- dest_start_loc);
+ EmitComputeStartPointer(compiler, src_cid_, src_reg, src_start_loc);
+ EmitComputeStartPointer(compiler, dest_cid_, dest_reg, dest_start_loc);
compiler::Label copy_forwards, done;
if (!constant_length) {
@@ -7392,7 +7326,9 @@
compiler->zone(), pointer_loc.payload_type(),
pointer_loc.container_type(), temp0);
compiler->EmitNativeMove(dst, pointer_loc, &temp_alloc);
- __ LoadFromSlot(temp0, temp0, Slot::PointerBase_data());
+ __ LoadField(temp0,
+ compiler::FieldAddress(
+ temp0, compiler::target::PointerBase::data_offset()));
// Copy chunks.
const intptr_t sp_offset =
@@ -7460,7 +7396,9 @@
compiler->EmitMove(Location::RegisterLocation(temp0), typed_data_loc,
&no_temp);
}
- __ LoadFromSlot(temp0, temp0, Slot::PointerBase_data());
+ __ LoadField(temp0,
+ compiler::FieldAddress(
+ temp0, compiler::target::PointerBase::data_offset()));
if (returnLocation.IsPointerToMemory()) {
// Copy blocks from the stack location to TypedData.
@@ -7540,12 +7478,10 @@
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(kInstancePos, Location::RequiresRegister());
- const Representation rep = slot().representation();
- if (rep != kTagged) {
- if (rep == kUntagged) {
- summary->set_in(kValuePos, Location::RequiresRegister());
- } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
- const size_t value_size = RepresentationUtils::ValueSize(rep);
+ if (slot().representation() != kTagged) {
+ if (RepresentationUtils::IsUnboxedInteger(slot().representation())) {
+ const size_t value_size =
+ RepresentationUtils::ValueSize(slot().representation());
if (value_size <= compiler::target::kWordSize) {
summary->set_in(kValuePos, Location::RequiresRegister());
} else {
@@ -7603,20 +7539,17 @@
const intptr_t offset_in_bytes = OffsetInBytes();
ASSERT(offset_in_bytes > 0); // Field is finalized and points after header.
- auto const rep = slot().representation();
if (slot().representation() != kTagged) {
// Unboxed field.
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
- if (rep == kUntagged) {
- const Register value = locs()->in(kValuePos).reg();
- __ StoreFieldToOffset(value, instance_reg, offset_in_bytes,
- RepresentationUtils::OperandSize(rep));
- } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
- const size_t value_size = RepresentationUtils::ValueSize(rep);
+ if (RepresentationUtils::IsUnboxedInteger(slot().representation())) {
+ const size_t value_size =
+ RepresentationUtils::ValueSize(slot().representation());
if (value_size <= compiler::target::kWordSize) {
const Register value = locs()->in(kValuePos).reg();
- __ StoreFieldToOffset(value, instance_reg, offset_in_bytes,
- RepresentationUtils::OperandSize(rep));
+ __ StoreFieldToOffset(
+ value, instance_reg, offset_in_bytes,
+ RepresentationUtils::OperandSize(slot().representation()));
} else {
auto const value_pair = locs()->in(kValuePos).AsPairLocation();
const Register value_lo = value_pair->At(0).reg();
@@ -7736,7 +7669,10 @@
if (dst1.IsMultiple()) {
Register typed_data_reg = locs()->in(0).reg();
// Load the data pointer out of the TypedData/Pointer.
- __ LoadFromSlot(typed_data_reg, typed_data_reg, Slot::PointerBase_data());
+ __ LoadField(
+ typed_data_reg,
+ compiler::FieldAddress(typed_data_reg,
+ compiler::target::PointerBase::data_offset()));
const auto& multiple = dst1.AsMultiple();
int offset_in_bytes = 0;
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index e521cfa..44deff9 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -3080,47 +3080,29 @@
class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
public:
MemoryCopyInstr(Value* src,
+ Value* dest,
+ Value* src_start,
+ Value* dest_start,
+ Value* length,
classid_t src_cid,
- Value* dest,
classid_t dest_cid,
- Value* src_start,
- Value* dest_start,
- Value* length,
bool unboxed_inputs,
bool can_overlap = true)
- : MemoryCopyInstr(Instance::ElementSizeFor(src_cid),
- src,
- kTagged,
- src_cid,
- dest,
- kTagged,
- dest_cid,
- src_start,
- dest_start,
- length,
- unboxed_inputs,
- can_overlap) {}
-
- MemoryCopyInstr(intptr_t element_size,
- Value* src,
- Value* dest,
- Value* src_start,
- Value* dest_start,
- Value* length,
- bool unboxed_inputs,
- bool can_overlap = true)
- : MemoryCopyInstr(element_size,
- src,
- kUntagged,
- kIllegalCid,
- dest,
- kUntagged,
- kIllegalCid,
- src_start,
- dest_start,
- length,
- unboxed_inputs,
- can_overlap) {}
+ : src_cid_(src_cid),
+ dest_cid_(dest_cid),
+ element_size_(Instance::ElementSizeFor(src_cid)),
+ unboxed_inputs_(unboxed_inputs),
+ can_overlap_(can_overlap) {
+ ASSERT(IsArrayTypeSupported(src_cid));
+ ASSERT(IsArrayTypeSupported(dest_cid));
+ ASSERT(Instance::ElementSizeFor(src_cid) ==
+ Instance::ElementSizeFor(dest_cid));
+ SetInputAt(kSrcPos, src);
+ SetInputAt(kDestPos, dest);
+ SetInputAt(kSrcStartPos, src_start);
+ SetInputAt(kDestStartPos, dest_start);
+ SetInputAt(kLengthPos, length);
+ }
enum {
kSrcPos = 0,
@@ -3133,11 +3115,9 @@
DECLARE_INSTRUCTION(MemoryCopy)
virtual Representation RequiredInputRepresentation(intptr_t index) const {
- if (index == kSrcPos) {
- return src_representation_;
- }
- if (index == kDestPos) {
- return dest_representation_;
+ if (index == kSrcPos || index == kDestPos) {
+ // The object inputs are always tagged.
+ return kTagged;
}
return unboxed_inputs() ? kUnboxedIntPtr : kTagged;
}
@@ -3145,19 +3125,7 @@
virtual bool ComputeCanDeoptimize() const { return false; }
virtual bool HasUnknownSideEffects() const { return true; }
- virtual bool AttributesEqual(const Instruction& other) const {
- if (auto* const copy = other.AsMemoryCopy()) {
- if (element_size_ != copy->element_size_) return false;
- if (unboxed_inputs_ != copy->unboxed_inputs_) return false;
- if (can_overlap_ != copy->can_overlap_) return false;
- if (src_representation_ != copy->src_representation_) return false;
- if (dest_representation_ != copy->dest_representation_) return false;
- if (src_cid_ != copy->src_cid_) return false;
- if (dest_cid_ != copy->dest_cid_) return false;
- return true;
- }
- return false;
- }
+ virtual bool AttributesEqual(const Instruction& other) const { return true; }
Value* src() const { return inputs_[kSrcPos]; }
Value* dest() const { return inputs_[kDestPos]; }
@@ -3174,16 +3142,12 @@
PRINT_OPERANDS_TO_SUPPORT
- DECLARE_ATTRIBUTE(element_size());
-
#define FIELD_LIST(F) \
- F(const classid_t, src_cid_) \
- F(const classid_t, dest_cid_) \
+ F(classid_t, src_cid_) \
+ F(classid_t, dest_cid_) \
F(intptr_t, element_size_) \
F(bool, unboxed_inputs_) \
- F(const bool, can_overlap_) \
- F(const Representation, src_representation_) \
- F(const Representation, dest_representation_)
+ F(bool, can_overlap_)
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MemoryCopyInstr,
TemplateInstruction,
@@ -3191,55 +3155,11 @@
#undef FIELD_LIST
private:
- MemoryCopyInstr(intptr_t element_size,
- Value* src,
- Representation src_representation,
- classid_t src_cid,
- Value* dest,
- Representation dest_representation,
- classid_t dest_cid,
- Value* src_start,
- Value* dest_start,
- Value* length,
- bool unboxed_inputs,
- bool can_overlap = true)
- : src_cid_(src_cid),
- dest_cid_(dest_cid),
- element_size_(element_size),
- unboxed_inputs_(unboxed_inputs),
- can_overlap_(can_overlap),
- src_representation_(src_representation),
- dest_representation_(dest_representation) {
- if (src_representation == kTagged) {
- ASSERT(IsArrayTypeSupported(src_cid));
- ASSERT_EQUAL(Instance::ElementSizeFor(src_cid), element_size);
- } else {
- ASSERT_EQUAL(src_representation, kUntagged);
- ASSERT_EQUAL(src_cid, kIllegalCid);
- }
- if (dest_representation == kTagged) {
- ASSERT(IsArrayTypeSupported(dest_cid));
- ASSERT_EQUAL(Instance::ElementSizeFor(dest_cid), element_size);
- } else {
- ASSERT_EQUAL(dest_representation, kUntagged);
- ASSERT_EQUAL(dest_cid, kIllegalCid);
- }
- SetInputAt(kSrcPos, src);
- SetInputAt(kDestPos, dest);
- SetInputAt(kSrcStartPos, src_start);
- SetInputAt(kDestStartPos, dest_start);
- SetInputAt(kLengthPos, length);
- }
-
// Set array_reg to point to the index indicated by start (contained in
// start_loc) of the typed data or string in array (contained in array_reg).
- // If array_rep is tagged, then the payload address is retrieved according
- // to array_cid, otherwise the register is assumed to already have the
- // payload address.
void EmitComputeStartPointer(FlowGraphCompiler* compiler,
classid_t array_cid,
Register array_reg,
- Representation array_rep,
Location start_loc);
// Generates an unrolled loop for copying a known amount of data from
@@ -6111,12 +6031,6 @@
DISALLOW_COPY_AND_ASSIGN(DebugStepCheckInstr);
};
-enum class InnerPointerAccess {
- kNotUntagged,
- kMayBeInnerPointer,
- kCannotBeInnerPointer,
-};
-
enum StoreBarrierType { kNoStoreBarrier, kEmitStoreBarrier };
// StoreField instruction represents a store of the given [value] into
@@ -6164,7 +6078,6 @@
Value* instance,
Value* value,
StoreBarrierType emit_store_barrier,
- InnerPointerAccess stores_inner_pointer,
const InstructionSource& source,
Kind kind = Kind::kOther,
compiler::Assembler::MemoryOrder memory_order =
@@ -6174,42 +6087,11 @@
emit_store_barrier_(emit_store_barrier),
memory_order_(memory_order),
token_pos_(source.token_pos),
- is_initialization_(kind == Kind::kInitializing),
- stores_inner_pointer_(stores_inner_pointer) {
- switch (stores_inner_pointer) {
- case InnerPointerAccess::kNotUntagged:
- ASSERT(slot.representation() != kUntagged);
- break;
- case InnerPointerAccess::kMayBeInnerPointer:
- ASSERT(slot.representation() == kUntagged);
- ASSERT(slot.may_contain_inner_pointer());
- break;
- case InnerPointerAccess::kCannotBeInnerPointer:
- ASSERT(slot.representation() == kUntagged);
- break;
- }
+ is_initialization_(kind == Kind::kInitializing) {
SetInputAt(kInstancePos, instance);
SetInputAt(kValuePos, value);
}
- // Convenience constructor for slots not containing an untagged address.
- StoreFieldInstr(const Slot& slot,
- Value* instance,
- Value* value,
- StoreBarrierType emit_store_barrier,
- const InstructionSource& source,
- Kind kind = Kind::kOther,
- compiler::Assembler::MemoryOrder memory_order =
- compiler::Assembler::kRelaxedNonAtomic)
- : StoreFieldInstr(slot,
- instance,
- value,
- emit_store_barrier,
- InnerPointerAccess::kNotUntagged,
- source,
- kind,
- memory_order) {}
-
// Convenience constructor that looks up an IL Slot for the given [field].
StoreFieldInstr(const Field& field,
Value* instance,
@@ -6246,7 +6128,7 @@
bool is_initialization() const { return is_initialization_; }
bool ShouldEmitStoreBarrier() const {
- if (slot().representation() != kTagged) {
+ if (RepresentationUtils::IsUnboxed(slot().representation())) {
// The target field is native and unboxed, so not traversed by the GC.
return false;
}
@@ -6267,17 +6149,6 @@
emit_store_barrier_ = value;
}
- InnerPointerAccess stores_inner_pointer() const {
- return stores_inner_pointer_;
- }
- void set_stores_inner_pointer(InnerPointerAccess value) {
- // We should never change this for a non-untagged field.
- ASSERT(stores_inner_pointer_ != InnerPointerAccess::kNotUntagged);
- // We only convert from may to cannot, never the other direction.
- ASSERT(value == InnerPointerAccess::kCannotBeInnerPointer);
- stores_inner_pointer_ = value;
- }
-
virtual bool CanTriggerGC() const { return false; }
virtual bool ComputeCanDeoptimize() const { return false; }
@@ -6302,8 +6173,7 @@
F(compiler::Assembler::MemoryOrder, memory_order_) \
F(const TokenPosition, token_pos_) \
/* Marks initializing stores. E.g. in the constructor. */ \
- F(const bool, is_initialization_) \
- F(InnerPointerAccess, stores_inner_pointer_)
+ F(const bool, is_initialization_)
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreFieldInstr,
TemplateInstruction,
@@ -7677,12 +7547,11 @@
DISALLOW_COPY_AND_ASSIGN(AllocateTypedDataInstr);
};
-// This instruction is used to access fields in non-Dart objects, such as Thread
-// and IsolateGroup.
-//
-// Note: The instruction must not be moved without the indexed access or store
-// that depends on it (e.g. out of loops), as the GC may collect or move the
-// object containing that address.
+// Note: This instruction must not be moved without the indexed access that
+// depends on it (e.g. out of loops). GC may collect the array while the
+// external data-array is still accessed.
+// TODO(vegorov) enable LICMing this instruction by ensuring that array itself
+// is kept alive.
class LoadUntaggedInstr : public TemplateDefinition<1, NoThrow> {
public:
LoadUntaggedInstr(Value* object, intptr_t offset) : offset_(offset) {
@@ -7777,7 +7646,6 @@
public:
LoadFieldInstr(Value* instance,
const Slot& slot,
- InnerPointerAccess loads_inner_pointer,
const InstructionSource& source,
bool calls_initializer = false,
intptr_t deopt_id = DeoptId::kNone)
@@ -7785,50 +7653,13 @@
calls_initializer,
deopt_id,
slot.IsDartField() ? &slot.field() : nullptr),
- slot_(slot),
- loads_inner_pointer_(loads_inner_pointer) {
- switch (loads_inner_pointer) {
- case InnerPointerAccess::kNotUntagged:
- ASSERT(slot.representation() != kUntagged);
- break;
- case InnerPointerAccess::kMayBeInnerPointer:
- ASSERT(slot.representation() == kUntagged);
- ASSERT(slot.may_contain_inner_pointer());
- break;
- case InnerPointerAccess::kCannotBeInnerPointer:
- ASSERT(slot.representation() == kUntagged);
- break;
- }
+ slot_(slot) {
SetInputAt(0, instance);
}
- // Convenience function for slots that cannot hold untagged addresses.
- LoadFieldInstr(Value* instance,
- const Slot& slot,
- const InstructionSource& source,
- bool calls_initializer = false,
- intptr_t deopt_id = DeoptId::kNone)
- : LoadFieldInstr(instance,
- slot,
- InnerPointerAccess::kNotUntagged,
- source,
- calls_initializer,
- deopt_id) {}
-
Value* instance() const { return inputs_[0]; }
const Slot& slot() const { return slot_; }
- InnerPointerAccess loads_inner_pointer() const {
- return loads_inner_pointer_;
- }
- void set_loads_inner_pointer(InnerPointerAccess value) {
- // We should never change this for a non-untagged field.
- ASSERT(loads_inner_pointer_ != InnerPointerAccess::kNotUntagged);
- // We only convert from may to cannot, never the other direction.
- ASSERT(value == InnerPointerAccess::kCannotBeInnerPointer);
- loads_inner_pointer_ = value;
- }
-
virtual Representation representation() const;
DECLARE_INSTRUCTION(LoadField)
@@ -7838,18 +7669,6 @@
virtual void InferRange(RangeAnalysis* analysis, Range* range);
- bool MayCreateUntaggedAlias() const;
-
- bool IsImmutableLoad() const {
- // The data() field in PointerBase is marked mutable, but is not actually
- // mutable if it doesn't contain an inner pointer (e.g., for external
- // typed data and Pointer objects).
- if (slot().IsIdentical(Slot::PointerBase_data())) {
- return loads_inner_pointer() != InnerPointerAccess::kMayBeInnerPointer;
- }
- return slot().is_immutable();
- }
-
bool IsImmutableLengthLoad() const { return slot().IsImmutableLengthSlot(); }
// Try evaluating this load against the given constant value of
@@ -7881,9 +7700,7 @@
PRINT_OPERANDS_TO_SUPPORT
-#define FIELD_LIST(F) \
- F(const Slot&, slot_) \
- F(InnerPointerAccess, loads_inner_pointer_)
+#define FIELD_LIST(F) F(const Slot&, slot_)
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadFieldInstr,
TemplateLoadField,
diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc
index 797863c..05121bf 100644
--- a/runtime/vm/compiler/backend/il_arm.cc
+++ b/runtime/vm/compiler/backend/il_arm.cc
@@ -389,13 +389,13 @@
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
classid_t array_cid,
Register array_reg,
- Representation array_rep,
Location start_loc) {
- intptr_t offset = 0;
- if (array_rep != kTagged) {
- // Do nothing, array_reg already contains the payload address.
- } else if (IsTypedDataBaseClassId(array_cid)) {
- __ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
+ intptr_t offset;
+ if (IsTypedDataBaseClassId(array_cid)) {
+ __ ldr(array_reg,
+ compiler::FieldAddress(
+ array_reg, compiler::target::PointerBase::data_offset()));
+ offset = 0;
} else {
switch (array_cid) {
case kOneByteStringCid:
@@ -411,12 +411,14 @@
compiler::FieldAddress(array_reg,
compiler::target::ExternalOneByteString::
external_data_offset()));
+ offset = 0;
break;
case kExternalTwoByteStringCid:
__ ldr(array_reg,
compiler::FieldAddress(array_reg,
compiler::target::ExternalTwoByteString::
external_data_offset()));
+ offset = 0;
break;
default:
UNREACHABLE();
@@ -2045,7 +2047,8 @@
compiler::Label loop, loop_in;
// Address of input bytes.
- __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
+ __ LoadFieldFromOffset(bytes_reg, bytes_reg,
+ compiler::target::PointerBase::data_offset());
// Table.
__ AddImmediate(
@@ -2097,6 +2100,24 @@
__ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
}
+LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
+ bool opt) const {
+ const intptr_t kNumInputs = 1;
+ return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
+ LocationSummary::kNoCall);
+}
+
+void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ const Register obj = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
+ if (object()->definition()->representation() == kUntagged) {
+ __ LoadFromOffset(result, obj, offset());
+ } else {
+ ASSERT(object()->definition()->representation() == kTagged);
+ __ LoadFieldFromOffset(result, obj, offset());
+ }
+}
+
static bool CanBeImmediateIndex(Value* value,
intptr_t cid,
bool is_external,
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index bc60c61..8fdf75f 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -290,13 +290,13 @@
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
classid_t array_cid,
Register array_reg,
- Representation array_rep,
Location start_loc) {
- intptr_t offset = 0;
- if (array_rep != kTagged) {
- // Do nothing, array_reg already contains the payload address.
- } else if (IsTypedDataBaseClassId(array_cid)) {
- __ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
+ intptr_t offset;
+ if (IsTypedDataBaseClassId(array_cid)) {
+ __ ldr(array_reg,
+ compiler::FieldAddress(
+ array_reg, compiler::target::PointerBase::data_offset()));
+ offset = 0;
} else {
switch (array_cid) {
case kOneByteStringCid:
@@ -312,12 +312,14 @@
compiler::FieldAddress(array_reg,
compiler::target::ExternalOneByteString::
external_data_offset()));
+ offset = 0;
break;
case kExternalTwoByteStringCid:
__ ldr(array_reg,
compiler::FieldAddress(array_reg,
compiler::target::ExternalTwoByteString::
external_data_offset()));
+ offset = 0;
break;
default:
UNREACHABLE();
@@ -1809,7 +1811,8 @@
compiler::Label loop, loop_in;
// Address of input bytes.
- __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
+ __ LoadFieldFromOffset(bytes_reg, bytes_reg,
+ compiler::target::PointerBase::data_offset());
// Table.
__ AddImmediate(
@@ -1873,6 +1876,24 @@
}
}
+LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
+ bool opt) const {
+ const intptr_t kNumInputs = 1;
+ return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
+ LocationSummary::kNoCall);
+}
+
+void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ const Register obj = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
+ if (object()->definition()->representation() == kUntagged) {
+ __ LoadFromOffset(result, obj, offset());
+ } else {
+ ASSERT(object()->definition()->representation() == kTagged);
+ __ LoadFieldFromOffset(result, obj, offset());
+ }
+}
+
static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) {
ConstantInstr* constant = value->definition()->AsConstant();
if ((constant == nullptr) || !constant->value().IsSmi()) {
diff --git a/runtime/vm/compiler/backend/il_ia32.cc b/runtime/vm/compiler/backend/il_ia32.cc
index 5b94499..7b80097 100644
--- a/runtime/vm/compiler/backend/il_ia32.cc
+++ b/runtime/vm/compiler/backend/il_ia32.cc
@@ -183,13 +183,13 @@
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
classid_t array_cid,
Register array_reg,
- Representation array_rep,
Location start_loc) {
- intptr_t offset = 0;
- if (array_rep != kTagged) {
- // Do nothing, array_reg already contains the payload address.
- } else if (IsTypedDataBaseClassId(array_cid)) {
- __ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
+ intptr_t offset;
+ if (IsTypedDataBaseClassId(array_cid)) {
+ __ movl(array_reg,
+ compiler::FieldAddress(
+ array_reg, compiler::target::PointerBase::data_offset()));
+ offset = 0;
} else {
switch (array_cid) {
case kOneByteStringCid:
@@ -205,12 +205,14 @@
compiler::FieldAddress(array_reg,
compiler::target::ExternalOneByteString::
external_data_offset()));
+ offset = 0;
break;
case kExternalTwoByteStringCid:
__ movl(array_reg,
compiler::FieldAddress(array_reg,
compiler::target::ExternalTwoByteString::
external_data_offset()));
+ offset = 0;
break;
default:
UNREACHABLE();
@@ -1465,7 +1467,9 @@
compiler::Label rest, rest_loop, rest_loop_in, done;
// Address of input bytes.
- __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
+ __ movl(bytes_reg,
+ compiler::FieldAddress(bytes_reg,
+ compiler::target::PointerBase::data_offset()));
// Pointers to start, end and end-16.
__ leal(bytes_ptr_reg, compiler::Address(bytes_reg, start_reg, TIMES_1, 0));
@@ -1583,6 +1587,24 @@
flags_reg);
}
+LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
+ bool opt) const {
+ const intptr_t kNumInputs = 1;
+ return LocationSummary::Make(zone, kNumInputs, Location::SameAsFirstInput(),
+ LocationSummary::kNoCall);
+}
+
+void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ Register obj = locs()->in(0).reg();
+ Register result = locs()->out(0).reg();
+ if (object()->definition()->representation() == kUntagged) {
+ __ movl(result, compiler::Address(obj, offset()));
+ } else {
+ ASSERT(object()->definition()->representation() == kTagged);
+ __ movl(result, compiler::FieldAddress(obj, offset()));
+ }
+}
+
LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
diff --git a/runtime/vm/compiler/backend/il_printer.cc b/runtime/vm/compiler/backend/il_printer.cc
index e632c05..ed9ea8a 100644
--- a/runtime/vm/compiler/backend/il_printer.cc
+++ b/runtime/vm/compiler/backend/il_printer.cc
@@ -927,9 +927,6 @@
if (emit_store_barrier_ == kNoStoreBarrier) {
f->AddString(", NoStoreBarrier");
}
- if (stores_inner_pointer() == InnerPointerAccess::kMayBeInnerPointer) {
- f->AddString(", MayStoreInnerPointer");
- }
}
void IfThenElseInstr::PrintOperandsTo(BaseTextBuffer* f) const {
@@ -995,13 +992,10 @@
void LoadFieldInstr::PrintOperandsTo(BaseTextBuffer* f) const {
instance()->PrintTo(f);
- f->Printf(" . %s%s", slot().Name(), IsImmutableLoad() ? " {final}" : "");
+ f->Printf(" . %s%s", slot().Name(), slot().is_immutable() ? " {final}" : "");
if (calls_initializer()) {
f->AddString(", CallsInitializer");
}
- if (loads_inner_pointer() == InnerPointerAccess::kMayBeInnerPointer) {
- f->AddString(", MayLoadInnerPointer");
- }
}
void LoadUntaggedInstr::PrintOperandsTo(BaseTextBuffer* f) const {
@@ -1452,43 +1446,23 @@
// kTypedDataUint8ArrayCid is used as the default cid for cases where
// the destination object is a subclass of PointerBase and the arguments
// are given in terms of bytes, so only print if the cid differs.
- switch (dest_representation_) {
- case kUntagged:
- f->Printf(", dest untagged");
- break;
- case kTagged:
- if (dest_cid_ != kTypedDataUint8ArrayCid) {
- const Class& cls = Class::Handle(
- IsolateGroup::Current()->class_table()->At(dest_cid_));
- if (!cls.IsNull()) {
- f->Printf(", dest_cid=%s (%d)", cls.ScrubbedNameCString(), dest_cid_);
- } else {
- f->Printf(", dest_cid=%d", dest_cid_);
- }
- }
- break;
- default:
- UNREACHABLE();
+ if (dest_cid_ != kTypedDataUint8ArrayCid) {
+ const Class& cls =
+ Class::Handle(IsolateGroup::Current()->class_table()->At(dest_cid_));
+ if (!cls.IsNull()) {
+ f->Printf(", dest_cid=%s (%d)", cls.ScrubbedNameCString(), dest_cid_);
+ } else {
+ f->Printf(", dest_cid=%d", dest_cid_);
+ }
}
- switch (src_representation_) {
- case kUntagged:
- f->Printf(", src untagged");
- break;
- case kTagged:
- if ((dest_representation_ == kTagged && dest_cid_ != src_cid_) ||
- (dest_representation_ != kTagged &&
- src_cid_ != kTypedDataUint8ArrayCid)) {
- const Class& cls =
- Class::Handle(IsolateGroup::Current()->class_table()->At(src_cid_));
- if (!cls.IsNull()) {
- f->Printf(", src_cid=%s (%d)", cls.ScrubbedNameCString(), src_cid_);
- } else {
- f->Printf(", src_cid=%d", src_cid_);
- }
- }
- break;
- default:
- UNREACHABLE();
+ if (src_cid_ != dest_cid_) {
+ const Class& cls =
+ Class::Handle(IsolateGroup::Current()->class_table()->At(src_cid_));
+ if (!cls.IsNull()) {
+ f->Printf(", src_cid=%s (%d)", cls.ScrubbedNameCString(), src_cid_);
+ } else {
+ f->Printf(", src_cid=%d", src_cid_);
+ }
}
if (element_size() != 1) {
f->Printf(", element_size=%" Pd "", element_size());
diff --git a/runtime/vm/compiler/backend/il_riscv.cc b/runtime/vm/compiler/backend/il_riscv.cc
index 8f7bd64..49209f3 100644
--- a/runtime/vm/compiler/backend/il_riscv.cc
+++ b/runtime/vm/compiler/backend/il_riscv.cc
@@ -385,13 +385,13 @@
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
classid_t array_cid,
Register array_reg,
- Representation array_rep,
Location start_loc) {
- intptr_t offset = 0;
- if (array_rep != kTagged) {
- // Do nothing, array_reg already contains the payload address.
- } else if (IsTypedDataBaseClassId(array_cid)) {
- __ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
+ intptr_t offset;
+ if (IsTypedDataBaseClassId(array_cid)) {
+ __ lx(array_reg,
+ compiler::FieldAddress(array_reg,
+ compiler::target::PointerBase::data_offset()));
+ offset = 0;
} else {
switch (array_cid) {
case kOneByteStringCid:
@@ -407,12 +407,14 @@
compiler::FieldAddress(array_reg,
compiler::target::ExternalOneByteString::
external_data_offset()));
+ offset = 0;
break;
case kExternalTwoByteStringCid:
__ lx(array_reg,
compiler::FieldAddress(array_reg,
compiler::target::ExternalTwoByteString::
external_data_offset()));
+ offset = 0;
break;
default:
UNREACHABLE();
@@ -1977,7 +1979,8 @@
compiler::Label loop, loop_in;
// Address of input bytes.
- __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
+ __ LoadFieldFromOffset(bytes_reg, bytes_reg,
+ compiler::target::PointerBase::data_offset());
// Table.
__ AddImmediate(
@@ -2034,6 +2037,24 @@
}
}
+LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
+ bool opt) const {
+ const intptr_t kNumInputs = 1;
+ return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
+ LocationSummary::kNoCall);
+}
+
+void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ const Register obj = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
+ if (object()->definition()->representation() == kUntagged) {
+ __ LoadFromOffset(result, obj, offset());
+ } else {
+ ASSERT(object()->definition()->representation() == kTagged);
+ __ LoadFieldFromOffset(result, obj, offset());
+ }
+}
+
static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) {
ConstantInstr* constant = value->definition()->AsConstant();
if ((constant == nullptr) || !constant->value().IsSmi()) {
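
Note: the restored LoadUntaggedInstr::EmitNativeCode above (and its x64 twin later in this patch) picks between a plain load and a field load based on the representation of its input: an untagged base register already holds a raw payload address, while a tagged one holds a Dart heap pointer carrying a tag bit that must be stripped. A minimal stand-alone sketch of that arithmetic, assuming the usual one-bit heap-object tag; the names are illustrative, not the VM's assembler API:

    #include <cstdint>
    #include <cstdio>

    // Illustrative only: Dart heap pointers carry a one-bit tag, so a field
    // load must subtract it, while an untagged base is used as-is.
    constexpr intptr_t kHeapObjectTag = 1;

    intptr_t AddressOf(intptr_t untagged_base, intptr_t offset) {
      return untagged_base + offset;  // kUntagged: base is a raw address
    }

    intptr_t FieldAddressOf(intptr_t tagged_base, intptr_t offset) {
      return tagged_base - kHeapObjectTag + offset;  // kTagged: strip the tag
    }

    int main() {
      const intptr_t raw = 0x1000;
      printf("%p %p\n", (void*)AddressOf(raw, 8),
             (void*)FieldAddressOf(raw + kHeapObjectTag, 8));  // same address
      return 0;
    }
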
diff --git a/runtime/vm/compiler/backend/il_test.cc b/runtime/vm/compiler/backend/il_test.cc
index fd98abf..335ddaf 100644
--- a/runtime/vm/compiler/backend/il_test.cc
+++ b/runtime/vm/compiler/backend/il_test.cc
@@ -740,15 +740,14 @@
}));
}
auto pointer_value = Value(pointer);
- auto* const load_field_instr = new (zone) LoadFieldInstr(
- &pointer_value, Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer, InstructionSource());
- flow_graph->InsertBefore(another_function_call, load_field_instr, nullptr,
+ auto* const load_untagged_instr = new (zone) LoadUntaggedInstr(
+ &pointer_value, compiler::target::PointerBase::data_offset());
+ flow_graph->InsertBefore(another_function_call, load_untagged_instr, nullptr,
FlowGraph::kValue);
- auto load_field_value = Value(load_field_instr);
+ auto load_untagged_value = Value(load_untagged_instr);
auto pointer_value2 = Value(pointer);
auto* const raw_store_field_instr =
- new (zone) RawStoreFieldInstr(&load_field_value, &pointer_value2, 0);
+ new (zone) RawStoreFieldInstr(&load_untagged_value, &pointer_value2, 0);
flow_graph->InsertBefore(another_function_call, raw_store_field_instr,
nullptr, FlowGraph::kEffect);
another_function_call->RemoveFromGraph();
@@ -759,7 +758,7 @@
EXPECT(cursor.TryMatch({
kMoveGlob,
kMatchAndMoveStaticCall,
- kMatchAndMoveLoadField,
+ kMatchAndMoveLoadUntagged,
kMatchAndMoveRawStoreField,
}));
}
@@ -836,19 +835,18 @@
}));
}
auto pointer_value = Value(pointer);
- auto* const load_field_instr = new (zone) LoadFieldInstr(
- &pointer_value, Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer, InstructionSource());
- flow_graph->InsertBefore(another_function_call, load_field_instr, nullptr,
+ auto* const load_untagged_instr = new (zone) LoadUntaggedInstr(
+ &pointer_value, compiler::target::PointerBase::data_offset());
+ flow_graph->InsertBefore(another_function_call, load_untagged_instr, nullptr,
FlowGraph::kValue);
- auto load_field_value = Value(load_field_instr);
+ auto load_untagged_value = Value(load_untagged_instr);
auto* const constant_instr = new (zone) UnboxedConstantInstr(
Integer::ZoneHandle(zone, Integer::New(0, Heap::kOld)), kUnboxedIntPtr);
flow_graph->InsertBefore(another_function_call, constant_instr, nullptr,
FlowGraph::kValue);
auto constant_value = Value(constant_instr);
auto* const load_indexed_instr = new (zone)
- LoadIndexedInstr(&load_field_value, &constant_value,
+ LoadIndexedInstr(&load_untagged_value, &constant_value,
/*index_unboxed=*/true, /*index_scale=*/1, kArrayCid,
kAlignedAccess, DeoptId::kNone, InstructionSource());
flow_graph->InsertBefore(another_function_call, load_indexed_instr, nullptr,
@@ -863,7 +861,7 @@
EXPECT(cursor.TryMatch({
kMoveGlob,
kMatchAndMoveStaticCall,
- kMatchAndMoveLoadField,
+ kMatchAndMoveLoadUntagged,
kMatchAndMoveUnboxedConstant,
kMatchAndMoveLoadIndexed,
kMatchAndMoveStaticCall,
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index 8420dfe..0ab9e88 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -276,16 +276,14 @@
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
classid_t array_cid,
Register array_reg,
- Representation array_rep,
Location start_loc) {
- intptr_t offset = 0;
- if (array_rep != kTagged) {
- // Do nothing, array_reg already contains the payload address.
- } else if (IsTypedDataBaseClassId(array_cid)) {
- ASSERT_EQUAL(array_rep, kTagged);
- __ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
+ intptr_t offset;
+ if (IsTypedDataBaseClassId(array_cid)) {
+ __ movq(array_reg,
+ compiler::FieldAddress(
+ array_reg, compiler::target::PointerBase::data_offset()));
+ offset = 0;
} else {
- ASSERT_EQUAL(array_rep, kTagged);
switch (array_cid) {
case kOneByteStringCid:
offset =
@@ -300,12 +298,14 @@
compiler::FieldAddress(array_reg,
compiler::target::ExternalOneByteString::
external_data_offset()));
+ offset = 0;
break;
case kExternalTwoByteStringCid:
__ movq(array_reg,
compiler::FieldAddress(array_reg,
compiler::target::ExternalTwoByteString::
external_data_offset()));
+ offset = 0;
break;
default:
UNREACHABLE();
@@ -1697,7 +1697,9 @@
compiler::Label rest, rest_loop, rest_loop_in, done;
// Address of input bytes.
- __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
+ __ movq(bytes_reg,
+ compiler::FieldAddress(bytes_reg,
+ compiler::target::PointerBase::data_offset()));
// Pointers to start, end and end-16.
__ leaq(bytes_ptr_reg, compiler::Address(bytes_reg, start_reg, TIMES_1, 0));
@@ -1815,6 +1817,24 @@
}
}
+LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
+ bool opt) const {
+ const intptr_t kNumInputs = 1;
+ return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
+ LocationSummary::kNoCall);
+}
+
+void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ Register obj = locs()->in(0).reg();
+ Register result = locs()->out(0).reg();
+ if (object()->definition()->representation() == kUntagged) {
+ __ movq(result, compiler::Address(obj, offset()));
+ } else {
+ ASSERT(object()->definition()->representation() == kTagged);
+ __ movq(result, compiler::FieldAddress(obj, offset()));
+ }
+}
+
LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
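
Note: the reverted EmitComputeStartPointer (both the RISC-V and x64 copies) folds the start-pointer decision back to class id alone: typed data and external strings keep their payload behind a data pointer that gets dereferenced (so the remaining offset is 0), while one- and two-byte strings store their payload inline at a constant offset. A stand-alone model of that branching, with our own enum and an assumed inline payload offset standing in for the VM's target constants:

    #include <cstdint>

    // Our own enum and constants; only the branching structure mirrors the
    // VM code above.
    enum class DemoCid { kTypedData, kExternalString, kOneByteString,
                         kTwoByteString };

    struct StartPointer {
      bool dereference_data_field;  // load the payload pointer from the object
      intptr_t payload_offset;      // constant offset of an inline payload
    };

    StartPointer ComputeStartPointer(DemoCid cid) {
      switch (cid) {
        case DemoCid::kTypedData:       // PointerBase.data holds the address
        case DemoCid::kExternalString:  // external_data holds the address
          return {true, 0};
        case DemoCid::kOneByteString:   // payload is inline after the header
        case DemoCid::kTwoByteString:
          return {false, /*assumed header size*/ 16};
      }
      return {false, 0};
    }
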
diff --git a/runtime/vm/compiler/backend/inliner.cc b/runtime/vm/compiler/backend/inliner.cc
index 89e0dd3..7ff8ceb 100644
--- a/runtime/vm/compiler/backend/inliner.cc
+++ b/runtime/vm/compiler/backend/inliner.cc
@@ -2781,9 +2781,8 @@
*array = elements;
array_cid = kArrayCid;
} else if (IsExternalTypedDataClassId(array_cid)) {
- auto* const elements = new (Z) LoadFieldInstr(
- new (Z) Value(*array), Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer, call->source());
+ LoadUntaggedInstr* elements = new (Z) LoadUntaggedInstr(
+ new (Z) Value(*array), compiler::target::PointerBase::data_offset());
*cursor =
flow_graph->AppendTo(*cursor, elements, nullptr, FlowGraph::kValue);
*array = elements;
@@ -3178,14 +3177,9 @@
Definition** array,
Instruction** cursor) {
if (array_cid == kDynamicCid || IsExternalTypedDataClassId(array_cid)) {
- // Internal or External typed data: load the untagged base address.
- auto const loads_inner_pointer =
- IsExternalTypedDataClassId(array_cid)
- ? InnerPointerAccess::kCannotBeInnerPointer
- : InnerPointerAccess::kMayBeInnerPointer;
- auto* const elements =
- new (Z) LoadFieldInstr(new (Z) Value(*array), Slot::PointerBase_data(),
- loads_inner_pointer, call->source());
+ // Internal or External typed data: load untagged.
+ auto elements = new (Z) LoadUntaggedInstr(
+ new (Z) Value(*array), compiler::target::PointerBase::data_offset());
*cursor =
flow_graph->AppendTo(*cursor, elements, nullptr, FlowGraph::kValue);
*array = elements;
@@ -5040,6 +5034,55 @@
return true;
}
+ case MethodRecognizer::kMemCopy: {
+ // Keep consistent with kernel_to_il.cc (except unboxed param).
+ *entry = new (Z)
+ FunctionEntryInstr(graph_entry, flow_graph->allocate_block_id(),
+ call->GetBlock()->try_index(), DeoptId::kNone);
+ (*entry)->InheritDeoptTarget(Z, call);
+ Definition* arg_target = call->ArgumentAt(0);
+ Definition* arg_target_offset_in_bytes = call->ArgumentAt(1);
+ Definition* arg_source = call->ArgumentAt(2);
+ Definition* arg_source_offset_in_bytes = call->ArgumentAt(3);
+ Definition* arg_length_in_bytes = call->ArgumentAt(4);
+
+ auto env = call->deopt_id() != DeoptId::kNone ? call->env() : nullptr;
+
+ // Insert explicit unboxing instructions with truncation to avoid relying
+ // on [SelectRepresentations] which doesn't mark them as truncating.
+ arg_target_offset_in_bytes = UnboxInstr::Create(
+ kUnboxedIntPtr, new (Z) Value(arg_target_offset_in_bytes),
+ call->deopt_id(), Instruction::kNotSpeculative);
+ arg_target_offset_in_bytes->AsUnboxInteger()->mark_truncating();
+ flow_graph->AppendTo(*entry, arg_target_offset_in_bytes, env,
+ FlowGraph::kValue);
+ arg_source_offset_in_bytes = UnboxInstr::Create(
+ kUnboxedIntPtr, new (Z) Value(arg_source_offset_in_bytes),
+ call->deopt_id(), Instruction::kNotSpeculative);
+ arg_source_offset_in_bytes->AsUnboxInteger()->mark_truncating();
+ flow_graph->AppendTo(arg_target_offset_in_bytes,
+ arg_source_offset_in_bytes, env, FlowGraph::kValue);
+ arg_length_in_bytes =
+ UnboxInstr::Create(kUnboxedIntPtr, new (Z) Value(arg_length_in_bytes),
+ call->deopt_id(), Instruction::kNotSpeculative);
+ arg_length_in_bytes->AsUnboxInteger()->mark_truncating();
+ flow_graph->AppendTo(arg_source_offset_in_bytes, arg_length_in_bytes, env,
+ FlowGraph::kValue);
+
+ *last = new (Z)
+ MemoryCopyInstr(new (Z) Value(arg_source), new (Z) Value(arg_target),
+ new (Z) Value(arg_source_offset_in_bytes),
+ new (Z) Value(arg_target_offset_in_bytes),
+ new (Z) Value(arg_length_in_bytes),
+ /*src_cid=*/kTypedDataUint8ArrayCid,
+ /*dest_cid=*/kTypedDataUint8ArrayCid,
+ /*unboxed_inputs=*/true, /*can_overlap=*/true);
+ flow_graph->AppendTo(arg_length_in_bytes, *last, env, FlowGraph::kEffect);
+
+ *result = flow_graph->constant_null();
+ return true;
+ }
+
default:
return false;
}
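
Note: in the restored kMemCopy inlining above, each integer argument is unboxed with mark_truncating() precisely because SelectRepresentations would not mark the unbox as truncating on its own: truncation keeps the low word-size bits without inserting a deoptimizing range check. A sketch of that narrowing semantics, not the VM's UnboxInstr itself:

    #include <cstdint>
    #include <cstdio>

    // Truncating unbox: keep the low word-size bits, never deoptimize.
    intptr_t TruncatingUnbox(int64_t value) {
      return static_cast<intptr_t>(static_cast<uintptr_t>(value));
    }

    int main() {
      // On a 32-bit target this prints 5; on 64-bit the value fits unchanged.
      printf("%lld\n", (long long)TruncatingUnbox(0x100000005LL));
      return 0;
    }
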
diff --git a/runtime/vm/compiler/backend/locations.cc b/runtime/vm/compiler/backend/locations.cc
index 70fd8a8..e37b9fa 100644
--- a/runtime/vm/compiler/backend/locations.cc
+++ b/runtime/vm/compiler/backend/locations.cc
@@ -60,13 +60,8 @@
#undef REP_IN_SET_CLAUSE
compiler::OperandSize RepresentationUtils::OperandSize(Representation rep) {
- if (rep == kTagged) {
+ if (rep == kTagged || rep == kUntagged) {
return compiler::kObjectBytes;
- } else if (rep == kUntagged) {
- // Untagged addresses are either loaded from and stored to word size native
- // fields or generated from already-extended tagged addresses when
- // compressed pointers are enabled.
- return compiler::kWordBytes;
}
ASSERT(IsUnboxedInteger(rep));
switch (ValueSize(rep)) {
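
Note: this hunk makes kUntagged share kObjectBytes with kTagged again. The comment being deleted records why the reverted CL had used kWordBytes instead: untagged addresses live in word-sized native fields, and with compressed pointers a tagged object slot is narrower than a machine word. A sketch of the distinction, with constants assumed for a 64-bit compressed-pointer build:

    #include <cstddef>

    // Illustrative values for a 64-bit build with compressed pointers; the
    // real constants live in the VM's compiler target layer.
    constexpr size_t kWordBytes = 8;    // machine word / untagged address
    constexpr size_t kObjectBytes = 4;  // compressed tagged slot

    static_assert(kObjectBytes <= kWordBytes,
                  "a tagged slot never exceeds the word size");
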
diff --git a/runtime/vm/compiler/backend/memory_copy_test.cc b/runtime/vm/compiler/backend/memory_copy_test.cc
index 4261dc8..c084c6f 100644
--- a/runtime/vm/compiler/backend/memory_copy_test.cc
+++ b/runtime/vm/compiler/backend/memory_copy_test.cc
@@ -224,11 +224,12 @@
Integer::ZoneHandle(zone, Integer::New(length, Heap::kOld)), rep);
auto* const memory_copy_instr = new (zone) MemoryCopyInstr(
- new (zone) Value(pointer), /*src_cid=*/cid, new (zone) Value(pointer2),
- /*dest_cid=*/cid, new (zone) Value(src_start_constant_instr),
+ new (zone) Value(pointer), new (zone) Value(pointer2),
+ new (zone) Value(src_start_constant_instr),
new (zone) Value(dest_start_constant_instr),
- new (zone) Value(length_constant_instr), unboxed_inputs,
- /*can_overlap=*/use_same_buffer);
+ new (zone) Value(length_constant_instr),
+ /*src_cid=*/cid,
+ /*dest_cid=*/cid, unboxed_inputs, /*can_overlap=*/use_same_buffer);
flow_graph->InsertBefore(another_function_call, memory_copy_instr, nullptr,
FlowGraph::kEffect);
@@ -341,12 +342,11 @@
}
auto* const memory_copy_instr = new (zone) MemoryCopyInstr(
- new (zone) Value(param_ptr), /*src_cid=*/cid,
- new (zone) Value(param_ptr2), /*dest_cid=*/cid,
+ new (zone) Value(param_ptr), new (zone) Value(param_ptr2),
new (zone) Value(src_start_def), new (zone) Value(dest_start_def),
new (zone) Value(length_def),
-
- unboxed_inputs, /*can_overlap=*/use_same_buffer);
+ /*src_cid=*/cid,
+ /*dest_cid=*/cid, unboxed_inputs, /*can_overlap=*/use_same_buffer);
flow_graph->InsertBefore(return_instr, memory_copy_instr, nullptr,
FlowGraph::kEffect);
diff --git a/runtime/vm/compiler/backend/range_analysis.cc b/runtime/vm/compiler/backend/range_analysis.cc
index 2d5063a..b717a9e 100644
--- a/runtime/vm/compiler/backend/range_analysis.cc
+++ b/runtime/vm/compiler/backend/range_analysis.cc
@@ -2850,22 +2850,13 @@
UNREACHABLE();
break;
-#define UNBOXED_NATIVE_NONADDRESS_SLOT_CASE(Class, Untagged, Field, Rep, \
- IsFinal) \
+#define UNBOXED_NATIVE_SLOT_CASE(Class, Untagged, Field, Rep, IsFinal) \
case Slot::Kind::k##Class##_##Field:
- UNBOXED_NATIVE_NONADDRESS_SLOTS_LIST(UNBOXED_NATIVE_NONADDRESS_SLOT_CASE)
-#undef UNBOXED_NATIVE_NONADDRESS_SLOT_CASE
+ UNBOXED_NATIVE_SLOTS_LIST(UNBOXED_NATIVE_SLOT_CASE)
+#undef UNBOXED_NATIVE_SLOT_CASE
*range = Range::Full(RepresentationToRangeSize(slot().representation()));
break;
-#define UNBOXED_NATIVE_ADDRESS_SLOT_CASE(Class, Untagged, Field, MayMove, \
- IsFinal) \
- case Slot::Kind::k##Class##_##Field:
- UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(UNBOXED_NATIVE_ADDRESS_SLOT_CASE)
-#undef UNBOXED_NATIVE_ADDRESS_SLOT_CASE
- UNREACHABLE();
- break;
-
case Slot::Kind::kClosure_hash:
case Slot::Kind::kLinkedHashBase_hash_mask:
case Slot::Kind::kLinkedHashBase_used_data:
diff --git a/runtime/vm/compiler/backend/redundancy_elimination.cc b/runtime/vm/compiler/backend/redundancy_elimination.cc
index 60538db..8dbcf86 100644
--- a/runtime/vm/compiler/backend/redundancy_elimination.cc
+++ b/runtime/vm/compiler/backend/redundancy_elimination.cc
@@ -7,7 +7,6 @@
#include <utility>
#include "vm/bit_vector.h"
-#include "vm/class_id.h"
#include "vm/compiler/backend/flow_graph.h"
#include "vm/compiler/backend/il.h"
#include "vm/compiler/backend/il_printer.h"
@@ -1121,8 +1120,6 @@
use = use->next_use()) {
Instruction* instr = use->instruction();
if (instr->HasUnknownSideEffects() || instr->IsLoadUntagged() ||
- (instr->IsLoadField() &&
- instr->AsLoadField()->MayCreateUntaggedAlias()) ||
(instr->IsStoreIndexed() &&
(use->use_index() == StoreIndexedInstr::kValuePos)) ||
instr->IsStoreStaticField() || instr->IsPhi()) {
@@ -1418,9 +1415,8 @@
// Load instructions handled by load elimination.
static bool IsLoadEliminationCandidate(Instruction* instr) {
- return (instr->IsLoadField() && instr->AsLoadField()->loads_inner_pointer() !=
- InnerPointerAccess::kMayBeInnerPointer) ||
- instr->IsLoadIndexed() || instr->IsLoadStaticField();
+ return instr->IsLoadField() || instr->IsLoadIndexed() ||
+ instr->IsLoadStaticField();
}
static bool IsLoopInvariantLoad(ZoneGrowableArray<BitVector*>* sets,
@@ -3836,23 +3832,8 @@
/*index_scale=*/compiler::target::Instance::ElementSizeFor(array_cid),
array_cid, kAlignedAccess, DeoptId::kNone, alloc->source());
} else {
- auto loads_inner_pointer =
- slot->representation() != kUntagged ? InnerPointerAccess::kNotUntagged
- : slot->may_contain_inner_pointer()
- ? InnerPointerAccess::kMayBeInnerPointer
- : InnerPointerAccess::kCannotBeInnerPointer;
- // PointerBase.data loads for external typed data and pointers never
- // access an inner pointer.
- if (slot->IsIdentical(Slot::PointerBase_data())) {
- if (auto* const alloc_obj = alloc->AsAllocateObject()) {
- const classid_t cid = alloc_obj->cls().id();
- if (cid == kPointerCid || IsExternalTypedDataClassId(cid)) {
- loads_inner_pointer = InnerPointerAccess::kCannotBeInnerPointer;
- }
- }
- }
- load = new (Z) LoadFieldInstr(new (Z) Value(alloc), *slot,
- loads_inner_pointer, alloc->source());
+ load =
+ new (Z) LoadFieldInstr(new (Z) Value(alloc), *slot, alloc->source());
}
flow_graph_->InsertBefore(load_point, load, nullptr, FlowGraph::kValue);
values.Add(new (Z) Value(load));
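
Note: the two hunks above drop the inner-pointer bookkeeping from load elimination and allocation sinking. What the reverted CL was guarding against: a load that yields an inner pointer into a GC-movable object cannot be forwarded across an instruction that may trigger GC. A comment-only sketch of the hazard, in the IL notation the compiler sources use elsewhere:

    // Why forwarding an inner-pointer load across a GC point is unsound
    // (sketch; after this revert the distinction is gone again):
    //
    //   v1 <- LoadField(obj, PointerBase.data)  // inner pointer into obj
    //   AllocateObject(...)                     // may trigger GC, move obj
    //   v2 <- LoadField(obj, PointerBase.data)  // must not be replaced by v1
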
diff --git a/runtime/vm/compiler/backend/redundancy_elimination_test.cc b/runtime/vm/compiler/backend/redundancy_elimination_test.cc
index b814190..ef41a26 100644
--- a/runtime/vm/compiler/backend/redundancy_elimination_test.cc
+++ b/runtime/vm/compiler/backend/redundancy_elimination_test.cc
@@ -722,16 +722,16 @@
new AllocateObjectInstr(InstructionSource(), view_cls, DeoptId::kNone));
// v1 <- LoadNativeField(array, Slot::PointerBase_data())
- v1 = builder.AddDefinition(new LoadFieldInstr(
- new (zone) Value(array), Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer, InstructionSource()));
+ v1 = builder.AddDefinition(new LoadFieldInstr(new (zone) Value(array),
+ Slot::PointerBase_data(),
+ InstructionSource()));
// StoreNativeField(Slot::PointerBase_data(), view, v1, kNoStoreBarrier,
//                  kInitializing)
store = builder.AddInstruction(new StoreFieldInstr(
Slot::PointerBase_data(), new (zone) Value(view), new (zone) Value(v1),
- kNoStoreBarrier, InnerPointerAccess::kMayBeInnerPointer,
- InstructionSource(), StoreFieldInstr::Kind::kInitializing));
+ kNoStoreBarrier, InstructionSource(),
+ StoreFieldInstr::Kind::kInitializing));
// return view
ret = builder.AddInstruction(new ReturnInstr(
diff --git a/runtime/vm/compiler/backend/slot.cc b/runtime/vm/compiler/backend/slot.cc
index 91f9984..f21db36 100644
--- a/runtime/vm/compiler/backend/slot.cc
+++ b/runtime/vm/compiler/backend/slot.cc
@@ -106,8 +106,8 @@
#undef DEFINE_NONNULLABLE_BOXED_NATIVE_FIELD
-#define DEFINE_UNBOXED_NATIVE_NONADDRESS_FIELD( \
- ClassName, UnderlyingType, FieldName, representation, mutability) \
+#define DEFINE_UNBOXED_NATIVE_FIELD(ClassName, UnderlyingType, FieldName, \
+ representation, mutability) \
case Slot::Kind::k##ClassName##_##FieldName: \
return new (zone_) \
Slot(Slot::Kind::k##ClassName##_##FieldName, \
@@ -118,24 +118,9 @@
CompileType::FromUnboxedRepresentation(kUnboxed##representation), \
kUnboxed##representation);
- UNBOXED_NATIVE_NONADDRESS_SLOTS_LIST(DEFINE_UNBOXED_NATIVE_NONADDRESS_FIELD)
+ UNBOXED_NATIVE_SLOTS_LIST(DEFINE_UNBOXED_NATIVE_FIELD)
-#undef DEFINE_UNBOXED_NATIVE_NONADDRESS_FIELD
-
-#define DEFINE_UNBOXED_NATIVE_ADDRESS_FIELD(ClassName, UnderlyingType, \
- FieldName, GcMayMove, mutability) \
- case Slot::Kind::k##ClassName##_##FieldName: \
- return new (zone_) \
- Slot(Slot::Kind::k##ClassName##_##FieldName, \
- Slot::IsImmutableBit::encode(FIELD_##mutability) | \
- Slot::MayContainInnerPointerBit::encode(GcMayMove) | \
- Slot::IsUnboxedBit::encode(true), \
- compiler::target::ClassName::FieldName##_offset(), \
- #ClassName "." #FieldName, CompileType::Object(), kUntagged);
-
- UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(DEFINE_UNBOXED_NATIVE_ADDRESS_FIELD)
-
-#undef DEFINE_UNBOXED_NATIVE_NONADDRESS_FIELD
+#undef DEFINE_UNBOXED_NATIVE_FIELD
#undef FIELD_VAR
#undef FIELD_FINAL
default:
@@ -333,6 +318,19 @@
: state_(GuardedCidBits::encode(field.guarded_cid()) |
IsNullableBit::encode(field.is_nullable())) {}
+Representation Slot::UnboxedRepresentation() const {
+ switch (field_guard_state().guarded_cid()) {
+ case kDoubleCid:
+ return kUnboxedDouble;
+ case kFloat32x4Cid:
+ return kUnboxedFloat32x4;
+ case kFloat64x2Cid:
+ return kUnboxedFloat64x2;
+ default:
+ return kUnboxedInt64;
+ }
+}
+
const Slot& Slot::Get(const Field& field,
const ParsedFunction* parsed_function) {
Thread* thread = Thread::Current();
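
Note: the restored Slot::UnboxedRepresentation above derives a guarded field's unboxed representation from its guarded class id, with everything that is not a double or SIMD value falling back to a 64-bit integer. A stand-alone mirror of that mapping, with our own enums in place of the VM's:

    // Enums are ours; only the mapping mirrors the switch above.
    enum class DemoCid { kDouble, kFloat32x4, kFloat64x2, kSmi };
    enum class DemoRep { kUnboxedDouble, kUnboxedFloat32x4,
                         kUnboxedFloat64x2, kUnboxedInt64 };

    DemoRep UnboxedRepresentation(DemoCid guarded_cid) {
      switch (guarded_cid) {
        case DemoCid::kDouble:    return DemoRep::kUnboxedDouble;
        case DemoCid::kFloat32x4: return DemoRep::kUnboxedFloat32x4;
        case DemoCid::kFloat64x2: return DemoRep::kUnboxedFloat64x2;
        default:                  return DemoRep::kUnboxedInt64;
      }
    }
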
diff --git a/runtime/vm/compiler/backend/slot.h b/runtime/vm/compiler/backend/slot.h
index a04fe60..a5e49f0 100644
--- a/runtime/vm/compiler/backend/slot.h
+++ b/runtime/vm/compiler/backend/slot.h
@@ -145,8 +145,17 @@
NONNULLABLE_BOXED_NATIVE_SLOTS_LIST(FOR_EACH_NATIVE_SLOT)
#undef FOR_EACH_NATIVE_SLOT
-// List of slots that correspond to unboxed fields of native objects that
-// do not contain untagged addresses in the following format:
+// Only define AOT-only unboxed native slots when in the precompiler. See
+// UNBOXED_NATIVE_SLOTS_LIST for the format.
+#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
+#define AOT_ONLY_UNBOXED_NATIVE_SLOTS_LIST(V) \
+ V(Closure, UntaggedClosure, entry_point, Uword, FINAL)
+#else
+#define AOT_ONLY_UNBOXED_NATIVE_SLOTS_LIST(V)
+#endif
+
+// List of slots that correspond to unboxed fields of native objects in the
+// following format:
//
// V(class_name, underlying_type, field_name, representation, FINAL|VAR)
//
@@ -163,63 +172,21 @@
//
// Note: Currently LoadFieldInstr::IsImmutableLengthLoad() assumes that no
// unboxed slots represent length loads.
-#define UNBOXED_NATIVE_NONADDRESS_SLOTS_LIST(V) \
+#define UNBOXED_NATIVE_SLOTS_LIST(V) \
+ AOT_ONLY_UNBOXED_NATIVE_SLOTS_LIST(V) \
V(AbstractType, UntaggedAbstractType, flags, Uint32, FINAL) \
V(ClosureData, UntaggedClosureData, packed_fields, Uint32, FINAL) \
+ V(FinalizerBase, UntaggedFinalizerBase, isolate, IntPtr, VAR) \
V(FinalizerEntry, UntaggedFinalizerEntry, external_size, IntPtr, VAR) \
+ V(Function, UntaggedFunction, entry_point, Uword, FINAL) \
V(Function, UntaggedFunction, kind_tag, Uint32, FINAL) \
V(FunctionType, UntaggedFunctionType, packed_parameter_counts, Uint32, \
FINAL) \
V(FunctionType, UntaggedFunctionType, packed_type_parameter_counts, Uint16, \
FINAL) \
+ V(PointerBase, UntaggedPointerBase, data, IntPtr, VAR) \
V(SubtypeTestCache, UntaggedSubtypeTestCache, num_inputs, Uint32, FINAL)
-// Unboxed native slots containing untagged addresses that do not exist
-// in JIT mode. See UNBOXED_NATIVE_ADDRESS_SLOTS_LIST for the format.
-#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
-#define AOT_ONLY_UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(V) \
- V(Closure, UntaggedClosure, entry_point, false, FINAL)
-#else
-#define AOT_ONLY_UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(V)
-#endif
-
-// List of slots that correspond to unboxed fields of native objects containing
-// untagged addresses in the following format:
-//
-// V(class_name, underlying_type, field_name, gc_may_move, FINAL|VAR)
-//
-// - class_name and field_name specify the name of the host class and the name
-// of the field respectively;
-// - underlying_type: the Raw class which holds the field;
-// - gc_may_move: whether the untagged address contained in this field is a
-// pointer to memory that may be moved by the GC, which means a value loaded
-// from this field is invalidated by any instruction that can cause GC;
-// - the last component specifies whether field behaves like a final field
-// (i.e. initialized once at construction time and does not change after
-// that) or like a non-final field.
-//
-// Note: As the underlying field is unboxed, these slots cannot be nullable.
-//
-// Note: All slots for unboxed fields that contain untagged addresses are given
-// the kUntagged representation, and so a value loaded from these fields must
-// be converted explicitly to an unboxed integer representation for any
-// pointer arithmetic before use, and an unboxed integer must be converted
-// explicitly to an untagged address before being stored to these fields.
-//
-// Note: Currently LoadFieldInstr::IsImmutableLengthLoad() assumes that no
-// unboxed slots represent length loads.
-#define UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(V) \
- AOT_ONLY_UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(V) \
- V(Function, UntaggedFunction, entry_point, false, FINAL) \
- V(FinalizerBase, UntaggedFinalizerBase, isolate, false, VAR) \
- V(PointerBase, UntaggedPointerBase, data, true, VAR)
-
-// For uses that do not need to know whether a given slot may contain an
-// inner pointer to a GC-able object or not. (Generally, such users only need
-// the class name, the underlying type, and/or the field name.)
-#define UNBOXED_NATIVE_SLOTS_LIST(V) \
- UNBOXED_NATIVE_NONADDRESS_SLOTS_LIST(V) UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(V)
-
// For uses that do not need the exact_type (boxed) or representation (unboxed)
// or whether a boxed native slot is nullable. (Generally, such users only need
// the class name, the underlying type, and/or the field name.)
@@ -350,12 +317,6 @@
bool is_compressed() const { return IsCompressedBit::decode(flags_); }
- // Returns true if the field is an unboxed native field that may contain an
- // inner pointer to a GC-movable object.
- bool may_contain_inner_pointer() const {
- return MayContainInnerPointerBit::decode(flags_);
- }
-
// Type information about values that can be read from this slot.
CompileType type() const { return type_; }
@@ -374,7 +335,10 @@
return kind() == Kind::kCapturedVariable || kind() == Kind::kContext_parent;
}
- bool is_unboxed() const { return IsUnboxedBit::decode(flags_); }
+ bool is_unboxed() const {
+ return IsUnboxedBit::decode(flags_);
+ }
+ Representation UnboxedRepresentation() const;
void Write(FlowGraphSerializer* s) const;
static const Slot& Read(FlowGraphDeserializer* d);
@@ -408,8 +372,6 @@
using IsGuardedBit = BitField<int8_t, bool, IsImmutableBit::kNextBit, 1>;
using IsCompressedBit = BitField<int8_t, bool, IsGuardedBit::kNextBit, 1>;
using IsUnboxedBit = BitField<int8_t, bool, IsCompressedBit::kNextBit, 1>;
- using MayContainInnerPointerBit =
- BitField<int8_t, bool, IsUnboxedBit::kNextBit, 1>;
template <typename T>
const T* DataAs() const {
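
Note: the slot.h hunks fold the address/non-address split back into a single UNBOXED_NATIVE_SLOTS_LIST. All of these lists use the same X-macro idiom: one macro enumerates the slots as V(...) tuples, and each consumer defines V to expand the tuples however it needs (enum cases, constructors, switch arms). A minimal stand-alone demo of that pattern; the list contents here are just two slots picked from the diff:

    #include <cstdio>

    // One macro enumerates the slots; each consumer redefines V.
    #define DEMO_SLOTS_LIST(V)                                                 \
      V(PointerBase, data)                                                     \
      V(Function, entry_point)

    enum class SlotKind {
    #define DEFINE_KIND(ClassName, FieldName) k##ClassName##_##FieldName,
      DEMO_SLOTS_LIST(DEFINE_KIND)
    #undef DEFINE_KIND
    };

    const char* SlotName(SlotKind kind) {
      switch (kind) {
    #define NAME_CASE(ClassName, FieldName)                                    \
      case SlotKind::k##ClassName##_##FieldName:                               \
        return #ClassName "." #FieldName;
        DEMO_SLOTS_LIST(NAME_CASE)
    #undef NAME_CASE
      }
      return "";
    }

    int main() {
      printf("%s\n", SlotName(SlotKind::kPointerBase_data));  // PointerBase.data
      return 0;
    }
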
diff --git a/runtime/vm/compiler/backend/type_propagator.cc b/runtime/vm/compiler/backend/type_propagator.cc
index 8bd5305..f997abf 100644
--- a/runtime/vm/compiler/backend/type_propagator.cc
+++ b/runtime/vm/compiler/backend/type_propagator.cc
@@ -760,11 +760,6 @@
kCannotBeSentinel);
}
-CompileType CompileType::Object() {
- return FromAbstractType(Type::ZoneHandle(Type::ObjectType()), kCannotBeNull,
- kCannotBeSentinel);
-}
-
intptr_t CompileType::ToCid() {
if (cid_ == kIllegalCid) {
// Make sure to initialize cid_ for Null type to consistently return
diff --git a/runtime/vm/compiler/backend/typed_data_aot_test.cc b/runtime/vm/compiler/backend/typed_data_aot_test.cc
index 22759ae..1a0efd7 100644
--- a/runtime/vm/compiler/backend/typed_data_aot_test.cc
+++ b/runtime/vm/compiler/backend/typed_data_aot_test.cc
@@ -42,7 +42,7 @@
CheckNullInstr* check_null = nullptr;
LoadFieldInstr* load_field = nullptr;
GenericCheckBoundInstr* bounds_check = nullptr;
- LoadFieldInstr* load_untagged = nullptr;
+ Instruction* load_untagged = nullptr;
LoadIndexedInstr* load_indexed = nullptr;
ILMatcher cursor(flow_graph, entry);
@@ -54,7 +54,7 @@
kMatchAndMoveBranchTrue,
kMoveGlob,
{kMatchAndMoveGenericCheckBound, &bounds_check},
- {kMatchAndMoveLoadField, &load_untagged},
+ {kMatchAndMoveLoadUntagged, &load_untagged},
kMoveParallelMoves,
{kMatchAndMoveLoadIndexed, &load_indexed},
kMoveGlob,
@@ -69,7 +69,7 @@
kMatchAndMoveBranchTrue,
kMoveGlob,
{kMatchAndMoveGenericCheckBound, &bounds_check},
- {kMatchAndMoveLoadField, &load_untagged},
+ {kMatchAndMoveLoadUntagged, &load_untagged},
kMoveParallelMoves,
{kMatchAndMoveLoadIndexed, &load_indexed},
kMoveGlob,
@@ -140,27 +140,27 @@
// Load 1
kMatchAndMoveGenericCheckBound,
kMoveGlob,
- kMatchAndMoveLoadField,
+ kMatchAndMoveLoadUntagged,
kMoveParallelMoves,
kMatchAndMoveLoadIndexed,
kMoveGlob,
// Load 2
kMatchAndMoveGenericCheckBound,
kMoveGlob,
- kMatchAndMoveLoadField,
+ kMatchAndMoveLoadUntagged,
kMoveParallelMoves,
kMatchAndMoveLoadIndexed,
kMoveGlob,
// Store 1
kMatchAndMoveCheckWritable,
kMoveParallelMoves,
- kMatchAndMoveLoadField,
+ kMatchAndMoveLoadUntagged,
kMoveParallelMoves,
kMatchAndMoveStoreIndexed,
kMoveGlob,
// Store 2
kMoveParallelMoves,
- kMatchAndMoveLoadField,
+ kMatchAndMoveLoadUntagged,
kMoveParallelMoves,
kMatchAndMoveStoreIndexed,
kMoveGlob,
@@ -184,27 +184,27 @@
// Load 1
kMatchAndMoveGenericCheckBound,
kMoveGlob,
- kMatchAndMoveLoadField,
+ kMatchAndMoveLoadUntagged,
kMoveParallelMoves,
kMatchAndMoveLoadIndexed,
kMoveGlob,
// Load 2
kMatchAndMoveGenericCheckBound,
kMoveGlob,
- kMatchAndMoveLoadField,
+ kMatchAndMoveLoadUntagged,
kMoveParallelMoves,
kMatchAndMoveLoadIndexed,
kMoveGlob,
// Store 1
kMatchAndMoveCheckWritable,
kMoveParallelMoves,
- kMatchAndMoveLoadField,
+ kMatchAndMoveLoadUntagged,
kMoveParallelMoves,
kMatchAndMoveStoreIndexed,
kMoveGlob,
// Store 2
kMoveParallelMoves,
- kMatchAndMoveLoadField,
+ kMatchAndMoveLoadUntagged,
kMoveParallelMoves,
kMatchAndMoveStoreIndexed,
kMoveGlob,
@@ -282,7 +282,7 @@
// Store value.
kMoveGlob,
- kMatchAndMoveLoadField,
+ kMatchAndMoveLoadUntagged,
kMoveParallelMoves,
kMatchAndMoveOptionalUnbox,
kMoveParallelMoves,
@@ -316,7 +316,7 @@
// Store value.
kMoveGlob,
- kMatchAndMoveLoadField,
+ kMatchAndMoveLoadUntagged,
kMoveParallelMoves,
kMatchAndMoveOptionalUnbox,
kMoveParallelMoves,
diff --git a/runtime/vm/compiler/call_specializer.cc b/runtime/vm/compiler/call_specializer.cc
index b147d29..a063c73 100644
--- a/runtime/vm/compiler/call_specializer.cc
+++ b/runtime/vm/compiler/call_specializer.cc
@@ -1646,9 +1646,8 @@
const intptr_t element_size = TypedDataBase::ElementSizeFor(cid);
const intptr_t index_scale = element_size;
- auto data = new (Z)
- LoadFieldInstr(new (Z) Value(array), Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer, call->source());
+ auto data = new (Z) LoadUntaggedInstr(
+ new (Z) Value(array), compiler::target::PointerBase::data_offset());
flow_graph_->InsertBefore(call, data, call->env(), FlowGraph::kValue);
Definition* load = new (Z) LoadIndexedInstr(
@@ -1724,9 +1723,8 @@
break;
}
- auto data = new (Z)
- LoadFieldInstr(new (Z) Value(array), Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer, call->source());
+ auto data = new (Z) LoadUntaggedInstr(
+ new (Z) Value(array), compiler::target::PointerBase::data_offset());
flow_graph_->InsertBefore(call, data, call->env(), FlowGraph::kValue);
auto store = new (Z) StoreIndexedInstr(
diff --git a/runtime/vm/compiler/call_specializer.h b/runtime/vm/compiler/call_specializer.h
index e92306c..6a46cc9 100644
--- a/runtime/vm/compiler/call_specializer.h
+++ b/runtime/vm/compiler/call_specializer.h
@@ -222,7 +222,7 @@
//
// // Directly access the byte, independent of whether `bytes` is
// // _Uint8List, _Uint8ArrayView or _ExternalUint8Array.
-// v5 <- LoadField(Slot::PointerBase_data(), v1);
+// v5 <- LoadUntagged(v1, "TypedDataBase.data");
// v5 <- LoadIndexed(v5, v4)
//
class TypedDataSpecializer : public FlowGraphVisitor {
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
index b8e8a7b..c949593 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
@@ -277,22 +277,8 @@
Value* dest = Pop();
Value* src = Pop();
auto copy =
- new (Z) MemoryCopyInstr(src, src_cid, dest, dest_cid, src_start,
- dest_start, length, unboxed_inputs, can_overlap);
- return Fragment(copy);
-}
-
-Fragment BaseFlowGraphBuilder::MemoryCopyUntagged(intptr_t element_size,
- bool unboxed_inputs,
- bool can_overlap) {
- Value* length = Pop();
- Value* dest_start = Pop();
- Value* src_start = Pop();
- Value* dest = Pop();
- Value* src = Pop();
- auto copy =
- new (Z) MemoryCopyInstr(element_size, src, dest, src_start, dest_start,
- length, unboxed_inputs, can_overlap);
+ new (Z) MemoryCopyInstr(src, dest, src_start, dest_start, length, src_cid,
+ dest_cid, unboxed_inputs, can_overlap);
return Fragment(copy);
}
@@ -443,6 +429,29 @@
return Fragment(converted);
}
+Fragment BaseFlowGraphBuilder::AddIntptrIntegers() {
+ Value* right = Pop();
+ Value* left = Pop();
+#if defined(TARGET_ARCH_IS_64_BIT)
+ auto add = new (Z) BinaryInt64OpInstr(
+ Token::kADD, left, right, DeoptId::kNone, Instruction::kNotSpeculative);
+#else
+ auto add =
+ new (Z) BinaryInt32OpInstr(Token::kADD, left, right, DeoptId::kNone);
+#endif
+ add->mark_truncating();
+ Push(add);
+ return Fragment(add);
+}
+
+Fragment BaseFlowGraphBuilder::UnboxSmiToIntptr() {
+ Value* value = Pop();
+ auto untagged = UnboxInstr::Create(kUnboxedIntPtr, value, DeoptId::kNone,
+ Instruction::kNotSpeculative);
+ Push(untagged);
+ return Fragment(untagged);
+}
+
Fragment BaseFlowGraphBuilder::FloatToDouble() {
Value* value = Pop();
FloatToDoubleInstr* instr = new FloatToDoubleInstr(value, DeoptId::kNone);
@@ -464,13 +473,11 @@
calls_initializer);
}
-Fragment BaseFlowGraphBuilder::LoadNativeField(
- const Slot& native_field,
- InnerPointerAccess loads_inner_pointer,
- bool calls_initializer) {
+Fragment BaseFlowGraphBuilder::LoadNativeField(const Slot& native_field,
+ bool calls_initializer) {
LoadFieldInstr* load = new (Z) LoadFieldInstr(
- Pop(), native_field, loads_inner_pointer, InstructionSource(),
- calls_initializer, calls_initializer ? GetNextDeoptId() : DeoptId::kNone);
+ Pop(), native_field, InstructionSource(), calls_initializer,
+ calls_initializer ? GetNextDeoptId() : DeoptId::kNone);
Push(load);
return Fragment(load);
}
@@ -509,7 +516,6 @@
Fragment BaseFlowGraphBuilder::StoreNativeField(
TokenPosition position,
const Slot& slot,
- InnerPointerAccess stores_inner_pointer,
StoreFieldInstr::Kind kind /* = StoreFieldInstr::Kind::kOther */,
StoreBarrierType emit_store_barrier /* = kEmitStoreBarrier */,
compiler::Assembler::MemoryOrder memory_order /* = kRelaxed */) {
@@ -517,9 +523,9 @@
if (value->BindsToConstant()) {
emit_store_barrier = kNoStoreBarrier;
}
- StoreFieldInstr* store = new (Z)
- StoreFieldInstr(slot, Pop(), value, emit_store_barrier,
- stores_inner_pointer, InstructionSource(position), kind);
+ StoreFieldInstr* store =
+ new (Z) StoreFieldInstr(slot, Pop(), value, emit_store_barrier,
+ InstructionSource(position), kind);
return Fragment(store);
}
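
Note: AddIntptrIntegers above reintroduces a word-sized add that is marked truncating: a 64-bit op on 64-bit targets, a 32-bit op otherwise, with no overflow check and no deopt, just two's-complement wrap at the target word size. The equivalent arithmetic as a stand-alone sketch:

    #include <cstdint>

    // Wrap-around add at the host word size; unsigned arithmetic avoids
    // signed-overflow UB and matches the truncating IL semantics.
    intptr_t AddIntptr(intptr_t a, intptr_t b) {
      return static_cast<intptr_t>(static_cast<uintptr_t>(a) +
                                   static_cast<uintptr_t>(b));
    }
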
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.h b/runtime/vm/compiler/frontend/base_flow_graph_builder.h
index ab8c026..27acef9 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.h
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.h
@@ -175,13 +175,7 @@
Fragment LoadField(const Field& field, bool calls_initializer);
Fragment LoadNativeField(const Slot& native_field,
- InnerPointerAccess loads_inner_pointer,
bool calls_initializer = false);
- Fragment LoadNativeField(const Slot& native_field,
- bool calls_initializer = false) {
- return LoadNativeField(native_field, InnerPointerAccess::kNotUntagged,
- calls_initializer);
- }
// Pass true for index_unboxed if indexing into external typed data.
Fragment LoadIndexed(classid_t class_id,
intptr_t index_scale = compiler::target::kWordSize,
@@ -191,9 +185,12 @@
Fragment LoadUntagged(intptr_t offset);
Fragment ConvertUntaggedToUnboxed(Representation to);
Fragment ConvertUnboxedToUntagged(Representation from);
+ Fragment UnboxSmiToIntptr();
Fragment FloatToDouble();
Fragment DoubleToFloat();
+ Fragment AddIntptrIntegers();
+
void SetTempIndex(Definition* definition);
Fragment LoadLocal(LocalVariable* variable);
@@ -209,40 +206,17 @@
Fragment StoreNativeField(
TokenPosition position,
const Slot& slot,
- InnerPointerAccess stores_inner_pointer,
StoreFieldInstr::Kind kind = StoreFieldInstr::Kind::kOther,
StoreBarrierType emit_store_barrier = kEmitStoreBarrier,
compiler::Assembler::MemoryOrder memory_order =
compiler::Assembler::kRelaxedNonAtomic);
Fragment StoreNativeField(
- TokenPosition position,
const Slot& slot,
StoreFieldInstr::Kind kind = StoreFieldInstr::Kind::kOther,
StoreBarrierType emit_store_barrier = kEmitStoreBarrier,
compiler::Assembler::MemoryOrder memory_order =
compiler::Assembler::kRelaxedNonAtomic) {
- return StoreNativeField(position, slot, InnerPointerAccess::kNotUntagged,
- kind, emit_store_barrier, memory_order);
- }
- Fragment StoreNativeField(
- const Slot& slot,
- InnerPointerAccess stores_inner_pointer,
- StoreFieldInstr::Kind kind = StoreFieldInstr::Kind::kOther,
- StoreBarrierType emit_store_barrier = kEmitStoreBarrier,
- compiler::Assembler::MemoryOrder memory_order =
- compiler::Assembler::kRelaxedNonAtomic) {
- return StoreNativeField(TokenPosition::kNoSource, slot,
- stores_inner_pointer, kind, emit_store_barrier,
- memory_order);
- }
- Fragment StoreNativeField(
- const Slot& slot,
- StoreFieldInstr::Kind kind = StoreFieldInstr::Kind::kOther,
- StoreBarrierType emit_store_barrier = kEmitStoreBarrier,
- compiler::Assembler::MemoryOrder memory_order =
- compiler::Assembler::kRelaxedNonAtomic) {
- return StoreNativeField(TokenPosition::kNoSource, slot,
- InnerPointerAccess::kNotUntagged, kind,
+ return StoreNativeField(TokenPosition::kNoSource, slot, kind,
emit_store_barrier, memory_order);
}
Fragment StoreField(
@@ -342,9 +316,6 @@
classid_t dest_cid,
bool unboxed_inputs,
bool can_overlap = true);
- Fragment MemoryCopyUntagged(intptr_t element_size,
- bool unboxed_inputs,
- bool can_overlap = true);
Fragment TailCall(const Code& code);
Fragment Utf8Scan();
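
Note: the builder methods declared above all follow the same pattern: pop inputs off a virtual stack, append one instruction, push the result, and return it as a Fragment that composes with +=. A toy model of that composition idiom, with plain strings in place of the VM's instruction objects:

    #include <string>
    #include <vector>

    // Toy model of Fragment composition: each helper returns a fragment and
    // `+=` concatenates instruction sequences in emission order.
    struct Fragment {
      std::vector<std::string> instructions;
      Fragment& operator+=(const Fragment& other) {
        instructions.insert(instructions.end(), other.instructions.begin(),
                            other.instructions.end());
        return *this;
      }
    };

    Fragment LoadLocal(const std::string& name) {
      return {{"LoadLocal " + name}};
    }
    Fragment StoreNativeField(const std::string& slot) {
      return {{"StoreNativeField " + slot}};
    }

    int main() {
      Fragment body;
      body += LoadLocal("pointer");
      body += LoadLocal("address");
      body += StoreNativeField("PointerBase.data");
      return body.instructions.size() == 3 ? 0 : 1;
    }
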
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.cc b/runtime/vm/compiler/frontend/kernel_to_il.cc
index 73215c7..35655e6 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.cc
+++ b/runtime/vm/compiler/frontend/kernel_to_il.cc
@@ -1140,19 +1140,19 @@
}
case MethodRecognizer::kTypedData_memMove1:
// Pick an appropriate typed data cid based on the element size.
- body += BuildTypedDataMemMove(function, 1);
+ body += BuildTypedDataMemMove(function, kTypedDataUint8ArrayCid);
break;
case MethodRecognizer::kTypedData_memMove2:
- body += BuildTypedDataMemMove(function, 2);
+ body += BuildTypedDataMemMove(function, kTypedDataUint16ArrayCid);
break;
case MethodRecognizer::kTypedData_memMove4:
- body += BuildTypedDataMemMove(function, 4);
+ body += BuildTypedDataMemMove(function, kTypedDataUint32ArrayCid);
break;
case MethodRecognizer::kTypedData_memMove8:
- body += BuildTypedDataMemMove(function, 8);
+ body += BuildTypedDataMemMove(function, kTypedDataUint64ArrayCid);
break;
case MethodRecognizer::kTypedData_memMove16:
- body += BuildTypedDataMemMove(function, 16);
+ body += BuildTypedDataMemMove(function, kTypedDataInt32x4ArrayCid);
break;
#define CASE(name) \
case MethodRecognizer::kTypedData_##name##_factory: \
@@ -1266,6 +1266,7 @@
body += Box(kUnboxedIntPtr);
break;
case MethodRecognizer::kMemCopy: {
+ // Keep consistent with inliner.cc (except boxed param).
ASSERT_EQUAL(function.NumParameters(), 5);
LocalVariable* arg_target = parsed_function_->RawParameterVariable(0);
LocalVariable* arg_target_offset_in_bytes =
@@ -1275,25 +1276,15 @@
parsed_function_->RawParameterVariable(3);
LocalVariable* arg_length_in_bytes =
parsed_function_->RawParameterVariable(4);
- // Load the untagged data fields of the source and destination so they
- // can be possibly load optimized away when applicable, and unbox the
- // numeric inputs since we're force optimizing _memCopy and that removes
- // the need to use SmiUntag within MemoryCopy when element_size is 1.
body += LoadLocal(arg_source);
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer);
body += LoadLocal(arg_target);
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer);
body += LoadLocal(arg_source_offset_in_bytes);
- body += UnboxTruncate(kUnboxedIntPtr);
body += LoadLocal(arg_target_offset_in_bytes);
- body += UnboxTruncate(kUnboxedIntPtr);
body += LoadLocal(arg_length_in_bytes);
- body += UnboxTruncate(kUnboxedIntPtr);
- body += MemoryCopyUntagged(/*element_size=*/1,
- /*unboxed_inputs=*/true,
- /*can_overlap=*/true);
+ // Pointers and TypedData have the same layout.
+ body += MemoryCopy(kTypedDataUint8ArrayCid, kTypedDataUint8ArrayCid,
+ /*unboxed_inputs=*/false,
+ /*can_overlap=*/true);
body += NullConstant();
} break;
case MethodRecognizer::kFfiAbi:
@@ -1341,8 +1332,7 @@
body += LoadLocal(arg_pointer);
body += CheckNullOptimized(String::ZoneHandle(Z, function.name()));
// No GC from here til LoadIndexed.
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer);
+ body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += LoadLocal(arg_offset_not_null);
body += UnboxTruncate(kUnboxedFfiIntPtr);
body += LoadIndexed(typed_data_cid, /*index_scale=*/1,
@@ -1370,10 +1360,7 @@
body += LoadLocal(pointer);
body += LoadLocal(address);
body += UnboxTruncate(kUnboxedIntPtr);
- body += ConvertUnboxedToUntagged(kUnboxedIntPtr);
- body += StoreNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer,
- StoreFieldInstr::Kind::kInitializing);
+ body += StoreNativeField(Slot::PointerBase_data());
body += DropTempsPreserveTop(1); // Drop [address] keep [pointer].
}
body += DropTempsPreserveTop(1); // Drop [arg_offset].
@@ -1413,15 +1400,13 @@
body += LoadLocal(arg_pointer); // Pointer.
body += CheckNullOptimized(String::ZoneHandle(Z, function.name()));
// No GC from here til StoreIndexed.
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer);
+ body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += LoadLocal(arg_offset_not_null);
body += UnboxTruncate(kUnboxedFfiIntPtr);
body += LoadLocal(arg_value_not_null);
if (kind == MethodRecognizer::kFfiStorePointer) {
- // This can only be Pointer, so it is safe to load the data field.
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer);
+ // This can only be Pointer, so it is always safe to LoadUntagged.
+ body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += ConvertUntaggedToUnboxed(kUnboxedFfiIntPtr);
} else {
// Avoid any unnecessary (and potentially deoptimizing) int
@@ -1453,18 +1438,14 @@
body += LoadLocal(parsed_function_->RawParameterVariable(0)); // Address.
body += CheckNullOptimized(String::ZoneHandle(Z, function.name()));
body += UnboxTruncate(kUnboxedIntPtr);
- body += ConvertUnboxedToUntagged(kUnboxedIntPtr);
- body += StoreNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer,
- StoreFieldInstr::Kind::kInitializing);
+ body += StoreNativeField(Slot::PointerBase_data());
} break;
case MethodRecognizer::kFfiGetAddress: {
ASSERT_EQUAL(function.NumParameters(), 1);
body += LoadLocal(parsed_function_->RawParameterVariable(0)); // Pointer.
body += CheckNullOptimized(String::ZoneHandle(Z, function.name()));
- // This can only be Pointer, so it is safe to load the data field.
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer);
+ // This can only be Pointer, so it is always safe to LoadUntagged.
+ body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += ConvertUntaggedToUnboxed(kUnboxedFfiIntPtr);
body += Box(kUnboxedFfiIntPtr);
} break;
@@ -1547,11 +1528,11 @@
// Initialize the result's data pointer field.
body += LoadLocal(typed_data_object);
body += LoadLocal(arg_pointer);
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer);
+ body += LoadUntagged(compiler::target::PointerBase::data_offset());
+ body += ConvertUntaggedToUnboxed(kUnboxedIntPtr);
body += StoreNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer,
- StoreFieldInstr::Kind::kInitializing);
+ StoreFieldInstr::Kind::kInitializing,
+ kNoStoreBarrier);
} break;
case MethodRecognizer::kGetNativeField: {
auto& name = String::ZoneHandle(Z, function.name());
@@ -1621,8 +1602,8 @@
ASSERT_EQUAL(function.NumParameters(), 1);
body += LoadLocal(parsed_function_->RawParameterVariable(0));
body += LoadIsolate();
- body += StoreNativeField(Slot::FinalizerBase_isolate(),
- InnerPointerAccess::kCannotBeInnerPointer);
+ body += ConvertUntaggedToUnboxed(kUnboxedIntPtr);
+ body += StoreNativeField(Slot::FinalizerBase_isolate());
body += NullConstant();
break;
case MethodRecognizer::kFinalizerBase_getIsolateFinalizers:
@@ -1748,14 +1729,6 @@
Fragment body;
- // Note that we do no input checking here before allocation. The factory is
- // private, and only called by other code in the library implementation.
- // Thus, either the inputs are checked within Dart code before the factory is
- // called (e.g., the implementation of XList.sublistView), or the inputs to
- // the factory are retrieved from previously constructed TypedData objects
- // and thus already checked (e.g., the implementation of the
- // UnmodifiableXListView constructors).
-
body += AllocateObject(token_pos, view_class, /*arg_count=*/0);
LocalVariable* view_object = MakeTemporary();
@@ -1778,27 +1751,22 @@
// Update the inner pointer.
//
- // WARNING: Notice that we assume here no GC happens between the
- // LoadNativeField and the StoreNativeField, as the GC expects a properly
- // updated data field (see ScavengerVisitorBase::VisitTypedDataViewPointers).
+ // WARNING: We assume that no GC happens between these four
+ // instructions!
body += LoadLocal(view_object);
body += LoadLocal(typed_data);
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer);
+ body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += ConvertUntaggedToUnboxed(kUnboxedIntPtr);
body += LoadLocal(offset_in_bytes);
- body += UnboxTruncate(kUnboxedIntPtr);
- body += BinaryIntegerOp(Token::kADD, kUnboxedIntPtr, /*is_truncating=*/true);
- body += ConvertUnboxedToUntagged(kUnboxedIntPtr);
- body += StoreNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer,
- StoreFieldInstr::Kind::kInitializing);
+ body += UnboxSmiToIntptr();
+ body += AddIntptrIntegers();
+ body += StoreNativeField(Slot::PointerBase_data());
return body;
}
Fragment FlowGraphBuilder::BuildTypedDataMemMove(const Function& function,
- intptr_t element_size) {
+ intptr_t cid) {
ASSERT_EQUAL(parsed_function_->function().NumParameters(), 5);
LocalVariable* arg_to = parsed_function_->RawParameterVariable(0);
LocalVariable* arg_to_start = parsed_function_->RawParameterVariable(1);
@@ -1833,23 +1801,18 @@
Fragment use_instruction(is_small_enough);
use_instruction += LoadLocal(arg_from);
- use_instruction += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer);
use_instruction += LoadLocal(arg_to);
- use_instruction += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer);
use_instruction += LoadLocal(arg_from_start);
use_instruction += LoadLocal(arg_to_start);
use_instruction += LoadLocal(arg_count);
- use_instruction +=
- MemoryCopyUntagged(element_size,
- /*unboxed_inputs=*/false, /*can_overlap=*/true);
+ use_instruction += MemoryCopy(cid, cid,
+ /*unboxed_inputs=*/false, /*can_overlap=*/true);
use_instruction += Goto(done);
+ const intptr_t element_size = Instance::ElementSizeFor(cid);
Fragment call_memmove(is_too_large);
call_memmove += LoadLocal(arg_to);
- call_memmove += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer);
+ call_memmove += LoadUntagged(compiler::target::PointerBase::data_offset());
call_memmove += ConvertUntaggedToUnboxed(kUnboxedIntPtr);
call_memmove += LoadLocal(arg_to_start);
call_memmove += IntConstant(element_size);
@@ -1858,8 +1821,7 @@
call_memmove +=
BinaryIntegerOp(Token::kADD, kUnboxedIntPtr, /*is_truncating=*/true);
call_memmove += LoadLocal(arg_from);
- call_memmove += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer);
+ call_memmove += LoadUntagged(compiler::target::PointerBase::data_offset());
call_memmove += ConvertUntaggedToUnboxed(kUnboxedIntPtr);
call_memmove += LoadLocal(arg_from_start);
call_memmove += IntConstant(element_size);
@@ -4515,10 +4477,7 @@
code += LoadLocal(pointer);
code += LoadLocal(address);
code += UnboxTruncate(kUnboxedIntPtr);
- code += ConvertUnboxedToUntagged(kUnboxedIntPtr);
- code += StoreNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer,
- StoreFieldInstr::Kind::kInitializing);
+ code += StoreNativeField(Slot::PointerBase_data());
code += StoreLocal(TokenPosition::kNoSource, result);
code += Drop(); // StoreLocal^
code += Drop(); // address
@@ -4600,8 +4559,7 @@
for (intptr_t i = 0; i < num_defs; i++) {
body += LoadLocal(variable);
body += LoadTypedDataBaseFromCompound();
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer);
+ body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += IntConstant(offset_in_bytes);
const Representation representation = representations[i];
offset_in_bytes += RepresentationUtils::ValueSize(representation);
@@ -4623,8 +4581,7 @@
for (intptr_t i = 0; i < num_defs; i++) {
const Representation representation = representations[i];
body += LoadLocal(uint8_list);
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer);
+ body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += IntConstant(offset_in_bytes);
body += LoadLocal(definitions->At(i));
body += StoreIndexedTypedDataUnboxed(representation, /*index_scale=*/1,
@@ -4678,8 +4635,7 @@
const classid_t typed_data_cidd = typed_data_cid(chunk_sizee);
body += LoadLocal(typed_data_base);
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer);
+ body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += IntConstant(offset_in_bytes);
body += LoadIndexed(typed_data_cidd, /*index_scale=*/1,
/*index_unboxed=*/false);
@@ -4724,8 +4680,7 @@
LocalVariable* chunk_value = MakeTemporary("chunk_value");
body += LoadLocal(typed_data_base);
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kMayBeInnerPointer);
+ body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += IntConstant(offset_in_bytes);
body += LoadLocal(chunk_value);
body += StoreIndexedTypedData(typed_data_cidd, /*index_scale=*/1,
@@ -4881,9 +4836,8 @@
Fragment body;
if (marshaller.IsPointer(arg_index)) {
- // This can only be Pointer, so it is safe to load the data field.
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer);
+ // This can only be Pointer, so it is always safe to LoadUntagged.
+ body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += ConvertUntaggedToUnboxed(kUnboxedFfiIntPtr);
} else if (marshaller.IsHandle(arg_index)) {
body += WrapHandle();
@@ -5026,9 +4980,8 @@
Z, Class::Handle(IG->object_store()->ffi_pointer_class()))
->context_variables()[0]));
- // This can only be Pointer, so it is safe to load the data field.
- body += LoadNativeField(Slot::PointerBase_data(),
- InnerPointerAccess::kCannotBeInnerPointer);
+ // This can only be Pointer, so it is always safe to LoadUntagged.
+ body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += ConvertUntaggedToUnboxed(kUnboxedFfiIntPtr);
if (marshaller.PassTypedData()) {
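
Note: the typed-data-view hunks above restore the inner-pointer update for views (load the backing store's data pointer, add the view's offset, store the result into the view) together with its no-GC window. A plain-struct model of the invariant being protected; real heap objects and the moving GC are of course not representable here:

    #include <cstdint>

    // Plain structs stand in for heap objects; `data` is the view's inner
    // pointer into its backing store's payload.
    struct DemoPointerBase { uint8_t* data; };

    void InitializeViewData(DemoPointerBase* view,
                            const DemoPointerBase* backing,
                            intptr_t offset_in_bytes) {
      // Load + add + store with nothing in between: if a GC could move
      // `backing` after the load of backing->data, the stored pointer would
      // dangle. Hence the "no GC" warning above.
      view->data = backing->data + offset_in_bytes;
    }
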
diff --git a/runtime/vm/compiler/frontend/kernel_to_il.h b/runtime/vm/compiler/frontend/kernel_to_il.h
index 702f7b9..77a6457 100644
--- a/runtime/vm/compiler/frontend/kernel_to_il.h
+++ b/runtime/vm/compiler/frontend/kernel_to_il.h
@@ -146,8 +146,7 @@
FlowGraph* BuildGraphOfRecognizedMethod(const Function& function);
- Fragment BuildTypedDataMemMove(const Function& function,
- intptr_t element_size);
+ Fragment BuildTypedDataMemMove(const Function& function, intptr_t cid);
Fragment BuildTypedDataViewFactoryConstructor(const Function& function,
classid_t cid);
Fragment BuildTypedDataFactoryConstructor(const Function& function,
diff --git a/runtime/vm/compiler/recognized_methods_list.h b/runtime/vm/compiler/recognized_methods_list.h
index 479a226..a78dc61 100644
--- a/runtime/vm/compiler/recognized_methods_list.h
+++ b/runtime/vm/compiler/recognized_methods_list.h
@@ -100,20 +100,20 @@
TypedData_UnmodifiableInt32x4ArrayView_factory, 0xf66c6993) \
V(_UnmodifiableFloat64x2ArrayView, ._, \
TypedData_UnmodifiableFloat64x2ArrayView_factory, 0x6d9ae5fb) \
- V(Int8List, ., TypedData_Int8Array_factory, 0x65ff48e7) \
- V(Uint8List, ., TypedData_Uint8Array_factory, 0xedd566ae) \
- V(Uint8ClampedList, ., TypedData_Uint8ClampedArray_factory, 0x27f7a7b4) \
- V(Int16List, ., TypedData_Int16Array_factory, 0xd0bf0952) \
- V(Uint16List, ., TypedData_Uint16Array_factory, 0x3ca76bc9) \
- V(Int32List, ., TypedData_Int32Array_factory, 0x1b81637f) \
- V(Uint32List, ., TypedData_Uint32Array_factory, 0x2b210aea) \
- V(Int64List, ., TypedData_Int64Array_factory, 0xfb634e8e) \
- V(Uint64List, ., TypedData_Uint64Array_factory, 0xe3c14057) \
- V(Float32List, ., TypedData_Float32Array_factory, 0xa381d95d) \
- V(Float64List, ., TypedData_Float64Array_factory, 0xa0b7bef0) \
- V(Float32x4List, ., TypedData_Float32x4Array_factory, 0x0a6eebe7) \
- V(Int32x4List, ., TypedData_Int32x4Array_factory, 0x5a0924cd) \
- V(Float64x2List, ., TypedData_Float64x2Array_factory, 0xecbc6fc9) \
+ V(Int8List, ., TypedData_Int8Array_factory, 0x65ff4ca8) \
+ V(Uint8List, ., TypedData_Uint8Array_factory, 0xedd56a6f) \
+ V(Uint8ClampedList, ., TypedData_Uint8ClampedArray_factory, 0x27f7ab75) \
+ V(Int16List, ., TypedData_Int16Array_factory, 0xd0bf0d13) \
+ V(Uint16List, ., TypedData_Uint16Array_factory, 0x3ca76f8a) \
+ V(Int32List, ., TypedData_Int32Array_factory, 0x1b816740) \
+ V(Uint32List, ., TypedData_Uint32Array_factory, 0x2b210eab) \
+ V(Int64List, ., TypedData_Int64Array_factory, 0xfb63524f) \
+ V(Uint64List, ., TypedData_Uint64Array_factory, 0xe3c14418) \
+ V(Float32List, ., TypedData_Float32Array_factory, 0xa381dd1e) \
+ V(Float64List, ., TypedData_Float64Array_factory, 0xa0b7c2b1) \
+ V(Float32x4List, ., TypedData_Float32x4Array_factory, 0x0a6eefa8) \
+ V(Int32x4List, ., TypedData_Int32x4Array_factory, 0x5a09288e) \
+ V(Float64x2List, ., TypedData_Float64x2Array_factory, 0xecbc738a) \
V(_TypedListBase, _memMove1, TypedData_memMove1, 0xd2767fb0) \
V(_TypedListBase, _memMove2, TypedData_memMove2, 0xed382bb6) \
V(_TypedListBase, _memMove4, TypedData_memMove4, 0xcfe37726) \
@@ -544,22 +544,22 @@
kGrowableObjectArrayCid, 0x7be49a4e) \
V(_GrowableListWithData, _GrowableList, ._withData, kGrowableObjectArrayCid, \
0x19394cc1) \
- V(_Int8ArrayFactory, Int8List, ., kTypedDataInt8ArrayCid, 0x65ff48e7) \
- V(_Uint8ArrayFactory, Uint8List, ., kTypedDataUint8ArrayCid, 0xedd566ae) \
+ V(_Int8ArrayFactory, Int8List, ., kTypedDataInt8ArrayCid, 0x65ff4ca8) \
+ V(_Uint8ArrayFactory, Uint8List, ., kTypedDataUint8ArrayCid, 0xedd56a6f) \
V(_Uint8ClampedArrayFactory, Uint8ClampedList, ., \
- kTypedDataUint8ClampedArrayCid, 0x27f7a7b4) \
- V(_Int16ArrayFactory, Int16List, ., kTypedDataInt16ArrayCid, 0xd0bf0952) \
- V(_Uint16ArrayFactory, Uint16List, ., kTypedDataUint16ArrayCid, 0x3ca76bc9) \
- V(_Int32ArrayFactory, Int32List, ., kTypedDataInt32ArrayCid, 0x1b81637f) \
- V(_Uint32ArrayFactory, Uint32List, ., kTypedDataUint32ArrayCid, 0x2b210aea) \
- V(_Int64ArrayFactory, Int64List, ., kTypedDataInt64ArrayCid, 0xfb634e8e) \
- V(_Uint64ArrayFactory, Uint64List, ., kTypedDataUint64ArrayCid, 0xe3c14057) \
+ kTypedDataUint8ClampedArrayCid, 0x27f7ab75) \
+ V(_Int16ArrayFactory, Int16List, ., kTypedDataInt16ArrayCid, 0xd0bf0d13) \
+ V(_Uint16ArrayFactory, Uint16List, ., kTypedDataUint16ArrayCid, 0x3ca76f8a) \
+ V(_Int32ArrayFactory, Int32List, ., kTypedDataInt32ArrayCid, 0x1b816740) \
+ V(_Uint32ArrayFactory, Uint32List, ., kTypedDataUint32ArrayCid, 0x2b210eab) \
+ V(_Int64ArrayFactory, Int64List, ., kTypedDataInt64ArrayCid, 0xfb63524f) \
+ V(_Uint64ArrayFactory, Uint64List, ., kTypedDataUint64ArrayCid, 0xe3c14418) \
V(_Float64ArrayFactory, Float64List, ., kTypedDataFloat64ArrayCid, \
- 0xa0b7bef0) \
+ 0xa0b7c2b1) \
V(_Float32ArrayFactory, Float32List, ., kTypedDataFloat32ArrayCid, \
- 0xa381d95d) \
+ 0xa381dd1e) \
V(_Float32x4ArrayFactory, Float32x4List, ., kTypedDataFloat32x4ArrayCid, \
- 0x0a6eebe7)
+ 0x0a6eefa8)
// clang-format on
diff --git a/runtime/vm/deopt_instructions.cc b/runtime/vm/deopt_instructions.cc
index eabad37..c4c2ab1 100644
--- a/runtime/vm/deopt_instructions.cc
+++ b/runtime/vm/deopt_instructions.cc
@@ -1112,9 +1112,6 @@
deopt_instr =
new (zone()) DeoptWordInstr(ToCpuRegisterSource(source_loc));
break;
-#if defined(TARGET_ARCH_IS_64_BIT)
- case kUntagged:
-#endif
case kUnboxedInt64: {
if (source_loc.IsPairLocation()) {
PairLocation* pair = source_loc.AsPairLocation();
@@ -1128,9 +1125,6 @@
}
break;
}
-#if defined(TARGET_ARCH_IS_32_BIT)
- case kUntagged:
-#endif
case kUnboxedInt32:
deopt_instr =
new (zone()) DeoptInt32Instr(ToCpuRegisterSource(source_loc));
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 7cc8a4c..f177256 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -9020,52 +9020,6 @@
}
}
-bool Function::IsTypedDataViewFactory() const {
- switch (recognized_kind()) {
- case MethodRecognizer::kTypedData_ByteDataView_factory:
- case MethodRecognizer::kTypedData_Int8ArrayView_factory:
- case MethodRecognizer::kTypedData_Uint8ArrayView_factory:
- case MethodRecognizer::kTypedData_Uint8ClampedArrayView_factory:
- case MethodRecognizer::kTypedData_Int16ArrayView_factory:
- case MethodRecognizer::kTypedData_Uint16ArrayView_factory:
- case MethodRecognizer::kTypedData_Int32ArrayView_factory:
- case MethodRecognizer::kTypedData_Uint32ArrayView_factory:
- case MethodRecognizer::kTypedData_Int64ArrayView_factory:
- case MethodRecognizer::kTypedData_Uint64ArrayView_factory:
- case MethodRecognizer::kTypedData_Float32ArrayView_factory:
- case MethodRecognizer::kTypedData_Float64ArrayView_factory:
- case MethodRecognizer::kTypedData_Float32x4ArrayView_factory:
- case MethodRecognizer::kTypedData_Int32x4ArrayView_factory:
- case MethodRecognizer::kTypedData_Float64x2ArrayView_factory:
- return true;
- default:
- return false;
- }
-}
-
-bool Function::IsUnmodifiableTypedDataViewFactory() const {
- switch (recognized_kind()) {
- case MethodRecognizer::kTypedData_UnmodifiableByteDataView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableInt8ArrayView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableUint8ArrayView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableUint8ClampedArrayView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableInt16ArrayView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableUint16ArrayView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableInt32ArrayView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableUint32ArrayView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableInt64ArrayView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableUint64ArrayView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableFloat32ArrayView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableFloat64ArrayView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableFloat32x4ArrayView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableInt32x4ArrayView_factory:
- case MethodRecognizer::kTypedData_UnmodifiableFloat64x2ArrayView_factory:
- return true;
- default:
- return false;
- }
-}
-
bool Function::ForceOptimize() const {
if (RecognizedKindForceOptimize() || IsFfiTrampoline() ||
IsTypedDataViewFactory() || IsUnmodifiableTypedDataViewFactory()) {
@@ -9153,7 +9107,6 @@
case MethodRecognizer::kTypedData_memMove4:
case MethodRecognizer::kTypedData_memMove8:
case MethodRecognizer::kTypedData_memMove16:
- case MethodRecognizer::kMemCopy:
// Prevent the GC from running so that the operation is atomic from
// a GC point of view. Always double check implementation in
// kernel_to_il.cc that no GC can happen in between the relevant IL
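[Note: the memMove recognized methods in the hunk above are force-optimized so that, as the in-source comment says, the copy stays atomic with respect to the GC. For orientation only, a minimal Dart sketch of the user-level pattern that reaches these helpers, assuming the usual lowering of typed-list setRange to a VM memory move:]

    import 'dart:typed_data';

    void main() {
      final src = Float64List.fromList([1.0, 2.0, 3.0]);
      final dst = Float64List(3);
      // setRange between two typed lists of the same element type is the
      // copy that the force-optimized memMove helpers back in the VM.
      dst.setRange(0, src.length, src);
      print(dst); // [1.0, 2.0, 3.0]
    }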
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index 583b645..5a4ab9a 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -3928,8 +3928,23 @@
return modifier() == UntaggedFunction::kAsyncGen;
}
- bool IsTypedDataViewFactory() const;
- bool IsUnmodifiableTypedDataViewFactory() const;
+ bool IsTypedDataViewFactory() const {
+ if (is_native() && kind() == UntaggedFunction::kConstructor) {
+ // This is a native factory constructor.
+ const Class& klass = Class::Handle(Owner());
+ return IsTypedDataViewClassId(klass.id());
+ }
+ return false;
+ }
+
+ bool IsUnmodifiableTypedDataViewFactory() const {
+ if (is_native() && kind() == UntaggedFunction::kConstructor) {
+ // This is a native factory constructor.
+ const Class& klass = Class::Handle(Owner());
+ return IsUnmodifiableTypedDataViewClassId(klass.id());
+ }
+ return false;
+ }
DART_WARN_UNUSED_RESULT
ErrorPtr VerifyCallEntryPoint() const;
diff --git a/sdk/lib/_internal/vm/lib/ffi_patch.dart b/sdk/lib/_internal/vm/lib/ffi_patch.dart
index cdc8b69..5d02b6a 100644
--- a/sdk/lib/_internal/vm/lib/ffi_patch.dart
+++ b/sdk/lib/_internal/vm/lib/ffi_patch.dart
@@ -580,7 +580,6 @@
Pointer<Int8> elementAt(int index) => Pointer.fromAddress(address + index);
@patch
- @pragma("vm:prefer-inline")
Int8List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@@ -617,7 +616,6 @@
Pointer.fromAddress(address + 2 * index);
@patch
- @pragma("vm:prefer-inline")
Int16List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@@ -654,7 +652,6 @@
Pointer.fromAddress(address + 4 * index);
@patch
- @pragma("vm:prefer-inline")
Int32List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@@ -691,7 +688,6 @@
Pointer.fromAddress(address + 8 * index);
@patch
- @pragma("vm:prefer-inline")
Int64List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@@ -727,7 +723,6 @@
Pointer<Uint8> elementAt(int index) => Pointer.fromAddress(address + index);
@patch
- @pragma("vm:prefer-inline")
Uint8List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@@ -764,7 +759,6 @@
Pointer.fromAddress(address + 2 * index);
@patch
- @pragma("vm:prefer-inline")
Uint16List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@@ -801,7 +795,6 @@
Pointer.fromAddress(address + 4 * index);
@patch
- @pragma("vm:prefer-inline")
Uint32List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@@ -838,7 +831,6 @@
Pointer.fromAddress(address + 8 * index);
@patch
- @pragma("vm:prefer-inline")
Uint64List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@@ -875,7 +867,6 @@
Pointer.fromAddress(address + 4 * index);
@patch
- @pragma("vm:prefer-inline")
Float32List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@@ -912,7 +903,6 @@
Pointer.fromAddress(address + 8 * index);
@patch
- @pragma("vm:prefer-inline")
Float64List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
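[Note: the hunks above drop the vm:prefer-inline pragma from every Pointer.asTypedList patch method. For orientation, a minimal sketch of the asTypedList-plus-setRange pattern whose inlining this revert disables; copyIntoNative is a hypothetical helper name, not part of the SDK:]

    import 'dart:ffi';
    import 'dart:typed_data';

    /// Copies [bytes] into the native buffer at [dest].
    void copyIntoNative(Pointer<Uint8> dest, Uint8List bytes) {
      // View the native memory as a typed list of the required length.
      final Uint8List view = dest.asTypedList(bytes.length);
      // Copying into the view writes directly into the native memory.
      view.setRange(0, bytes.length, bytes);
    }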
diff --git a/sdk/lib/_internal/vm/lib/typed_data_patch.dart b/sdk/lib/_internal/vm/lib/typed_data_patch.dart
index f217cc9..d2674d3 100644
--- a/sdk/lib/_internal/vm/lib/typed_data_patch.dart
+++ b/sdk/lib/_internal/vm/lib/typed_data_patch.dart
@@ -2173,6 +2173,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int8List)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Int8Array_new")
external factory Int8List(int length);
@patch
@@ -2232,6 +2233,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint8List)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Uint8Array_new")
external factory Uint8List(int length);
@patch
@@ -2294,6 +2296,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint8ClampedList)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Uint8ClampedArray_new")
external factory Uint8ClampedList(int length);
@patch
@@ -2358,6 +2361,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int16List)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Int16Array_new")
external factory Int16List(int length);
@patch
@@ -2437,6 +2441,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint16List)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Uint16Array_new")
external factory Uint16List(int length);
@patch
@@ -2516,6 +2521,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int32List)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Int32Array_new")
external factory Int32List(int length);
@patch
@@ -2582,6 +2588,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint32List)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Uint32Array_new")
external factory Uint32List(int length);
@patch
@@ -2648,6 +2655,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int64List)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Int64Array_new")
external factory Int64List(int length);
@patch
@@ -2714,6 +2722,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint64List)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Uint64Array_new")
external factory Uint64List(int length);
@patch
@@ -2780,6 +2789,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float32List)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Float32Array_new")
external factory Float32List(int length);
@patch
@@ -2847,6 +2857,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float64List)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Float64Array_new")
external factory Float64List(int length);
@patch
@@ -2914,6 +2925,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float32x4List)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Float32x4Array_new")
external factory Float32x4List(int length);
@patch
@@ -2980,6 +2992,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int32x4List)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Int32x4Array_new")
external factory Int32x4List(int length);
@patch
@@ -3046,6 +3059,7 @@
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float64x2List)
@pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedData_Float64x2Array_new")
external factory Float64x2List(int length);
@patch
@@ -4339,7 +4353,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int8ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Int8ArrayView_new")
@pragma("vm:idempotent")
external factory _Int8ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4388,7 +4402,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint8ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Uint8ArrayView_new")
@pragma("vm:idempotent")
external factory _Uint8ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4440,7 +4454,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint8ClampedArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Uint8ClampedArrayView_new")
@pragma("vm:idempotent")
external factory _Uint8ClampedArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4494,7 +4508,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int16ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Int16ArrayView_new")
@pragma("vm:idempotent")
external factory _Int16ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4556,7 +4570,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint16ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Uint16ArrayView_new")
@pragma("vm:idempotent")
external factory _Uint16ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4619,7 +4633,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int32ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Int32ArrayView_new")
@pragma("vm:idempotent")
external factory _Int32ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4668,7 +4682,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint32ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Uint32ArrayView_new")
@pragma("vm:idempotent")
external factory _Uint32ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4717,7 +4731,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int64ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Int64ArrayView_new")
@pragma("vm:idempotent")
external factory _Int64ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4766,7 +4780,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint64ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Uint64ArrayView_new")
@pragma("vm:idempotent")
external factory _Uint64ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4815,7 +4829,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float32ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Float32ArrayView_new")
@pragma("vm:idempotent")
external factory _Float32ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4864,7 +4878,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float64ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Float64ArrayView_new")
@pragma("vm:idempotent")
external factory _Float64ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4913,7 +4927,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float32x4ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Float32x4ArrayView_new")
@pragma("vm:idempotent")
external factory _Float32x4ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4960,7 +4974,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int32x4ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Int32x4ArrayView_new")
@pragma("vm:idempotent")
external factory _Int32x4ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5007,7 +5021,7 @@
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float64x2ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Float64x2ArrayView_new")
@pragma("vm:idempotent")
external factory _Float64x2ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5051,7 +5065,7 @@
final class _ByteDataView implements ByteData {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _ByteDataView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_ByteDataView_new")
@pragma("vm:idempotent")
external factory _ByteDataView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5445,18 +5459,14 @@
@patch
abstract class UnmodifiableByteDataView implements ByteData {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableByteDataView(ByteData data) =>
- new _UnmodifiableByteDataView._(
- unsafeCast<_ByteDataView>(data).buffer._data,
- data.offsetInBytes,
- data.lengthInBytes);
+ new _UnmodifiableByteDataView._((data as _ByteDataView).buffer._data,
+ data.offsetInBytes, data.lengthInBytes);
}
@patch
abstract class UnmodifiableUint8ListView implements Uint8List {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableUint8ListView(Uint8List list) =>
new _UnmodifiableUint8ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5467,7 +5477,6 @@
@patch
abstract class UnmodifiableInt8ListView implements Int8List {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableInt8ListView(Int8List list) =>
new _UnmodifiableInt8ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5478,7 +5487,6 @@
@patch
abstract class UnmodifiableUint8ClampedListView implements Uint8ClampedList {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableUint8ClampedListView(Uint8ClampedList list) =>
new _UnmodifiableUint8ClampedArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5489,7 +5497,6 @@
@patch
abstract class UnmodifiableUint16ListView implements Uint16List {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableUint16ListView(Uint16List list) =>
new _UnmodifiableUint16ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5500,7 +5507,6 @@
@patch
abstract class UnmodifiableInt16ListView implements Int16List {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableInt16ListView(Int16List list) =>
new _UnmodifiableInt16ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5511,7 +5517,6 @@
@patch
abstract class UnmodifiableUint32ListView implements Uint32List {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableUint32ListView(Uint32List list) =>
new _UnmodifiableUint32ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5522,7 +5527,6 @@
@patch
abstract class UnmodifiableInt32ListView implements Int32List {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableInt32ListView(Int32List list) =>
new _UnmodifiableInt32ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5533,7 +5537,6 @@
@patch
abstract class UnmodifiableUint64ListView implements Uint64List {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableUint64ListView(Uint64List list) =>
new _UnmodifiableUint64ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5544,7 +5547,6 @@
@patch
abstract class UnmodifiableInt64ListView implements Int64List {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableInt64ListView(Int64List list) =>
new _UnmodifiableInt64ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5555,7 +5557,6 @@
@patch
abstract class UnmodifiableInt32x4ListView implements Int32x4List {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableInt32x4ListView(Int32x4List list) =>
new _UnmodifiableInt32x4ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5566,7 +5567,6 @@
@patch
abstract class UnmodifiableFloat32x4ListView implements Float32x4List {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableFloat32x4ListView(Float32x4List list) =>
new _UnmodifiableFloat32x4ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5577,7 +5577,6 @@
@patch
abstract class UnmodifiableFloat64x2ListView implements Float64x2List {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableFloat64x2ListView(Float64x2List list) =>
new _UnmodifiableFloat64x2ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5588,7 +5587,6 @@
@patch
abstract class UnmodifiableFloat32ListView implements Float32List {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableFloat32ListView(Float32List list) =>
new _UnmodifiableFloat32ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5599,7 +5597,6 @@
@patch
abstract class UnmodifiableFloat64ListView implements Float64List {
@patch
- @pragma("vm:prefer-inline")
factory UnmodifiableFloat64ListView(Float64List list) =>
new _UnmodifiableFloat64ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5612,7 +5609,7 @@
implements UnmodifiableInt8ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableInt8ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Int8ArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableInt8ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5629,7 +5626,7 @@
implements UnmodifiableUint8ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableUint8ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Uint8ArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableUint8ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5646,7 +5643,7 @@
implements UnmodifiableUint8ClampedListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableUint8ClampedArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Uint8ClampedArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableUint8ClampedArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5663,7 +5660,7 @@
implements UnmodifiableInt16ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableInt16ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Int16ArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableInt16ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5680,7 +5677,7 @@
implements UnmodifiableUint16ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableUint16ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Uint16ArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableUint16ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5697,7 +5694,7 @@
implements UnmodifiableInt32ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableInt32ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Int32ArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableInt32ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5714,7 +5711,7 @@
implements UnmodifiableUint32ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableUint32ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Uint32ArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableUint32ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5731,7 +5728,7 @@
implements UnmodifiableInt64ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableInt64ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Int64ArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableInt64ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5748,7 +5745,7 @@
implements UnmodifiableUint64ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableUint64ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Uint64ArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableUint64ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5765,7 +5762,7 @@
implements UnmodifiableFloat32ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableFloat32ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Float32ArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableFloat32ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5782,7 +5779,7 @@
implements UnmodifiableFloat64ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableFloat64ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Float64ArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableFloat64ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5799,7 +5796,7 @@
implements UnmodifiableFloat32x4ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableFloat32x4ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Float32x4ArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableFloat32x4ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5816,7 +5813,7 @@
implements UnmodifiableInt32x4ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableInt32x4ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Int32x4ArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableInt32x4ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5833,7 +5830,7 @@
implements UnmodifiableFloat64x2ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableFloat64x2ArrayView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_Float64x2ArrayView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableFloat64x2ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5850,7 +5847,7 @@
implements UnmodifiableByteDataView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableByteDataView)
- @pragma("vm:prefer-inline")
+ @pragma("vm:external-name", "TypedDataView_ByteDataView_new")
@pragma("vm:idempotent")
external factory _UnmodifiableByteDataView._(
_TypedList buffer, int offsetInBytes, int length);
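[Note: the unmodifiable view factories above are restored to their native (vm:external-name) implementations. For reference, a minimal sketch of their observable behavior, assuming the dart:typed_data API as patched in this SDK:]

    import 'dart:typed_data';

    void main() {
      final data = Uint8List.fromList([1, 2, 3, 4]);
      final view = UnmodifiableUint8ListView(data);
      print(view[0]); // 1
      // Writes through the view throw: the view is read-only.
      // view[0] = 9; // UnsupportedError
    }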