Revert "[VM runtime] Support Smi instances in type test cache."
This reverts commit 6ba3e55eccf343357549f0643b2cb75ccdb53d9b.
Reason for revert: unhappy kernel-precomp bots
Original change's description:
> [VM runtime] Support Smi instances in type test cache.
>
> This adds SubtypeTestCache-based optimizations for type tests against
> * dst_type = FutureOr<T> (when T=int/num)
> * dst_type = T (when T = FutureOr<int/num>)
>
> Remove dangerous LoadClass pseudo assembler instruction (does not work for Smi).
> Handle instantiated void in type tests (along with dynamic and Object).
>
> Change-Id: I0df0fc72ff173b9464d16cc971969132b055a429
> Reviewed-on: https://dart-review.googlesource.com/c/81182
> Commit-Queue: Régis Crelier <regis@google.com>
> Reviewed-by: Martin Kustermann <kustermann@google.com>
TBR=kustermann@google.com,alexmarkov@google.com,regis@google.com
Change-Id: I73be5fc068cd24e0a13ba0872a99a24ab5a8eeca
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://dart-review.googlesource.com/c/81284
Reviewed-by: Régis Crelier <regis@google.com>
Commit-Queue: Régis Crelier <regis@google.com>
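
For context, a minimal Dart sketch (illustrative only, not part of this CL) of the
kind of checks the reverted optimization targeted, where the tested instance is a
small integer (a Smi in the VM) and the destination type involves FutureOr<int/num>:

  import 'dart:async';

  void main() {
    const x = 3;                   // small integers are Smi instances in the VM
    print(x is FutureOr<int>);     // true
    print(x is FutureOr<num>);     // true
    print(x is FutureOr<String>);  // false
  }

With this revert, such Smi instances are no longer recorded in the SubtypeTestCache
and instead fall back to the runtime path (see the changes to UpdateTypeTestCache
and GenerateSlowTypeTestStub below).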
diff --git a/runtime/vm/compiler/assembler/assembler_arm.cc b/runtime/vm/compiler/assembler/assembler_arm.cc
index e35c3c1..1f56574 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm.cc
@@ -1825,6 +1825,12 @@
ldr(result, Address(result, class_id, LSL, kSizeOfClassPairLog2));
}
+void Assembler::LoadClass(Register result, Register object, Register scratch) {
+ ASSERT(scratch != result);
+ LoadClassId(scratch, object);
+ LoadClassById(result, scratch);
+}
+
void Assembler::CompareClassId(Register object,
intptr_t class_id,
Register scratch) {
diff --git a/runtime/vm/compiler/assembler/assembler_arm.h b/runtime/vm/compiler/assembler/assembler_arm.h
index 93da8c4..e78943db 100644
--- a/runtime/vm/compiler/assembler/assembler_arm.h
+++ b/runtime/vm/compiler/assembler/assembler_arm.h
@@ -860,6 +860,7 @@
void LoadClassId(Register result, Register object, Condition cond = AL);
void LoadClassById(Register result, Register class_id);
+ void LoadClass(Register result, Register object, Register scratch);
void CompareClassId(Register object, intptr_t class_id, Register scratch);
void LoadClassIdMayBeSmi(Register result, Register object);
void LoadTaggedClassIdMayBeSmi(Register result, Register object);
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc
index c258d9a..cdd45c8 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -1115,6 +1115,12 @@
ldr(result, Address(result, class_id, UXTX, Address::Scaled));
}
+void Assembler::LoadClass(Register result, Register object) {
+ ASSERT(object != TMP);
+ LoadClassId(TMP, object);
+ LoadClassById(result, TMP);
+}
+
void Assembler::CompareClassId(Register object,
intptr_t class_id,
Register scratch) {
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index 7807f87..785b0ac 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -1531,6 +1531,7 @@
void LoadClassId(Register result, Register object);
// Overwrites class_id register (it will be tagged afterwards).
void LoadClassById(Register result, Register class_id);
+ void LoadClass(Register result, Register object);
void CompareClassId(Register object,
intptr_t class_id,
Register scratch = kNoRegister);
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.cc b/runtime/vm/compiler/assembler/assembler_ia32.cc
index e2074e5..1f528e18 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.cc
+++ b/runtime/vm/compiler/assembler/assembler_ia32.cc
@@ -2434,6 +2434,12 @@
movl(result, Address(result, class_id, TIMES_8, 0));
}
+void Assembler::LoadClass(Register result, Register object, Register scratch) {
+ ASSERT(scratch != result);
+ LoadClassId(scratch, object);
+ LoadClassById(result, scratch);
+}
+
void Assembler::CompareClassId(Register object,
intptr_t class_id,
Register scratch) {
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.h b/runtime/vm/compiler/assembler/assembler_ia32.h
index cb75d28..c3b8492 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.h
+++ b/runtime/vm/compiler/assembler/assembler_ia32.h
@@ -666,6 +666,8 @@
void LoadClassById(Register result, Register class_id);
+ void LoadClass(Register result, Register object, Register scratch);
+
void CompareClassId(Register object, intptr_t class_id, Register scratch);
void LoadClassIdMayBeSmi(Register result, Register object);
diff --git a/runtime/vm/compiler/assembler/assembler_x64.cc b/runtime/vm/compiler/assembler/assembler_x64.cc
index c91098b..d3df2c3 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64.cc
@@ -1952,6 +1952,11 @@
movq(result, Address(result, class_id, TIMES_8, 0));
}
+void Assembler::LoadClass(Register result, Register object) {
+ LoadClassId(TMP, object);
+ LoadClassById(result, TMP);
+}
+
void Assembler::CompareClassId(Register object,
intptr_t class_id,
Register scratch) {
diff --git a/runtime/vm/compiler/assembler/assembler_x64.h b/runtime/vm/compiler/assembler/assembler_x64.h
index 6090c76..70f7d9e 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.h
+++ b/runtime/vm/compiler/assembler/assembler_x64.h
@@ -782,6 +782,8 @@
// Overwrites class_id register (it will be tagged afterwards).
void LoadClassById(Register result, Register class_id);
+ void LoadClass(Register result, Register object);
+
void CompareClassId(Register object,
intptr_t class_id,
Register scratch = kNoRegister);
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
index 729d9ff..b6a070f 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
@@ -248,14 +248,13 @@
ASSERT(type_class.NumTypeArguments() > 0);
const Register kInstanceReg = R0;
Error& bound_error = Error::Handle(zone());
- const Type& smi_type = Type::Handle(zone(), Type::SmiType());
+ const Type& int_type = Type::Handle(zone(), Type::IntType());
const bool smi_is_ok =
- smi_type.IsSubtypeOf(type, &bound_error, NULL, Heap::kOld);
+ int_type.IsSubtypeOf(type, &bound_error, NULL, Heap::kOld);
// Malformed type should have been handled at graph construction time.
ASSERT(smi_is_ok || bound_error.IsNull());
__ tst(kInstanceReg, Operand(kSmiTagMask));
if (smi_is_ok) {
- // Fast case for type = FutureOr<int/num/top-type>.
__ b(is_instance_lbl, EQ);
} else {
__ b(is_not_instance_lbl, EQ);
@@ -287,7 +286,7 @@
ASSERT(!tp_argument.IsMalformed());
if (tp_argument.IsType()) {
ASSERT(tp_argument.HasResolvedTypeClass());
- // Check if type argument is dynamic, Object, or void.
+ // Check if type argument is dynamic or Object.
const Type& object_type = Type::Handle(zone(), Type::ObjectType());
if (object_type.IsSubtypeOf(tp_argument, NULL, NULL, Heap::kOld)) {
// Instance class test only necessary.
@@ -342,7 +341,6 @@
if (smi_class.IsSubtypeOf(Object::null_type_arguments(), type_class,
Object::null_type_arguments(), NULL, NULL,
Heap::kOld)) {
- // Fast case for type = int/num/top-type.
__ b(is_instance_lbl, EQ);
} else {
__ b(is_not_instance_lbl, EQ);
@@ -400,14 +398,7 @@
Label* is_not_instance_lbl) {
__ Comment("Subtype1TestCacheLookup");
const Register kInstanceReg = R0;
-#if defined(DEBUG)
- Label ok;
- __ BranchIfNotSmi(kInstanceReg, &ok);
- __ Breakpoint();
- __ Bind(&ok);
-#endif
- __ LoadClassId(R2, kInstanceReg);
- __ LoadClassById(R1, R2);
+ __ LoadClass(R1, kInstanceReg, R2);
// R1: instance class.
// Check immediate superclass equality.
__ ldr(R2, FieldAddress(R1, Class::super_type_offset()));
@@ -456,13 +447,12 @@
__ ldr(R3, FieldAddress(kTypeArgumentsReg,
TypeArguments::type_at_offset(type_param.index())));
// R3: concrete type of type.
- // Check if type argument is dynamic, Object, or void.
+ // Check if type argument is dynamic.
__ CompareObject(R3, Object::dynamic_type());
__ b(is_instance_lbl, EQ);
__ CompareObject(R3, Type::ZoneHandle(zone(), Type::ObjectType()));
__ b(is_instance_lbl, EQ);
- __ CompareObject(R3, Object::void_type());
- __ b(is_instance_lbl, EQ);
+ // TODO(regis): Optimize void type as well once allowed as type argument.
// For Smi check quickly against int and num interfaces.
Label not_smi;
@@ -472,8 +462,9 @@
__ b(is_instance_lbl, EQ);
__ CompareObject(R3, Type::ZoneHandle(zone(), Type::Number()));
__ b(is_instance_lbl, EQ);
- // Smi can be handled by type test cache.
- __ Bind(&not_smi);
+ // Smi must be handled in runtime.
+ Label fall_through;
+ __ b(&fall_through);
// If it's guaranteed, by type-parameter bound, that the type parameter will
// never have a value of a function type, then we can safely do a 4-type
@@ -483,18 +474,17 @@
? kTestTypeSixArgs
: kTestTypeFourArgs;
+ __ Bind(&not_smi);
const SubtypeTestCache& type_test_cache = SubtypeTestCache::ZoneHandle(
zone(), GenerateCallSubtypeTestStub(
test_kind, kInstanceReg, kInstantiatorTypeArgumentsReg,
kFunctionTypeArgumentsReg, kTempReg, is_instance_lbl,
is_not_instance_lbl));
+ __ Bind(&fall_through);
return type_test_cache.raw();
}
if (type.IsType()) {
- // Smi is FutureOr<T>, when T is a top type or int or num.
- if (!FLAG_strong || !Class::Handle(type.type_class()).IsFutureOrClass()) {
- __ BranchIfSmi(kInstanceReg, is_not_instance_lbl);
- }
+ __ BranchIfSmi(kInstanceReg, is_not_instance_lbl);
__ ldm(IA, SP,
(1 << kFunctionTypeArgumentsReg) |
(1 << kInstantiatorTypeArgumentsReg));
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
index 4812ac4..eb23398 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
@@ -243,12 +243,11 @@
ASSERT(type_class.NumTypeArguments() > 0);
const Register kInstanceReg = R0;
Error& bound_error = Error::Handle(zone());
- const Type& smi_type = Type::Handle(zone(), Type::SmiType());
+ const Type& int_type = Type::Handle(zone(), Type::IntType());
const bool smi_is_ok =
- smi_type.IsSubtypeOf(type, &bound_error, NULL, Heap::kOld);
+ int_type.IsSubtypeOf(type, &bound_error, NULL, Heap::kOld);
// Malformed type should have been handled at graph construction time.
ASSERT(smi_is_ok || bound_error.IsNull());
- // Fast case for type = FutureOr<int/num/top-type>.
__ BranchIfSmi(kInstanceReg,
smi_is_ok ? is_instance_lbl : is_not_instance_lbl);
@@ -279,7 +278,7 @@
ASSERT(!tp_argument.IsMalformed());
if (tp_argument.IsType()) {
ASSERT(tp_argument.HasResolvedTypeClass());
- // Check if type argument is dynamic, Object, or void.
+ // Check if type argument is dynamic or Object.
const Type& object_type = Type::Handle(zone(), Type::ObjectType());
if (object_type.IsSubtypeOf(tp_argument, NULL, NULL, Heap::kOld)) {
// Instance class test only necessary.
@@ -332,7 +331,6 @@
if (smi_class.IsSubtypeOf(Object::null_type_arguments(), type_class,
Object::null_type_arguments(), NULL, NULL,
Heap::kOld)) {
- // Fast case for type = int/num/top-type.
__ BranchIfSmi(kInstanceReg, is_instance_lbl);
} else {
__ BranchIfSmi(kInstanceReg, is_not_instance_lbl);
@@ -390,14 +388,7 @@
Label* is_not_instance_lbl) {
__ Comment("Subtype1TestCacheLookup");
const Register kInstanceReg = R0;
-#if defined(DEBUG)
- Label ok;
- __ BranchIfNotSmi(kInstanceReg, &ok);
- __ Breakpoint();
- __ Bind(&ok);
-#endif
- __ LoadClassId(TMP, kInstanceReg);
- __ LoadClassById(R1, TMP);
+ __ LoadClass(R1, kInstanceReg);
// R1: instance class.
// Check immediate superclass equality.
__ LoadFieldFromOffset(R2, R1, Class::super_type_offset());
@@ -444,13 +435,12 @@
__ LoadFieldFromOffset(R3, kTypeArgumentsReg,
TypeArguments::type_at_offset(type_param.index()));
// R3: concrete type of type.
- // Check if type argument is dynamic, Object, or void.
+ // Check if type argument is dynamic.
__ CompareObject(R3, Object::dynamic_type());
__ b(is_instance_lbl, EQ);
__ CompareObject(R3, Type::ZoneHandle(zone(), Type::ObjectType()));
__ b(is_instance_lbl, EQ);
- __ CompareObject(R3, Object::void_type());
- __ b(is_instance_lbl, EQ);
+ // TODO(regis): Optimize void type as well once allowed as type argument.
// For Smi check quickly against int and num interfaces.
Label not_smi;
@@ -459,8 +449,9 @@
__ b(is_instance_lbl, EQ);
__ CompareObject(R3, Type::ZoneHandle(zone(), Type::Number()));
__ b(is_instance_lbl, EQ);
- // Smi can be handled by type test cache.
- __ Bind(&not_smi);
+ // Smi must be handled in runtime.
+ Label fall_through;
+ __ b(&fall_through);
// If it's guaranteed, by type-parameter bound, that the type parameter will
// never have a value of a function type, then we can safely do a 4-type
@@ -470,18 +461,17 @@
? kTestTypeSixArgs
: kTestTypeFourArgs;
+ __ Bind(&not_smi);
const SubtypeTestCache& type_test_cache = SubtypeTestCache::ZoneHandle(
zone(), GenerateCallSubtypeTestStub(
test_kind, kInstanceReg, kInstantiatorTypeArgumentsReg,
kFunctionTypeArgumentsReg, kTempReg, is_instance_lbl,
is_not_instance_lbl));
+ __ Bind(&fall_through);
return type_test_cache.raw();
}
if (type.IsType()) {
- // Smi is FutureOr<T>, when T is a top type or int or num.
- if (!FLAG_strong || !Class::Handle(type.type_class()).IsFutureOrClass()) {
- __ BranchIfSmi(kInstanceReg, is_not_instance_lbl);
- }
+ __ BranchIfSmi(kInstanceReg, is_not_instance_lbl);
__ ldp(kFunctionTypeArgumentsReg, kInstantiatorTypeArgumentsReg,
Address(SP, 0 * kWordSize, Address::PairOffset));
// Uninstantiated type class is known at compile time, but the type
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
index f6dc95a..d8de557 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
@@ -256,14 +256,13 @@
ASSERT(type_class.NumTypeArguments() > 0);
const Register kInstanceReg = EAX;
Error& bound_error = Error::Handle(zone());
- const Type& smi_type = Type::Handle(zone(), Type::SmiType());
+ const Type& int_type = Type::Handle(zone(), Type::IntType());
const bool smi_is_ok =
- smi_type.IsSubtypeOf(type, &bound_error, NULL, Heap::kOld);
+ int_type.IsSubtypeOf(type, &bound_error, NULL, Heap::kOld);
// Malformed type should have been handled at graph construction time.
ASSERT(smi_is_ok || bound_error.IsNull());
__ testl(kInstanceReg, Immediate(kSmiTagMask));
if (smi_is_ok) {
- // Fast case for type = FutureOr<int/num/top-type>.
__ j(ZERO, is_instance_lbl);
} else {
__ j(ZERO, is_not_instance_lbl);
@@ -295,7 +294,7 @@
ASSERT(!tp_argument.IsMalformed());
if (tp_argument.IsType()) {
ASSERT(tp_argument.HasResolvedTypeClass());
- // Check if type argument is dynamic, Object, or void.
+ // Check if type argument is dynamic or Object.
const Type& object_type = Type::Handle(zone(), Type::ObjectType());
if (object_type.IsSubtypeOf(tp_argument, NULL, NULL, Heap::kOld)) {
// Instance class test only necessary.
@@ -348,7 +347,6 @@
if (smi_class.IsSubtypeOf(Object::null_type_arguments(), type_class,
Object::null_type_arguments(), NULL, NULL,
Heap::kOld)) {
- // Fast case for type = int/num/top-type.
__ j(ZERO, is_instance_lbl);
} else {
__ j(ZERO, is_not_instance_lbl);
@@ -406,14 +404,7 @@
Label* is_not_instance_lbl) {
__ Comment("Subtype1TestCacheLookup");
const Register kInstanceReg = EAX;
-#if defined(DEBUG)
- Label ok;
- __ BranchIfNotSmi(kInstanceReg, &ok);
- __ Breakpoint();
- __ Bind(&ok);
-#endif
- __ LoadClassId(EDI, kInstanceReg);
- __ LoadClassById(ECX, EDI);
+ __ LoadClass(ECX, kInstanceReg, EDI);
// ECX: instance class.
// Check immediate superclass equality.
__ movl(EDI, FieldAddress(ECX, Class::super_type_offset()));
@@ -464,13 +455,12 @@
__ movl(EDI, FieldAddress(kTypeArgumentsReg, TypeArguments::type_at_offset(
type_param.index())));
// EDI: concrete type of type.
- // Check if type argument is dynamic, Object, or void.
+ // Check if type argument is dynamic.
__ CompareObject(EDI, Object::dynamic_type());
__ j(EQUAL, is_instance_lbl);
__ CompareObject(EDI, Type::ZoneHandle(zone(), Type::ObjectType()));
__ j(EQUAL, is_instance_lbl);
- __ CompareObject(EDI, Object::void_type());
- __ j(EQUAL, is_instance_lbl);
+ // TODO(regis): Optimize void type as well once allowed as type argument.
// For Smi check quickly against int and num interfaces.
Label not_smi;
@@ -480,8 +470,9 @@
__ j(EQUAL, is_instance_lbl);
__ CompareObject(EDI, Type::ZoneHandle(zone(), Type::Number()));
__ j(EQUAL, is_instance_lbl);
- // Smi can be handled by type test cache.
- __ Bind(&not_smi);
+ // Smi must be handled in runtime.
+ Label fall_through;
+ __ jmp(&fall_through);
// If it's guaranteed, by type-parameter bound, that the type parameter will
// never have a value of a function type.
@@ -490,19 +481,18 @@
? kTestTypeSixArgs
: kTestTypeFourArgs;
+ __ Bind(&not_smi);
const SubtypeTestCache& type_test_cache = SubtypeTestCache::ZoneHandle(
zone(), GenerateCallSubtypeTestStub(
test_kind, kInstanceReg, kInstantiatorTypeArgumentsReg,
kFunctionTypeArgumentsReg, kTempReg, is_instance_lbl,
is_not_instance_lbl));
+ __ Bind(&fall_through);
return type_test_cache.raw();
}
if (type.IsType()) {
- // Smi is FutureOr<T>, when T is a top type or int or num.
- if (!FLAG_strong || !Class::Handle(type.type_class()).IsFutureOrClass()) {
- __ testl(kInstanceReg, Immediate(kSmiTagMask)); // Is instance Smi?
- __ j(ZERO, is_not_instance_lbl);
- }
+ __ testl(kInstanceReg, Immediate(kSmiTagMask)); // Is instance Smi?
+ __ j(ZERO, is_not_instance_lbl);
__ movl(kInstantiatorTypeArgumentsReg, Address(ESP, 1 * kWordSize));
__ movl(kFunctionTypeArgumentsReg, Address(ESP, 0 * kWordSize));
// Uninstantiated type class is known at compile time, but the type
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
index 2471b4e..d3e58d8 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler_x64.cc
@@ -248,14 +248,13 @@
ASSERT(type_class.NumTypeArguments() > 0);
const Register kInstanceReg = RAX;
Error& bound_error = Error::Handle(zone());
- const Type& smi_type = Type::Handle(zone(), Type::SmiType());
+ const Type& int_type = Type::Handle(zone(), Type::IntType());
const bool smi_is_ok =
- smi_type.IsSubtypeOf(type, &bound_error, NULL, Heap::kOld);
+ int_type.IsSubtypeOf(type, &bound_error, NULL, Heap::kOld);
// Malformed type should have been handled at graph construction time.
ASSERT(smi_is_ok || bound_error.IsNull());
__ testq(kInstanceReg, Immediate(kSmiTagMask));
if (smi_is_ok) {
- // Fast case for type = FutureOr<int/num/top-type>.
__ j(ZERO, is_instance_lbl);
} else {
__ j(ZERO, is_not_instance_lbl);
@@ -288,7 +287,7 @@
ASSERT(!tp_argument.IsMalformed());
if (tp_argument.IsType()) {
ASSERT(tp_argument.HasResolvedTypeClass());
- // Check if type argument is dynamic, Object, or void.
+ // Check if type argument is dynamic or Object.
const Type& object_type = Type::Handle(zone(), Type::ObjectType());
if (object_type.IsSubtypeOf(tp_argument, NULL, NULL, Heap::kOld)) {
// Instance class test only necessary.
@@ -346,7 +345,6 @@
if (smi_class.IsSubtypeOf(Object::null_type_arguments(), type_class,
Object::null_type_arguments(), NULL, NULL,
Heap::kOld)) {
- // Fast case for type = int/num/top-type.
__ j(ZERO, is_instance_lbl);
} else {
__ j(ZERO, is_not_instance_lbl);
@@ -408,14 +406,7 @@
Label* is_not_instance_lbl) {
__ Comment("Subtype1TestCacheLookup");
const Register kInstanceReg = RAX;
-#if defined(DEBUG)
- Label ok;
- __ BranchIfNotSmi(kInstanceReg, &ok);
- __ Breakpoint();
- __ Bind(&ok);
-#endif
- __ LoadClassId(TMP, kInstanceReg);
- __ LoadClassById(R10, TMP);
+ __ LoadClass(R10, kInstanceReg);
// R10: instance class.
// Check immediate superclass equality.
__ movq(R13, FieldAddress(R10, Class::super_type_offset()));
@@ -467,14 +458,13 @@
__ movq(RDI, FieldAddress(kTypeArgumentsReg, TypeArguments::type_at_offset(
type_param.index())));
// RDI: Concrete type of type.
- // Check if type argument is dynamic, Object, or void.
+ // Check if type argument is dynamic.
__ CompareObject(RDI, Object::dynamic_type());
__ j(EQUAL, is_instance_lbl);
const Type& object_type = Type::ZoneHandle(zone(), Type::ObjectType());
__ CompareObject(RDI, object_type);
__ j(EQUAL, is_instance_lbl);
- __ CompareObject(RDI, Object::void_type());
- __ j(EQUAL, is_instance_lbl);
+ // TODO(regis): Optimize void type as well once allowed as type argument.
// For Smi check quickly against int and num interfaces.
Label not_smi;
@@ -484,8 +474,9 @@
__ j(EQUAL, is_instance_lbl);
__ CompareObject(RDI, Type::ZoneHandle(zone(), Type::Number()));
__ j(EQUAL, is_instance_lbl);
- // Smi can be handled by type test cache.
- __ Bind(&not_smi);
+ // Smi must be handled in runtime.
+ Label fall_through;
+ __ jmp(&fall_through);
// If it's guaranteed, by type-parameter bound, that the type parameter will
// never have a value of a function type, then we can safely do a 4-type
@@ -495,19 +486,18 @@
? kTestTypeSixArgs
: kTestTypeFourArgs;
+ __ Bind(&not_smi);
const SubtypeTestCache& type_test_cache = SubtypeTestCache::ZoneHandle(
zone(), GenerateCallSubtypeTestStub(
test_kind, kInstanceReg, kInstantiatorTypeArgumentsReg,
kFunctionTypeArgumentsReg, kTempReg, is_instance_lbl,
is_not_instance_lbl));
+ __ Bind(&fall_through);
return type_test_cache.raw();
}
if (type.IsType()) {
- // Smi is FutureOr<T>, when T is a top type or int or num.
- if (!FLAG_strong || !Class::Handle(type.type_class()).IsFutureOrClass()) {
- __ testq(kInstanceReg, Immediate(kSmiTagMask)); // Is instance Smi?
- __ j(ZERO, is_not_instance_lbl);
- }
+ __ testq(kInstanceReg, Immediate(kSmiTagMask)); // Is instance Smi?
+ __ j(ZERO, is_not_instance_lbl);
// Uninstantiated type class is known at compile time, but the type
// arguments are determined at runtime by the instantiator(s).
return GenerateCallSubtypeTestStub(kTestTypeFourArgs, kInstanceReg,
diff --git a/runtime/vm/runtime_entry.cc b/runtime/vm/runtime_entry.cc
index 5994aed..ad00ea5 100644
--- a/runtime/vm/runtime_entry.cc
+++ b/runtime/vm/runtime_entry.cc
@@ -651,17 +651,18 @@
}
return;
}
- Class& instance_class = Class::Handle(zone);
if (instance.IsSmi()) {
- instance_class = Smi::Class();
- } else {
- instance_class = instance.clazz();
+ if (FLAG_trace_type_checks) {
+ OS::PrintErr("UpdateTypeTestCache: instance is Smi\n");
+ }
+ return;
}
// If the type is uninstantiated and refers to parent function type
// parameters, the function_type_arguments have been canonicalized
// when concatenated.
ASSERT(function_type_arguments.IsNull() ||
function_type_arguments.IsCanonical());
+ const Class& instance_class = Class::Handle(zone, instance.clazz());
auto& instance_class_id_or_function = Object::Handle(zone);
auto& instance_type_arguments = TypeArguments::Handle(zone);
auto& instance_parent_function_type_arguments = TypeArguments::Handle(zone);
@@ -747,16 +748,14 @@
" Updated test cache %p ix: %" Pd
" with "
"(cid-or-fun: %p, type-args: %p, i-type-args: %p, f-type-args: %p, "
- "p-type-args: %p, d-type-args: %p, result: %s)\n"
+ "result: %s)\n"
" instance [class: (%p '%s' cid: %" Pd
"), type-args: %p %s]\n"
" test-type [class: (%p '%s' cid: %" Pd
"), i-type-args: %p %s, f-type-args: %p %s]\n",
new_cache.raw(), len, instance_class_id_or_function.raw(),
instance_type_arguments.raw(), instantiator_type_arguments.raw(),
- function_type_arguments.raw(),
- instance_parent_function_type_arguments.raw(),
- instance_delayed_type_arguments.raw(), result.ToCString(),
+ instantiator_type_arguments.raw(), result.ToCString(),
instance_class.raw(), instance_class_name.ToCString(),
instance_class.id(), instance_type_arguments.raw(),
instance_type_arguments.ToCString(), type_class.raw(),
diff --git a/runtime/vm/stub_code_arm.cc b/runtime/vm/stub_code_arm.cc
index 2ddd1dc..8791013 100644
--- a/runtime/vm/stub_code_arm.cc
+++ b/runtime/vm/stub_code_arm.cc
@@ -2007,11 +2007,7 @@
__ AddImmediate(kCacheReg, Array::data_offset() - kHeapObjectTag);
Label loop, not_closure;
- if (n >= 4) {
- __ LoadClassIdMayBeSmi(kInstanceCidOrFunction, kInstanceReg);
- } else {
- __ LoadClassId(kInstanceCidOrFunction, kInstanceReg);
- }
+ __ LoadClassId(kInstanceCidOrFunction, kInstanceReg);
__ CompareImmediate(kInstanceCidOrFunction, kClosureCid);
__ b(&not_closure, NE);
@@ -2299,6 +2295,7 @@
void StubCode::GenerateSlowTypeTestStub(Assembler* assembler) {
Label done, call_runtime;
+ const Register kInstanceReg = R0;
const Register kFunctionTypeArgumentsReg = R1;
const Register kDstTypeReg = R8;
const Register kSubtypeTestCacheReg = R3;
@@ -2306,7 +2303,6 @@
__ EnterStubFrame();
#ifdef DEBUG
- const Register kInstanceReg = R0;
// Guaranteed by caller.
Label no_error;
__ CompareObject(kInstanceReg, Object::null_object());
@@ -2315,6 +2311,11 @@
__ Bind(&no_error);
#endif
+ // Need to handle slow cases of [Smi]s here because the
+ // [SubtypeTestCache]-based stubs do not handle [Smi]s.
+ Label non_smi_value;
+ __ BranchIfSmi(kInstanceReg, &call_runtime);
+
// If the subtype-cache is null, it needs to be lazily-created by the runtime.
__ CompareObject(kSubtypeTestCacheReg, Object::null_object());
__ BranchIf(EQUAL, &call_runtime);
diff --git a/runtime/vm/stub_code_arm64.cc b/runtime/vm/stub_code_arm64.cc
index fc87279..8aa8213 100644
--- a/runtime/vm/stub_code_arm64.cc
+++ b/runtime/vm/stub_code_arm64.cc
@@ -2271,11 +2271,7 @@
__ AddImmediate(kCacheReg, Array::data_offset() - kHeapObjectTag);
Label loop, not_closure;
- if (n >= 4) {
- __ LoadClassIdMayBeSmi(kInstanceCidOrFunction, kInstanceReg);
- } else {
- __ LoadClassId(kInstanceCidOrFunction, kInstanceReg);
- }
+ __ LoadClassId(kInstanceCidOrFunction, kInstanceReg);
__ CompareImmediate(kInstanceCidOrFunction, kClosureCid);
__ b(&not_closure, NE);
@@ -2571,6 +2567,7 @@
void StubCode::GenerateSlowTypeTestStub(Assembler* assembler) {
Label done, call_runtime;
+ const Register kInstanceReg = R0;
const Register kInstantiatorTypeArgumentsReg = R1;
const Register kSubtypeTestCacheReg = R3;
@@ -2579,7 +2576,6 @@
__ EnterStubFrame();
#ifdef DEBUG
- const Register kInstanceReg = R0;
// Guaranteed by caller.
Label no_error;
__ CompareObject(kInstanceReg, Object::null_object());
@@ -2588,6 +2584,11 @@
__ Bind(&no_error);
#endif
+ // Need to handle slow cases of [Smi]s here because the
+ // [SubtypeTestCache]-based stubs do not handle [Smi]s.
+ Label non_smi_value;
+ __ BranchIfSmi(kInstanceReg, &call_runtime);
+
// If the subtype-cache is null, it needs to be lazily-created by the runtime.
__ CompareObject(kSubtypeTestCacheReg, Object::null_object());
__ BranchIf(EQUAL, &call_runtime);
diff --git a/runtime/vm/stub_code_ia32.cc b/runtime/vm/stub_code_ia32.cc
index d609e2a..4535bad 100644
--- a/runtime/vm/stub_code_ia32.cc
+++ b/runtime/vm/stub_code_ia32.cc
@@ -1752,11 +1752,8 @@
__ addl(EDX, Immediate(Array::data_offset() - kHeapObjectTag));
Label loop, not_closure;
- if (n >= 4) {
- __ LoadClassIdMayBeSmi(kInstanceCidOrFunction, kInstanceReg);
- } else {
- __ LoadClassId(kInstanceCidOrFunction, kInstanceReg);
- }
+
+ __ LoadClassId(kInstanceCidOrFunction, kInstanceReg);
__ cmpl(kInstanceCidOrFunction, Immediate(kClosureCid));
__ j(NOT_EQUAL, &not_closure, Assembler::kNearJump);
diff --git a/runtime/vm/stub_code_x64.cc b/runtime/vm/stub_code_x64.cc
index 295d7ce..c8f4218 100644
--- a/runtime/vm/stub_code_x64.cc
+++ b/runtime/vm/stub_code_x64.cc
@@ -2291,12 +2291,8 @@
__ addq(RSI, Immediate(Array::data_offset() - kHeapObjectTag));
Label loop, not_closure;
- if (n >= 4) {
- __ LoadClassIdMayBeSmi(kInstanceCidOrFunction, kInstanceReg);
- } else {
- __ LoadClassId(kInstanceCidOrFunction, kInstanceReg);
- }
- __ cmpq(kInstanceCidOrFunction, Immediate(kClosureCid));
+ __ LoadClassId(R10, kInstanceReg);
+ __ cmpq(R10, Immediate(kClosureCid));
__ j(NOT_EQUAL, &not_closure, Assembler::kNearJump);
// Closure handling.
@@ -2585,13 +2581,13 @@
void StubCode::GenerateSlowTypeTestStub(Assembler* assembler) {
Label done, call_runtime;
+ const Register kInstanceReg = RAX;
const Register kDstTypeReg = RBX;
const Register kSubtypeTestCacheReg = R9;
__ EnterStubFrame();
#ifdef DEBUG
- const Register kInstanceReg = RAX;
// Guaranteed by caller.
Label no_error;
__ CompareObject(kInstanceReg, Object::null_object());
@@ -2600,6 +2596,10 @@
__ Bind(&no_error);
#endif
+ // Need to handle slow cases of [Smi]s here because the
+ // [SubtypeTestCache]-based stubs do not handle [Smi]s.
+ __ BranchIfSmi(kInstanceReg, &call_runtime);
+
// If the subtype-cache is null, it needs to be lazily-created by the runtime.
__ CompareObject(kSubtypeTestCacheReg, Object::null_object());
__ BranchIf(EQUAL, &call_runtime);