| // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| // for details. All rights reserved. Use of this source code is governed by a |
| // BSD-style license that can be found in the LICENSE file. |
| |
| #include "vm/globals.h" |
| #if defined(TARGET_ARCH_ARM) |
| |
| #include "vm/compiler/assembler/assembler.h" |
| #include "vm/cpu.h" |
| #include "vm/os.h" |
| #include "vm/unit_test.h" |
| #include "vm/virtual_memory.h" |
| |
| namespace dart { |
| namespace compiler { |
| |
| TEST_CASE(ReciprocalOps) { |
| EXPECT_EQ(true, isinf(ReciprocalEstimate(-0.0f))); |
| EXPECT_EQ(true, signbit(ReciprocalEstimate(-0.0f))); |
| EXPECT_EQ(true, isinf(ReciprocalEstimate(0.0f))); |
| EXPECT_EQ(true, !signbit(ReciprocalEstimate(0.0f))); |
| EXPECT_EQ(true, isnan(ReciprocalEstimate(NAN))); |
| |
| #define AS_UINT32(v) (bit_cast<uint32_t, float>(v)) |
| #define EXPECT_BITWISE_EQ(a, b) EXPECT_EQ(AS_UINT32(a), AS_UINT32(b)) |
| |
| EXPECT_BITWISE_EQ(0.0f, ReciprocalEstimate(kPosInfinity)); |
| EXPECT_BITWISE_EQ(-0.0f, ReciprocalEstimate(kNegInfinity)); |
| EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(0.0f, kPosInfinity)); |
| EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(0.0f, kNegInfinity)); |
| EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(-0.0f, kPosInfinity)); |
| EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(-0.0f, kNegInfinity)); |
| EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(kPosInfinity, 0.0f)); |
| EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(kNegInfinity, 0.0f)); |
| EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(kPosInfinity, -0.0f)); |
| EXPECT_BITWISE_EQ(2.0f, ReciprocalStep(kNegInfinity, -0.0f)); |
| EXPECT_EQ(true, isnan(ReciprocalStep(NAN, 1.0f))); |
| EXPECT_EQ(true, isnan(ReciprocalStep(1.0f, NAN))); |
| |
| EXPECT_EQ(true, isnan(ReciprocalSqrtEstimate(-1.0f))); |
| EXPECT_EQ(true, isnan(ReciprocalSqrtEstimate(kNegInfinity))); |
| EXPECT_EQ(true, isnan(ReciprocalSqrtEstimate(-1.0f))); |
| EXPECT_EQ(true, isinf(ReciprocalSqrtEstimate(-0.0f))); |
| EXPECT_EQ(true, signbit(ReciprocalSqrtEstimate(-0.0f))); |
| EXPECT_EQ(true, isinf(ReciprocalSqrtEstimate(0.0f))); |
| EXPECT_EQ(true, !signbit(ReciprocalSqrtEstimate(0.0f))); |
| EXPECT_EQ(true, isnan(ReciprocalSqrtEstimate(NAN))); |
| EXPECT_BITWISE_EQ(0.0f, ReciprocalSqrtEstimate(kPosInfinity)); |
| |
| EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(0.0f, kPosInfinity)); |
| EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(0.0f, kNegInfinity)); |
| EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(-0.0f, kPosInfinity)); |
| EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(-0.0f, kNegInfinity)); |
| EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(kPosInfinity, 0.0f)); |
| EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(kNegInfinity, 0.0f)); |
| EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(kPosInfinity, -0.0f)); |
| EXPECT_BITWISE_EQ(1.5f, ReciprocalSqrtStep(kNegInfinity, -0.0f)); |
| EXPECT_EQ(true, isnan(ReciprocalSqrtStep(NAN, 1.0f))); |
| EXPECT_EQ(true, isnan(ReciprocalSqrtStep(1.0f, NAN))); |
| |
| #undef AS_UINT32 |
| #undef EXPECT_BITWISE_EQ |
| } |
| |
| #define __ assembler-> |
| |
// Smallest possible test: return the constant 42 in R0.
ASSEMBLER_TEST_GENERATE(Simple, assembler) {
  __ mov(R0, Operand(42));
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Simple, test) { |
| typedef int (*SimpleCode)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(SimpleCode, test->entry())); |
| } |
| |
// mvn writes the bitwise complement of its operand; returns ~42.
ASSEMBLER_TEST_GENERATE(MoveNegated, assembler) {
  __ mvn(R0, Operand(42));
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(MoveNegated, test) { |
| EXPECT(test != NULL); |
| typedef int (*MoveNegated)() DART_UNUSED; |
| EXPECT_EQ(~42, EXECUTE_TEST_CODE_INT32(MoveNegated, test->entry())); |
| } |
| |
// Exercises ARM rotated 8-bit immediates: both constants below are an
// 8-bit value rotated by an even amount, so Operand::CanHold must accept
// them.  Returns 0x00550000 + 0x30000003 = 0x30550003.
ASSEMBLER_TEST_GENERATE(MoveRotImm, assembler) {
  Operand o;
  EXPECT(Operand::CanHold(0x00550000, &o));
  __ mov(R0, o);
  EXPECT(Operand::CanHold(0x30000003, &o));
  __ add(R0, R0, o);
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(MoveRotImm, test) { |
| EXPECT(test != NULL); |
| typedef int (*MoveRotImm)() DART_UNUSED; |
| EXPECT_EQ(0x30550003, EXECUTE_TEST_CODE_INT32(MoveRotImm, test->entry())); |
| } |
| |
// Loads a full 32-bit constant via the patchable movw/movt-style sequence.
ASSEMBLER_TEST_GENERATE(MovImm16, assembler) {
  __ LoadPatchableImmediate(R0, 0x12345678);
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(MovImm16, test) { |
| EXPECT(test != NULL); |
| typedef int (*MovImm16)() DART_UNUSED; |
| EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INT32(MovImm16, test->entry())); |
| } |
| |
// Conditional LoadImmediate: after cmp R0, #0 with R0 == 0, only the EQ
// load executes; the NE load must be skipped.
ASSEMBLER_TEST_GENERATE(LoadImmediate, assembler) {
  __ mov(R0, Operand(0));
  __ cmp(R0, Operand(0));
  __ LoadImmediate(R0, 0x12345678, EQ);
  __ LoadImmediate(R0, 0x87654321, NE);
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(LoadImmediate, test) { |
| EXPECT(test != NULL); |
| typedef int (*LoadImmediate)() DART_UNUSED; |
| EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INT32(LoadImmediate, test->entry())); |
| } |
| |
// Sign-extending 16-bit load from the (possibly unaligned) address in R0,
// using TMP as scratch; result is returned in R0.
ASSEMBLER_TEST_GENERATE(LoadHalfWordUnaligned, assembler) {
  __ LoadHalfWordUnaligned(R1, R0, TMP);
  __ mov(R0, Operand(R1));
  __ Ret();
}
| |
ASSEMBLER_TEST_RUN(LoadHalfWordUnaligned, test) {
  EXPECT(test != NULL);
  typedef intptr_t (*LoadHalfWordUnaligned)(intptr_t) DART_UNUSED;
  uint8_t buffer[4] = {
      0x89, 0xAB, 0xCD, 0xEF,
  };

  // Little-endian: the half-word at offset 0 is 0xAB89.  The double cast
  // spells out the expected sign extension of the 16-bit value.
  EXPECT_EQ(
      static_cast<int16_t>(static_cast<uint16_t>(0xAB89)),
      EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadHalfWordUnaligned, test->entry(),
                                      reinterpret_cast<intptr_t>(&buffer[0])));
  // Offset 1 is genuinely unaligned and must read 0xCDAB.
  EXPECT_EQ(
      static_cast<int16_t>(static_cast<uint16_t>(0xCDAB)),
      EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadHalfWordUnaligned, test->entry(),
                                      reinterpret_cast<intptr_t>(&buffer[1])));
}
| |
// Zero-extending 16-bit load from the (possibly unaligned) address in R0.
ASSEMBLER_TEST_GENERATE(LoadHalfWordUnsignedUnaligned, assembler) {
  __ LoadHalfWordUnsignedUnaligned(R1, R0, TMP);
  __ mov(R0, Operand(R1));
  __ Ret();
}
| |
ASSEMBLER_TEST_RUN(LoadHalfWordUnsignedUnaligned, test) {
  EXPECT(test != NULL);
  typedef intptr_t (*LoadHalfWordUnsignedUnaligned)(intptr_t) DART_UNUSED;
  uint8_t buffer[4] = {
      0x89, 0xAB, 0xCD, 0xEF,
  };

  // Unlike the signed variant, the high bit must NOT be sign-extended:
  // the expected values are the plain little-endian 16-bit reads.
  EXPECT_EQ(0xAB89, EXECUTE_TEST_CODE_INTPTR_INTPTR(
                        LoadHalfWordUnsignedUnaligned, test->entry(),
                        reinterpret_cast<intptr_t>(&buffer[0])));
  EXPECT_EQ(0xCDAB, EXECUTE_TEST_CODE_INTPTR_INTPTR(
                        LoadHalfWordUnsignedUnaligned, test->entry(),
                        reinterpret_cast<intptr_t>(&buffer[1])));
}
| |
| ASSEMBLER_TEST_GENERATE(StoreHalfWordUnaligned, assembler) { |
| __ LoadImmediate(R1, 0xABCD); |
| __ StoreWordUnaligned(R1, R0, TMP); |
| __ mov(R0, Operand(R1)); |
| __ Ret(); |
| } |
| |
ASSEMBLER_TEST_RUN(StoreHalfWordUnaligned, test) {
  EXPECT(test != NULL);
  typedef intptr_t (*StoreHalfWordUnaligned)(intptr_t) DART_UNUSED;
  uint8_t buffer[4] = {
      0, 0, 0, 0,
  };

  // Store at an aligned offset: little-endian bytes 0xCD, 0xAB land in
  // buffer[0..1]; the following byte must remain untouched.
  EXPECT_EQ(0xABCD, EXECUTE_TEST_CODE_INTPTR_INTPTR(
                        StoreHalfWordUnaligned, test->entry(),
                        reinterpret_cast<intptr_t>(&buffer[0])));
  EXPECT_EQ(0xCD, buffer[0]);
  EXPECT_EQ(0xAB, buffer[1]);
  EXPECT_EQ(0, buffer[2]);

  // Store at an unaligned offset; again exactly two bytes are written.
  EXPECT_EQ(0xABCD, EXECUTE_TEST_CODE_INTPTR_INTPTR(
                        StoreHalfWordUnaligned, test->entry(),
                        reinterpret_cast<intptr_t>(&buffer[1])));
  EXPECT_EQ(0xCD, buffer[1]);
  EXPECT_EQ(0xAB, buffer[2]);
  EXPECT_EQ(0, buffer[3]);
}
| |
// 32-bit load from the (possibly unaligned) address in R0.
ASSEMBLER_TEST_GENERATE(LoadWordUnaligned, assembler) {
  __ LoadWordUnaligned(R1, R0, TMP);
  __ mov(R0, Operand(R1));
  __ Ret();
}
| |
ASSEMBLER_TEST_RUN(LoadWordUnaligned, test) {
  EXPECT(test != NULL);
  typedef intptr_t (*LoadWordUnaligned)(intptr_t) DART_UNUSED;
  uint8_t buffer[8] = {0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0};

  // Reads at every alignment within a word; expected values are the
  // little-endian interpretation of the four bytes at each offset.
  EXPECT_EQ(
      static_cast<intptr_t>(0x78563412),
      EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadWordUnaligned, test->entry(),
                                      reinterpret_cast<intptr_t>(&buffer[0])));
  EXPECT_EQ(
      static_cast<intptr_t>(0x9A785634),
      EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadWordUnaligned, test->entry(),
                                      reinterpret_cast<intptr_t>(&buffer[1])));
  EXPECT_EQ(
      static_cast<intptr_t>(0xBC9A7856),
      EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadWordUnaligned, test->entry(),
                                      reinterpret_cast<intptr_t>(&buffer[2])));
  EXPECT_EQ(
      static_cast<intptr_t>(0xDEBC9A78),
      EXECUTE_TEST_CODE_INTPTR_INTPTR(LoadWordUnaligned, test->entry(),
                                      reinterpret_cast<intptr_t>(&buffer[3])));
}
| |
// 32-bit store to the (possibly unaligned) address in R0; returns the
// stored value.
ASSEMBLER_TEST_GENERATE(StoreWordUnaligned, assembler) {
  __ LoadImmediate(R1, 0x12345678);
  __ StoreWordUnaligned(R1, R0, TMP);
  __ mov(R0, Operand(R1));
  __ Ret();
}
| |
ASSEMBLER_TEST_RUN(StoreWordUnaligned, test) {
  EXPECT(test != NULL);
  typedef intptr_t (*StoreWordUnaligned)(intptr_t) DART_UNUSED;
  uint8_t buffer[8] = {0, 0, 0, 0, 0, 0, 0, 0};

  // Stores at every alignment within a word; after each call the four
  // bytes at the target offset must hold 0x12345678 in little-endian
  // order.
  EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INTPTR_INTPTR(
                            StoreWordUnaligned, test->entry(),
                            reinterpret_cast<intptr_t>(&buffer[0])));
  EXPECT_EQ(0x78, buffer[0]);
  EXPECT_EQ(0x56, buffer[1]);
  EXPECT_EQ(0x34, buffer[2]);
  EXPECT_EQ(0x12, buffer[3]);

  EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INTPTR_INTPTR(
                            StoreWordUnaligned, test->entry(),
                            reinterpret_cast<intptr_t>(&buffer[1])));
  EXPECT_EQ(0x78, buffer[1]);
  EXPECT_EQ(0x56, buffer[2]);
  EXPECT_EQ(0x34, buffer[3]);
  EXPECT_EQ(0x12, buffer[4]);

  EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INTPTR_INTPTR(
                            StoreWordUnaligned, test->entry(),
                            reinterpret_cast<intptr_t>(&buffer[2])));
  EXPECT_EQ(0x78, buffer[2]);
  EXPECT_EQ(0x56, buffer[3]);
  EXPECT_EQ(0x34, buffer[4]);
  EXPECT_EQ(0x12, buffer[5]);

  EXPECT_EQ(0x12345678, EXECUTE_TEST_CODE_INTPTR_INTPTR(
                            StoreWordUnaligned, test->entry(),
                            reinterpret_cast<intptr_t>(&buffer[3])));
  EXPECT_EQ(0x78, buffer[3]);
  EXPECT_EQ(0x56, buffer[4]);
  EXPECT_EQ(0x34, buffer[5]);
  EXPECT_EQ(0x12, buffer[6]);
}
| |
// Exercises every core<->VFP register move variant (single, double,
// register pairs, and single-lane insert); the final subtraction yields
// 43 - 41 = 2 when all the moves behaved.
ASSEMBLER_TEST_GENERATE(Vmov, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ mov(R3, Operand(43));
    __ mov(R1, Operand(41));
    __ vmovsrr(S1, R1, R3);  // S1:S2 = 41:43
    __ vmovs(S0, S2);        // S0 = S2, S0:S1 == 43:41
    __ vmovd(D2, D0);        // D2 = D0, S4:S5 == 43:41
    __ vmovrs(R3, S5);       // R3 = S5, R3 == 41
    __ vmovrrs(R1, R2, S4);  // R1:R2 = S4:S5, R1:R2 == 43:41
    __ vmovdrr(D3, R3, R2);  // D3 = R3:R2, S6:S7 == 41:41
    __ vmovdr(D3, 1, R1);    // D3[1] == S7 = R1, S6:S7 == 41:43
    __ vmovrrd(R0, R1, D3);  // R0:R1 = D3, R0:R1 == 41:43
    __ sub(R0, R1, Operand(R0));  // 43-41
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vmov, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef int (*Vmov)() DART_UNUSED; |
| EXPECT_EQ(2, EXECUTE_TEST_CODE_INT32(Vmov, test->entry())); |
| } |
| } |
| |
// Round-trips a float through the stack with vldrs/vstrs, doubling it in
// between; the pre/post-indexed core str/ldr keep SP balanced.
ASSEMBLER_TEST_GENERATE(SingleVLoadStore, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ LoadImmediate(R0, bit_cast<int32_t, float>(12.3f));
    __ mov(R2, Operand(SP));
    __ str(R0, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
    __ vldrs(S0, Address(R2, (-target::kWordSize * 30)));
    __ vadds(S0, S0, S0);
    __ vstrs(S0, Address(R2, (-target::kWordSize * 30)));
    __ ldr(R0, Address(SP, (target::kWordSize * 30), Address::PostIndex));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(SingleVLoadStore, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef float (*SingleVLoadStore)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(SingleVLoadStore, test->entry()); |
| EXPECT_FLOAT_EQ(2 * 12.3f, res, 0.001f); |
| } |
| } |
| |
// Same float round-trip as SingleVLoadStore, but the stack adjustment is
// expressed with shifted-register addressing modes (R1 << 5) to exercise
// NegPreIndex/PostIndex with a register offset.
ASSEMBLER_TEST_GENERATE(SingleVShiftLoadStore, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ LoadImmediate(R0, bit_cast<int32_t, float>(12.3f));
    __ mov(R2, Operand(SP));
    // Expressing __str(R0, Address(SP, (-kWordSize * 32), Address::PreIndex));
    // as:
    __ mov(R1, Operand(target::kWordSize));
    __ str(R0, Address(SP, R1, LSL, 5, Address::NegPreIndex));
    __ vldrs(S0, Address(R2, (-target::kWordSize * 32)));
    __ vadds(S0, S0, S0);
    __ vstrs(S0, Address(R2, (-target::kWordSize * 32)));
    // Expressing __ldr(R0, Address(SP, (kWordSize * 32), Address::PostIndex));
    // as:
    __ ldr(R0, Address(SP, R1, LSL, 5, Address::PostIndex));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(SingleVShiftLoadStore, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef float (*SingleVLoadStore)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(SingleVLoadStore, test->entry()); |
| EXPECT_FLOAT_EQ(2 * 12.3f, res, 0.001f); |
| } |
| } |
| |
// Round-trips a double through the stack: the two 32-bit halves are
// stored with core str, loaded as one double with vldrd, doubled, and
// read back the same way.
ASSEMBLER_TEST_GENERATE(DoubleVLoadStore, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    int64_t value = bit_cast<int64_t, double>(12.3);
    __ LoadImmediate(R0, Utils::Low32Bits(value));
    __ LoadImmediate(R1, Utils::High32Bits(value));
    __ mov(R2, Operand(SP));
    __ str(R0, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
    __ str(R1, Address(R2, (-target::kWordSize * 29)));
    __ vldrd(D0, Address(R2, (-target::kWordSize * 30)));
    __ vaddd(D0, D0, D0);
    __ vstrd(D0, Address(R2, (-target::kWordSize * 30)));
    __ ldr(R1, Address(R2, (-target::kWordSize * 29)));
    __ ldr(R0, Address(SP, (target::kWordSize * 30), Address::PostIndex));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(DoubleVLoadStore, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef double (*DoubleVLoadStore)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_DOUBLE(DoubleVLoadStore, test->entry()); |
| EXPECT_FLOAT_EQ(2 * 12.3f, res, 0.001f); |
| } |
| } |
| |
// Chains the single-precision VFP arithmetic ops; each comment gives the
// running value of S0.
ASSEMBLER_TEST_GENERATE(SingleFPOperations, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ LoadSImmediate(S0, 12.3f);
    __ LoadSImmediate(S1, 3.4f);
    __ vnegs(S0, S0);      // -12.3f
    __ vabss(S0, S0);      // 12.3f
    __ vadds(S0, S0, S1);  // 15.7f
    __ vmuls(S0, S0, S1);  // 53.38f
    __ vsubs(S0, S0, S1);  // 49.98f
    __ vdivs(S0, S0, S1);  // 14.7f
    __ vsqrts(S0, S0);     // 3.8340579f
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(SingleFPOperations, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef float (*SingleFPOperations)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(SingleFPOperations, test->entry()); |
| EXPECT_FLOAT_EQ(3.8340579f, res, 0.001f); |
| } |
| } |
| |
// Double-precision counterpart of SingleFPOperations; R0 is scratch for
// the immediate loads.
ASSEMBLER_TEST_GENERATE(DoubleFPOperations, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ LoadDImmediate(D0, 12.3, R0);
    __ LoadDImmediate(D1, 3.4, R0);
    __ vnegd(D0, D0);      // -12.3
    __ vabsd(D0, D0);      // 12.3
    __ vaddd(D0, D0, D1);  // 15.7
    __ vmuld(D0, D0, D1);  // 53.38
    __ vsubd(D0, D0, D1);  // 49.98
    __ vdivd(D0, D0, D1);  // 14.7
    __ vsqrtd(D0, D0);     // 3.8340579
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(DoubleFPOperations, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef double (*DoubleFPOperations)() DART_UNUSED; |
| double res = EXECUTE_TEST_CODE_DOUBLE(DoubleFPOperations, test->entry()); |
| EXPECT_FLOAT_EQ(3.8340579, res, 0.001); |
| } |
| } |
| |
// sqrt(-1.0) must be NaN; comparing the result with itself then sets the
// V (unordered) flag, which selects the conditional mov of 1 into R0.
ASSEMBLER_TEST_GENERATE(DoubleSqrtNeg, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    // Check that sqrt of a negative double gives NaN.
    __ LoadDImmediate(D1, -1.0, R0);
    __ vsqrtd(D0, D1);
    __ vcmpd(D0, D0);
    __ vmstat();
    __ mov(R0, Operand(1), VS);
    __ mov(R0, Operand(0), VC);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(DoubleSqrtNeg, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef int (*DoubleSqrtNeg)() DART_UNUSED; |
| EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(DoubleSqrtNeg, test->entry())); |
| } |
| } |
| |
// Converts the signed integer 6 to double via vcvtdi; the double is
// returned in D0.
ASSEMBLER_TEST_GENERATE(IntToDoubleConversion, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ mov(R3, Operand(6));
    __ vmovsr(S3, R3);
    __ vcvtdi(D0, S3);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(IntToDoubleConversion, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef double (*IntToDoubleConversionCode)() DART_UNUSED; |
| double res = |
| EXECUTE_TEST_CODE_DOUBLE(IntToDoubleConversionCode, test->entry()); |
| EXPECT_FLOAT_EQ(6.0, res, 0.001); |
| } |
| } |
| |
// Converts a 64-bit integer to double by converting the unsigned low word
// and the signed high word separately, then combining them as
// D0 = low + high * 2^32 via a fused multiply-accumulate.
ASSEMBLER_TEST_GENERATE(LongToDoubleConversion, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    int64_t value = 60000000000LL;
    __ LoadImmediate(R0, Utils::Low32Bits(value));
    __ LoadImmediate(R1, Utils::High32Bits(value));
    __ vmovsr(S0, R0);
    __ vmovsr(S2, R1);
    __ vcvtdu(D0, S0);  // Low word is unsigned.
    __ vcvtdi(D1, S2);  // High word carries the sign.
    __ LoadDImmediate(D2, 1.0 * (1LL << 32), R0);
    __ vmlad(D0, D1, D2);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(LongToDoubleConversion, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef double (*LongToDoubleConversionCode)() DART_UNUSED; |
| double res = |
| EXECUTE_TEST_CODE_DOUBLE(LongToDoubleConversionCode, test->entry()); |
| EXPECT_FLOAT_EQ(60000000000.0, res, 0.001); |
| } |
| } |
| |
// Converts the signed integer 6 to single precision via vcvtsi.
ASSEMBLER_TEST_GENERATE(IntToFloatConversion, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ mov(R3, Operand(6));
    __ vmovsr(S3, R3);
    __ vcvtsi(S0, S3);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(IntToFloatConversion, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef float (*IntToFloatConversionCode)() DART_UNUSED; |
| float res = |
| EXECUTE_TEST_CODE_FLOAT(IntToFloatConversionCode, test->entry()); |
| EXPECT_FLOAT_EQ(6.0, res, 0.001); |
| } |
| } |
| |
// Converts the float argument (arriving in S0) to a signed integer and
// returns it in R0.
ASSEMBLER_TEST_GENERATE(FloatToIntConversion, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ vcvtis(S1, S0);
    __ vmovrs(R0, S1);
  }
  __ Ret();
}
| |
ASSEMBLER_TEST_RUN(FloatToIntConversion, test) {
  EXPECT(test != NULL);
  if (TargetCPUFeatures::vfp_supported()) {
    typedef int (*FloatToIntConversion)(float arg) DART_UNUSED;
    // Fractional part is discarded: 12.8f converts to 12.
    EXPECT_EQ(12, EXECUTE_TEST_CODE_INT32_F(FloatToIntConversion, test->entry(),
                                            12.8f));
    // Values outside the int32 range saturate to the respective limit.
    EXPECT_EQ(INT32_MIN, EXECUTE_TEST_CODE_INT32_F(FloatToIntConversion,
                                                   test->entry(), -FLT_MAX));
    EXPECT_EQ(INT32_MAX, EXECUTE_TEST_CODE_INT32_F(FloatToIntConversion,
                                                   test->entry(), FLT_MAX));
  }
}
| |
// Converts the double argument (arriving in D0) to a signed integer and
// returns it in R0.
ASSEMBLER_TEST_GENERATE(DoubleToIntConversion, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ vcvtid(S0, D0);
    __ vmovrs(R0, S0);
  }
  __ Ret();
}
| |
ASSEMBLER_TEST_RUN(DoubleToIntConversion, test) {
  if (TargetCPUFeatures::vfp_supported()) {
    typedef int (*DoubleToIntConversion)(double arg) DART_UNUSED;
    EXPECT(test != NULL);
    // Fractional part is discarded: 12.8 converts to 12.
    EXPECT_EQ(12, EXECUTE_TEST_CODE_INT32_D(DoubleToIntConversion,
                                            test->entry(), 12.8));
    // Values outside the int32 range saturate to the respective limit.
    EXPECT_EQ(INT32_MIN, EXECUTE_TEST_CODE_INT32_D(DoubleToIntConversion,
                                                   test->entry(), -DBL_MAX));
    EXPECT_EQ(INT32_MAX, EXECUTE_TEST_CODE_INT32_D(DoubleToIntConversion,
                                                   test->entry(), DBL_MAX));
  }
}
| |
// Widens 12.8f to double via vcvtds; result returned in D0.
ASSEMBLER_TEST_GENERATE(FloatToDoubleConversion, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ LoadSImmediate(S2, 12.8f);
    __ vcvtds(D0, S2);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(FloatToDoubleConversion, test) { |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef double (*FloatToDoubleConversionCode)() DART_UNUSED; |
| EXPECT(test != NULL); |
| double res = |
| EXECUTE_TEST_CODE_DOUBLE(FloatToDoubleConversionCode, test->entry()); |
| EXPECT_FLOAT_EQ(12.8, res, 0.001); |
| } |
| } |
| |
// Narrows 12.8 to single precision via vcvtsd; result returned in S0.
ASSEMBLER_TEST_GENERATE(DoubleToFloatConversion, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ LoadDImmediate(D1, 12.8, R0);
    __ vcvtsd(S0, D1);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(DoubleToFloatConversion, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef float (*DoubleToFloatConversionCode)() DART_UNUSED; |
| float res = |
| EXECUTE_TEST_CODE_FLOAT(DoubleToFloatConversionCode, test->entry()); |
| EXPECT_FLOAT_EQ(12.8, res, 0.001); |
| } |
| } |
| |
// Checks the flag settings produced by vcmps/vmstat: 12.3f < 12.5f must
// come out ordered-and-less, and NaN must compare unordered.  Each failed
// expectation adds a distinct power of two to R0 so the runner can tell
// which check failed; R0 == 0 means success.
ASSEMBLER_TEST_GENERATE(FloatCompare, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    // Test 12.3f vs 12.5f.
    __ LoadSImmediate(S0, 12.3f);
    __ LoadSImmediate(S1, 12.5f);

    // Count errors in R0. R0 is zero if no errors found.
    __ mov(R0, Operand(0));
    __ vcmps(S0, S1);
    __ vmstat();
    __ add(R0, R0, Operand(1), VS);  // Error if unordered (Nan).
    __ add(R0, R0, Operand(2), GT);  // Error if greater.
    __ add(R0, R0, Operand(4), EQ);  // Error if equal.
    __ add(R0, R0, Operand(8), PL);  // Error if not less.

    // Test NaN.
    // Create NaN by dividing 0.0f/0.0f.
    __ LoadSImmediate(S1, 0.0f);
    __ vdivs(S1, S1, S1);
    __ vcmps(S1, S1);
    __ vmstat();
    // Error if not unordered (not Nan).
    __ add(R0, R0, Operand(16), VC);
  }
  // R0 is 0 if all tests passed.
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(FloatCompare, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef int (*FloatCompare)() DART_UNUSED; |
| EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(FloatCompare, test->entry())); |
| } |
| } |
| |
// Double-precision counterpart of FloatCompare: vcmpd flag settings for
// an ordered less-than pair and for NaN.  Failed expectations accumulate
// distinct bits in R0; R0 == 0 means success.
ASSEMBLER_TEST_GENERATE(DoubleCompare, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    // Test 12.3 vs 12.5.
    __ LoadDImmediate(D0, 12.3, R1);
    __ LoadDImmediate(D1, 12.5, R1);

    // Count errors in R0. R0 is zero if no errors found.
    __ mov(R0, Operand(0));
    __ vcmpd(D0, D1);
    __ vmstat();
    __ add(R0, R0, Operand(1), VS);  // Error if unordered (Nan).
    __ add(R0, R0, Operand(2), GT);  // Error if greater.
    __ add(R0, R0, Operand(4), EQ);  // Error if equal.
    __ add(R0, R0, Operand(8), PL);  // Error if not less.

    // Test NaN.
    // Create NaN by dividing 0.0/0.0.
    __ LoadDImmediate(D1, 0.0, R1);
    __ vdivd(D1, D1, D1);
    __ vcmpd(D1, D1);
    __ vmstat();
    // Error if not unordered (not Nan).
    __ add(R0, R0, Operand(16), VC);
  }
  // R0 is 0 if all tests passed.
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(DoubleCompare, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef int (*DoubleCompare)() DART_UNUSED; |
| EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(DoubleCompare, test->entry())); |
| } |
| } |
| |
// Backward conditional branch: doubles R0 while halving R1 until R1 hits
// zero (movs sets the flags).  Two iterations: returns 1 << 2 = 4.
ASSEMBLER_TEST_GENERATE(Loop, assembler) {
  Label loop_entry;
  __ mov(R0, Operand(1));
  __ mov(R1, Operand(2));
  __ Bind(&loop_entry);
  __ mov(R0, Operand(R0, LSL, 1));
  __ movs(R1, Operand(R1, LSR, 1));
  __ b(&loop_entry, NE);
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Loop, test) { |
| EXPECT(test != NULL); |
| typedef int (*Loop)() DART_UNUSED; |
| EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Loop, test->entry())); |
| } |
| |
// Unconditional forward branch: the mov of 11 must be skipped.
ASSEMBLER_TEST_GENERATE(ForwardBranch, assembler) {
  Label skip;
  __ mov(R0, Operand(42));
  __ b(&skip);
  __ mov(R0, Operand(11));
  __ Bind(&skip);
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(ForwardBranch, test) { |
| EXPECT(test != NULL); |
| typedef int (*ForwardBranch)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(ForwardBranch, test->entry())); |
| } |
| |
// Same loop as Loop, but with far branches enabled so the backward branch
// is emitted through the far-branch encoding path.
ASSEMBLER_TEST_GENERATE(Loop2, assembler) {
  Label loop_entry;
  __ set_use_far_branches(true);
  __ mov(R0, Operand(1));
  __ mov(R1, Operand(2));
  __ Bind(&loop_entry);
  __ mov(R0, Operand(R0, LSL, 1));
  __ movs(R1, Operand(R1, LSR, 1));
  __ b(&loop_entry, NE);
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Loop2, test) { |
| EXPECT(test != NULL); |
| typedef int (*Loop)() DART_UNUSED; |
| EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Loop, test->entry())); |
| } |
| |
// Same loop again, padded with 2^22 nops so the backward branch distance
// genuinely exceeds the near-branch range and must use the far encoding.
ASSEMBLER_TEST_GENERATE(Loop3, assembler) {
  Label loop_entry;
  __ set_use_far_branches(true);
  __ mov(R0, Operand(1));
  __ mov(R1, Operand(2));
  __ Bind(&loop_entry);
  for (int i = 0; i < (1 << 22); i++) {
    __ nop();
  }
  __ mov(R0, Operand(R0, LSL, 1));
  __ movs(R1, Operand(R1, LSR, 1));
  __ b(&loop_entry, NE);
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Loop3, test) { |
| EXPECT(test != NULL); |
| typedef int (*Loop)() DART_UNUSED; |
| EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Loop, test->entry())); |
| } |
| |
// Round-trips a value through the stack with Push/Pop.
ASSEMBLER_TEST_GENERATE(LoadStore, assembler) {
  __ mov(R1, Operand(123));
  __ Push(R1);
  __ Pop(R0);
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(LoadStore, test) { |
| EXPECT(test != NULL); |
| typedef int (*LoadStore)() DART_UNUSED; |
| EXPECT_EQ(123, EXECUTE_TEST_CODE_INT32(LoadStore, test->entry())); |
| } |
| |
// PushRegisterPair(R2, R3) followed by two single pops: the first pop
// must yield R2's value (12), i.e. R2 ends up at the lower address.
ASSEMBLER_TEST_GENERATE(PushRegisterPair, assembler) {
  __ mov(R2, Operand(12));
  __ mov(R3, Operand(21));
  __ PushRegisterPair(R2, R3);
  __ Pop(R0);
  __ Pop(R1);
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(PushRegisterPair, test) { |
| EXPECT(test != NULL); |
| typedef int (*PushRegisterPair)() DART_UNUSED; |
| EXPECT_EQ(12, EXECUTE_TEST_CODE_INT32(PushRegisterPair, test->entry())); |
| } |
| |
// Same as PushRegisterPair but with the pair given in descending register
// order (R3, R2), which forces the non-strd code path; the first pop must
// still yield the first pair member (12).
ASSEMBLER_TEST_GENERATE(PushRegisterPairReversed, assembler) {
  __ mov(R3, Operand(12));
  __ mov(R2, Operand(21));
  __ PushRegisterPair(R3, R2);
  __ Pop(R0);
  __ Pop(R1);
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(PushRegisterPairReversed, test) { |
| EXPECT(test != NULL); |
| typedef int (*PushRegisterPairReversed)() DART_UNUSED; |
| EXPECT_EQ(12, |
| EXECUTE_TEST_CODE_INT32(PushRegisterPairReversed, test->entry())); |
| } |
| |
// Two single pushes followed by PopRegisterPair(R0, R1): R0 takes the
// top-of-stack value (12).
ASSEMBLER_TEST_GENERATE(PopRegisterPair, assembler) {
  __ mov(R2, Operand(12));
  __ mov(R3, Operand(21));
  __ Push(R3);
  __ Push(R2);
  __ PopRegisterPair(R0, R1);
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(PopRegisterPair, test) { |
| EXPECT(test != NULL); |
| typedef int (*PopRegisterPair)() DART_UNUSED; |
| EXPECT_EQ(12, EXECUTE_TEST_CODE_INT32(PopRegisterPair, test->entry())); |
| } |
| |
// Same as PopRegisterPair but popping into (R1, R0) — descending register
// order forces the non-ldrd path; R1 takes top-of-stack, leaving 12 in
// the second-popped slot's register pairing so the result is still 12.
ASSEMBLER_TEST_GENERATE(PopRegisterPairReversed, assembler) {
  __ mov(R3, Operand(12));
  __ mov(R2, Operand(21));
  __ Push(R3);
  __ Push(R2);
  __ PopRegisterPair(R1, R0);
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(PopRegisterPairReversed, test) { |
| EXPECT(test != NULL); |
| typedef int (*PopRegisterPairReversed)() DART_UNUSED; |
| EXPECT_EQ(12, |
| EXECUTE_TEST_CODE_INT32(PopRegisterPairReversed, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Semaphore, assembler) { |
| __ mov(R0, Operand(40)); |
| __ mov(R1, Operand(42)); |
| __ Push(R0); |
| Label retry; |
| __ Bind(&retry); |
| __ ldrex(R0, SP); |
| __ strex(IP, R1, SP); // IP == 0, success |
| __ tst(IP, Operand(0)); |
| __ b(&retry, NE); // NE if context switch occurred between ldrex and strex. |
| __ Pop(R0); // 42 |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Semaphore, test) { |
| EXPECT(test != NULL); |
| typedef int (*Semaphore)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Semaphore, test->entry())); |
| } |
| |
// clrex between ldrex and strex clears the exclusive monitor, so the
// strex must fail (IP == 1) and leave memory untouched; the result is the
// original 40 plus the failure flag, i.e. 41.
ASSEMBLER_TEST_GENERATE(FailedSemaphore, assembler) {
  __ mov(R0, Operand(40));
  __ mov(R1, Operand(42));
  __ Push(R0);
  __ ldrex(R0, SP);
  __ clrex();            // Simulate a context switch.
  __ strex(IP, R1, SP);  // IP == 1, failure
  __ Pop(R0);            // 40
  __ add(R0, R0, Operand(IP));
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(FailedSemaphore, test) { |
| EXPECT(test != NULL); |
| typedef int (*FailedSemaphore)() DART_UNUSED; |
| EXPECT_EQ(41, EXECUTE_TEST_CODE_INT32(FailedSemaphore, test->entry())); |
| } |
| |
// add/sub plus the reverse-subtract forms: rsbs computes 100 - 42 = 58
// (setting carry, i.e. no borrow), and rsc computes 100 - 58 - 0 = 42.
ASSEMBLER_TEST_GENERATE(AddSub, assembler) {
  __ mov(R1, Operand(40));
  __ sub(R1, R1, Operand(2));
  __ add(R0, R1, Operand(4));
  __ rsbs(R0, R0, Operand(100));
  __ rsc(R0, R0, Operand(100));
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(AddSub, test) { |
| EXPECT(test != NULL); |
| typedef int (*AddSub)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(AddSub, test->entry())); |
| } |
| |
// 0xFFFFFFFF + 1 overflows and sets the carry flag; adcs then folds that
// carry into R0: 0 + 0 + carry = 1.
ASSEMBLER_TEST_GENERATE(AddCarry, assembler) {
  __ LoadImmediate(R2, 0xFFFFFFFF);
  __ mov(R1, Operand(1));
  __ mov(R0, Operand(0));
  __ adds(R2, R2, Operand(R1));
  __ adcs(R0, R0, Operand(R0));
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(AddCarry, test) { |
| EXPECT(test != NULL); |
| typedef int (*AddCarry)() DART_UNUSED; |
| EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(AddCarry, test->entry())); |
| } |
| |
// Verifies that adcs both consumes and regenerates the carry flag across
// a chain of adds; the final adc leaves exactly the carried 1 in R0.
ASSEMBLER_TEST_GENERATE(AddCarryInOut, assembler) {
  __ LoadImmediate(R2, 0xFFFFFFFF);
  __ mov(R1, Operand(1));
  __ mov(R0, Operand(0));
  __ adds(IP, R2, Operand(R1));  // c_out = 1.
  __ adcs(IP, R2, Operand(R0));  // c_in = 1, c_out = 1.
  __ adc(R0, R0, Operand(R0));   // c_in = 1.
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(AddCarryInOut, test) { |
| EXPECT(test != NULL); |
| typedef int (*AddCarryInOut)() DART_UNUSED; |
| EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(AddCarryInOut, test->entry())); |
| } |
| |
// 0 - 1 borrows (ARM carry clear); sbcs then computes 0 - 0 - borrow,
// giving -1 in R0.
ASSEMBLER_TEST_GENERATE(SubCarry, assembler) {
  __ LoadImmediate(R2, 0x0);
  __ mov(R1, Operand(1));
  __ mov(R0, Operand(0));
  __ subs(R2, R2, Operand(R1));
  __ sbcs(R0, R0, Operand(R0));
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(SubCarry, test) { |
| EXPECT(test != NULL); |
| typedef int (*SubCarry)() DART_UNUSED; |
| EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SubCarry, test->entry())); |
| } |
| |
// Verifies that sbcs both consumes and regenerates the borrow across a
// chain of subtractions; the final sbc leaves -1 in R0.
ASSEMBLER_TEST_GENERATE(SubCarryInOut, assembler) {
  __ mov(R1, Operand(1));
  __ mov(R0, Operand(0));
  __ subs(IP, R0, Operand(R1));  // c_out = 1.
  __ sbcs(IP, R0, Operand(R0));  // c_in = 1, c_out = 1.
  __ sbc(R0, R0, Operand(R0));   // c_in = 1.
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(SubCarryInOut, test) { |
| EXPECT(test != NULL); |
| typedef int (*SubCarryInOut)() DART_UNUSED; |
| EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SubCarryInOut, test->entry())); |
| } |
| |
// 0x7FFFFFFF plus a carried-in 1 overflows signed arithmetic, setting V;
// the VS-conditioned mov then writes 1 into R0.
ASSEMBLER_TEST_GENERATE(Overflow, assembler) {
  __ LoadImmediate(R0, 0xFFFFFFFF);
  __ LoadImmediate(R1, 0x7FFFFFFF);
  __ adds(IP, R0, Operand(1));  // c_out = 1.
  __ adcs(IP, R1, Operand(0));  // c_in = 1, c_out = 1, v = 1.
  __ mov(R0, Operand(1), VS);
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Overflow, test) { |
| EXPECT(test != NULL); |
| typedef int (*Overflow)() DART_UNUSED; |
| EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Overflow, test->entry())); |
| } |
| |
// Bitwise and/orr: (0 & 40) | 42 == 42.
ASSEMBLER_TEST_GENERATE(AndOrr, assembler) {
  __ mov(R1, Operand(40));
  __ mov(R2, Operand(0));
  __ and_(R1, R2, Operand(R1));
  __ mov(R3, Operand(42));
  __ orr(R0, R1, Operand(R3));
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(AndOrr, test) { |
| EXPECT(test != NULL); |
| typedef int (*AndOrr)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(AndOrr, test->entry())); |
| } |
| |
// Verifies that orrs updates the flags: tst with R0 == 0 sets Z
// regardless of R1's incoming value, orrs with 1 must then clear Z, so
// only the NE-conditioned Ret (returning 42) fires.
ASSEMBLER_TEST_GENERATE(Orrs, assembler) {
  __ mov(R0, Operand(0));
  __ tst(R0, Operand(R1));      // Set zero-flag.
  __ orrs(R0, R0, Operand(1));  // Clear zero-flag.
  __ Ret(EQ);
  __ mov(R0, Operand(42));
  __ Ret(NE);  // Only this return should fire.
  __ mov(R0, Operand(2));
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Orrs, test) { |
| EXPECT(test != NULL); |
| typedef int (*Orrs)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Orrs, test->entry())); |
| } |
| |
// 32-bit multiply: 40 * 20 = 800.
ASSEMBLER_TEST_GENERATE(Multiply, assembler) {
  __ mov(R1, Operand(20));
  __ mov(R2, Operand(40));
  __ mul(R3, R2, R1);
  __ mov(R0, Operand(R3));
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Multiply, test) { |
| EXPECT(test != NULL); |
| typedef int (*Multiply)() DART_UNUSED; |
| EXPECT_EQ(800, EXECUTE_TEST_CODE_INT32(Multiply, test->entry())); |
| } |
| |
// Computes quotient and remainder of the low words of the two int64
// arguments (R0 / R2) by round-tripping through double division, then
// uses mls for the remainder.  Returns remainder in R0, quotient in R1
// (combined into the int64 result).
ASSEMBLER_TEST_GENERATE(QuotientRemainder, assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ vmovsr(S2, R0);
    __ vmovsr(S4, R2);
    __ vcvtdi(D1, S2);
    __ vcvtdi(D2, S4);
    __ vdivd(D0, D1, D2);
    __ vcvtid(S0, D0);
    __ vmovrs(R1, S0);       // r1 = r0/r2
    __ mls(R0, R1, R2, R0);  // r0 = r0 - r1*r2
  }
  __ Ret();
}
| |
ASSEMBLER_TEST_RUN(QuotientRemainder, test) {
  EXPECT(test != NULL);
  if (TargetCPUFeatures::vfp_supported()) {
    typedef int64_t (*QuotientRemainder)(int64_t dividend, int64_t divisor)
        DART_UNUSED;
    // High word of the result is the quotient (0x12345678 / 0x1234 ==
    // 0x10004), low word is the remainder (0xda8).
    EXPECT_EQ(0x1000400000da8LL,
              EXECUTE_TEST_CODE_INT64_LL(QuotientRemainder, test->entry(),
                                         0x12345678, 0x1234));
  }
}
| |
| ASSEMBLER_TEST_GENERATE(Multiply64To64, assembler) { |
| __ Push(R4); |
| __ mov(IP, Operand(R0)); |
| __ mul(R4, R2, R1); |
| __ umull(R0, R1, R2, IP); |
| __ mla(R2, IP, R3, R4); |
| __ add(R1, R2, Operand(R1)); |
| __ Pop(R4); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Multiply64To64, test) { |
| EXPECT(test != NULL); |
| typedef int64_t (*Multiply64To64)(int64_t operand0, int64_t operand1) |
| DART_UNUSED; |
| EXPECT_EQ(6, |
| EXECUTE_TEST_CODE_INT64_LL(Multiply64To64, test->entry(), -3, -2)); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Multiply32To64, assembler) { |
| __ smull(R0, R1, R0, R2); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Multiply32To64, test) { |
| EXPECT(test != NULL); |
| typedef int64_t (*Multiply32To64)(int64_t operand0, int64_t operand1) |
| DART_UNUSED; |
| EXPECT_EQ(6, |
| EXECUTE_TEST_CODE_INT64_LL(Multiply32To64, test->entry(), -3, -2)); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(MultiplyAccumAccum32To64, assembler) { |
| __ umaal(R0, R1, R2, R3); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(MultiplyAccumAccum32To64, test) { |
| EXPECT(test != NULL); |
| typedef int64_t (*MultiplyAccumAccum32To64)(int64_t operand0, |
| int64_t operand1) DART_UNUSED; |
| EXPECT_EQ(3 + 7 + 5 * 11, |
| EXECUTE_TEST_CODE_INT64_LL(MultiplyAccumAccum32To64, test->entry(), |
| (3LL << 32) + 7, (5LL << 32) + 11)); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Clz, assembler) { |
| Label error; |
| |
| __ mov(R0, Operand(0)); |
| __ clz(R1, R0); |
| __ cmp(R1, Operand(32)); |
| __ b(&error, NE); |
| __ mov(R2, Operand(42)); |
| __ clz(R2, R2); |
| __ cmp(R2, Operand(26)); |
| __ b(&error, NE); |
| __ mvn(R0, Operand(0)); |
| __ clz(R1, R0); |
| __ cmp(R1, Operand(0)); |
| __ b(&error, NE); |
| __ Lsr(R0, R0, Operand(3)); |
| __ clz(R1, R0); |
| __ cmp(R1, Operand(3)); |
| __ b(&error, NE); |
| __ mov(R0, Operand(0)); |
| __ Ret(); |
| __ Bind(&error); |
| __ mov(R0, Operand(1)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Clz, test) { |
| EXPECT(test != NULL); |
| typedef int (*Clz)() DART_UNUSED; |
| EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Clz, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Rbit, assembler) { |
| __ mov(R0, Operand(0x15)); |
| __ rbit(R0, R0); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Rbit, test) { |
| EXPECT(test != NULL); |
| typedef int (*Rbit)() DART_UNUSED; |
| const int32_t expected = 0xa8000000; |
| EXPECT_EQ(expected, EXECUTE_TEST_CODE_INT32(Rbit, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Tst, assembler) { |
| Label skip; |
| |
| __ mov(R0, Operand(42)); |
| __ mov(R1, Operand(40)); |
| __ tst(R1, Operand(0)); |
| __ b(&skip, NE); |
| __ mov(R0, Operand(0)); |
| __ Bind(&skip); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Tst, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Lsl, assembler) { |
| Label skip; |
| |
| __ mov(R0, Operand(1)); |
| __ mov(R0, Operand(R0, LSL, 1)); |
| __ mov(R1, Operand(1)); |
| __ mov(R0, Operand(R0, LSL, R1)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Lsl, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Lsr, assembler) { |
| Label skip; |
| |
| __ mov(R0, Operand(4)); |
| __ mov(R0, Operand(R0, LSR, 1)); |
| __ mov(R1, Operand(1)); |
| __ mov(R0, Operand(R0, LSR, R1)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Lsr, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Lsr1, assembler) { |
| Label skip; |
| |
| __ mov(R0, Operand(1)); |
| __ Lsl(R0, R0, Operand(31)); |
| __ Lsr(R0, R0, Operand(31)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Lsr1, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Asr1, assembler) { |
| Label skip; |
| |
| __ mov(R0, Operand(1)); |
| __ Lsl(R0, R0, Operand(31)); |
| __ Asr(R0, R0, Operand(31)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Asr1, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Rsb, assembler) { |
| __ mov(R3, Operand(10)); |
| __ rsb(R0, R3, Operand(42)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Rsb, test) { |
| EXPECT(test != NULL); |
| typedef int (*Rsb)() DART_UNUSED; |
| EXPECT_EQ(32, EXECUTE_TEST_CODE_INT32(Rsb, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Ldrh, assembler) { |
| Label Test1, Test2, Test3, Done; |
| |
| __ mov(R1, Operand(0x11)); |
| __ mov(R2, Operand(SP)); |
| __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex)); |
| __ ldrh(R0, Address(R2, (-target::kWordSize * 30))); |
| __ cmp(R0, Operand(0x11)); |
| __ b(&Test1, EQ); |
| __ mov(R0, Operand(1)); |
| __ b(&Done); |
| __ Bind(&Test1); |
| |
| __ mov(R0, Operand(0x22)); |
| __ strh(R0, Address(R2, (-target::kWordSize * 30))); |
| __ ldrh(R1, Address(R2, (-target::kWordSize * 30))); |
| __ cmp(R1, Operand(0x22)); |
| __ b(&Test2, EQ); |
| __ mov(R0, Operand(1)); |
| __ b(&Done); |
| __ Bind(&Test2); |
| |
| __ mov(R0, Operand(0)); |
| __ AddImmediate(R2, (-target::kWordSize * 30)); |
| __ strh(R0, Address(R2)); |
| __ ldrh(R1, Address(R2)); |
| __ cmp(R1, Operand(0)); |
| __ b(&Test3, EQ); |
| __ mov(R0, Operand(1)); |
| __ b(&Done); |
| __ Bind(&Test3); |
| |
| __ mov(R0, Operand(0)); |
| __ Bind(&Done); |
| __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Ldrh, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Ldrsb, assembler) { |
| __ mov(R1, Operand(0xFF)); |
| __ mov(R2, Operand(SP)); |
| __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex)); |
| __ ldrsb(R0, Address(R2, (-target::kWordSize * 30))); |
| __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Ldrsb, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Ldrb, assembler) { |
| __ mov(R1, Operand(0xFF)); |
| __ mov(R2, Operand(SP)); |
| __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex)); |
| __ ldrb(R0, Address(R2, (-target::kWordSize * 30))); |
| __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Ldrb, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(0xff, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Ldrsh, assembler) { |
| __ mov(R1, Operand(0xFF)); |
| __ mov(R2, Operand(SP)); |
| __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex)); |
| __ ldrsh(R0, Address(R2, (-target::kWordSize * 30))); |
| __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Ldrsh, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(0xff, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Ldrh1, assembler) { |
| __ mov(R1, Operand(0xFF)); |
| __ mov(R2, Operand(SP)); |
| __ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex)); |
| __ ldrh(R0, Address(R2, (-target::kWordSize * 30))); |
| __ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Ldrh1, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(0xff, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Ldrd, assembler) { |
| __ mov(IP, Operand(SP)); |
| __ sub(SP, SP, Operand(target::kWordSize * 30)); |
| __ strd(R2, R3, SP, 0); |
| __ strd(R0, R1, IP, (-target::kWordSize * 28)); |
| __ ldrd(R2, R3, IP, (-target::kWordSize * 28)); |
| __ ldrd(R0, R1, SP, 0); |
| __ add(SP, SP, Operand(target::kWordSize * 30)); |
| __ sub(R0, R0, Operand(R2)); |
| __ add(R1, R1, Operand(R3)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Ldrd, test) { |
| EXPECT(test != NULL); |
| typedef int64_t (*Tst)(int64_t r0r1, int64_t r2r3) DART_UNUSED; |
| EXPECT_EQ(0x0000444400002222LL, |
| EXECUTE_TEST_CODE_INT64_LL(Tst, test->entry(), 0x0000111100000000LL, |
| 0x0000333300002222LL)); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Ldm_stm_da, assembler) { |
| __ mov(R0, Operand(1)); |
| __ mov(R1, Operand(7)); |
| __ mov(R2, Operand(11)); |
| __ mov(R3, Operand(31)); |
| __ Push(R9); // We use R9 as accumulator. |
| __ Push(R9); |
| __ Push(R9); |
| __ Push(R9); |
| __ Push(R9); |
| __ Push(R0); // Make room, so we can decrement after. |
| __ stm(DA_W, SP, (1 << R0 | 1 << R1 | 1 << R2 | 1 << R3)); |
| __ str(R2, Address(SP)); // Should be a free slot. |
| __ ldr(R9, Address(SP, 1 * target::kWordSize)); // R0. R9 = +1. |
| __ ldr(IP, Address(SP, 2 * target::kWordSize)); // R1. |
| __ sub(R9, R9, Operand(IP)); // -R1. R9 = -6. |
| __ ldr(IP, Address(SP, 3 * target::kWordSize)); // R2. |
| __ add(R9, R9, Operand(IP)); // +R2. R9 = +5. |
| __ ldr(IP, Address(SP, 4 * target::kWordSize)); // R3. |
| __ sub(R9, R9, Operand(IP)); // -R3. R9 = -26. |
| __ ldm(IB_W, SP, (1 << R0 | 1 << R1 | 1 << R2 | 1 << R3)); |
| // Same operations again. But this time from the restore registers. |
| __ add(R9, R9, Operand(R0)); |
| __ sub(R9, R9, Operand(R1)); |
| __ add(R9, R9, Operand(R2)); |
| __ sub(R0, R9, Operand(R3)); // R0 = result = -52. |
| __ Pop(R1); // Remove storage slot. |
| __ Pop(R9); // Restore R9. |
| __ Pop(R9); // Restore R9. |
| __ Pop(R9); // Restore R9. |
| __ Pop(R9); // Restore R9. |
| __ Pop(R9); // Restore R9. |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Ldm_stm_da, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-52, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(AddressShiftStrLSL1NegOffset, assembler) { |
| __ mov(R2, Operand(42)); |
| __ mov(R1, Operand(target::kWordSize)); |
| __ str(R2, Address(SP, R1, LSL, 1, Address::NegOffset)); |
| __ ldr(R0, Address(SP, (-target::kWordSize * 2), Address::Offset)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(AddressShiftStrLSL1NegOffset, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(AddressShiftLdrLSL5NegOffset, assembler) { |
| __ mov(R2, Operand(42)); |
| __ mov(R1, Operand(target::kWordSize)); |
| __ str(R2, Address(SP, (-target::kWordSize * 32), Address::Offset)); |
| __ ldr(R0, Address(SP, R1, LSL, 5, Address::NegOffset)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(AddressShiftLdrLSL5NegOffset, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(AddressShiftStrLRS1NegOffset, assembler) { |
| __ mov(R2, Operand(42)); |
| __ mov(R1, Operand(target::kWordSize * 2)); |
| __ str(R2, Address(SP, R1, LSR, 1, Address::NegOffset)); |
| __ ldr(R0, Address(SP, -target::kWordSize, Address::Offset)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(AddressShiftStrLRS1NegOffset, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(AddressShiftLdrLRS1NegOffset, assembler) { |
| __ mov(R2, Operand(42)); |
| __ mov(R1, Operand(target::kWordSize * 2)); |
| __ str(R2, Address(SP, -target::kWordSize, Address::Offset)); |
| __ ldr(R0, Address(SP, R1, LSR, 1, Address::NegOffset)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(AddressShiftLdrLRS1NegOffset, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(AddressShiftStrLSLNegPreIndex, assembler) { |
| __ mov(R2, Operand(42)); |
| __ mov(R1, Operand(target::kWordSize)); |
| __ mov(R3, Operand(SP)); |
| __ str(R2, Address(SP, R1, LSL, 5, Address::NegPreIndex)); |
| __ ldr(R0, Address(R3, (-target::kWordSize * 32), Address::Offset)); |
| __ mov(SP, Operand(R3)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(AddressShiftStrLSLNegPreIndex, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| ASSEMBLER_TEST_GENERATE(AddressShiftLdrLSLNegPreIndex, assembler) { |
| __ mov(R2, Operand(42)); |
| __ mov(R1, Operand(target::kWordSize)); |
| __ str(R2, Address(SP, (-target::kWordSize * 32), Address::PreIndex)); |
| __ ldr(R0, Address(SP, R1, LSL, 5, Address::PostIndex)); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(AddressShiftLdrLSLNegPreIndex, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
| // Make sure we can store and reload the D registers using vstmd and vldmd |
| ASSEMBLER_TEST_GENERATE(VstmdVldmd, assembler) { |
| if (TargetCPUFeatures::vfp_supported()) { |
| __ LoadDImmediate(D0, 0.0, R0); |
| __ LoadDImmediate(D1, 1.0, R0); |
| __ LoadDImmediate(D2, 2.0, R0); |
| __ LoadDImmediate(D3, 3.0, R0); |
| __ LoadDImmediate(D4, 4.0, R0); |
| __ vstmd(DB_W, SP, D0, 5); // Push D0 - D4 onto the stack, dec SP |
| __ LoadDImmediate(D0, 0.0, R0); |
| __ LoadDImmediate(D1, 0.0, R0); |
| __ LoadDImmediate(D2, 0.0, R0); |
| __ LoadDImmediate(D3, 0.0, R0); |
| __ LoadDImmediate(D4, 0.0, R0); |
| __ vldmd(IA_W, SP, D0, 5); // Pop stack into D0 - D4, inc SP |
| |
| // Load success value into R0 |
| __ mov(R0, Operand(42)); |
| |
| // Check that 4.0 is back in D4 |
| __ LoadDImmediate(D5, 4.0, R1); |
| __ vcmpd(D4, D5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
| |
| // Check that 3.0 is back in D3 |
| __ LoadDImmediate(D5, 3.0, R1); |
| __ vcmpd(D3, D5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
| |
| // Check that 2.0 is back in D2 |
| __ LoadDImmediate(D5, 2.0, R1); |
| __ vcmpd(D2, D5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
| |
| // Check that 1.0 is back in D1 |
| __ LoadDImmediate(D5, 1.0, R1); |
| __ vcmpd(D1, D5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
| } |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(VstmdVldmd, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
| // Make sure we can store and reload the S registers using vstms and vldms |
| ASSEMBLER_TEST_GENERATE(VstmsVldms, assembler) { |
| if (TargetCPUFeatures::vfp_supported()) { |
| __ LoadSImmediate(S0, 0.0); |
| __ LoadSImmediate(S1, 1.0); |
| __ LoadSImmediate(S2, 2.0); |
| __ LoadSImmediate(S3, 3.0); |
| __ LoadSImmediate(S4, 4.0); |
| __ vstms(DB_W, SP, S0, S4); // Push S0 - S4 onto the stack, dec SP |
| __ LoadSImmediate(S0, 0.0); |
| __ LoadSImmediate(S1, 0.0); |
| __ LoadSImmediate(S2, 0.0); |
| __ LoadSImmediate(S3, 0.0); |
| __ LoadSImmediate(S4, 0.0); |
| __ vldms(IA_W, SP, S0, S4); // Pop stack into S0 - S4, inc SP |
| |
| // Load success value into R0 |
| __ mov(R0, Operand(42)); |
| |
| // Check that 4.0 is back in S4 |
| __ LoadSImmediate(S5, 4.0); |
| __ vcmps(S4, S5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
| |
| // Check that 3.0 is back in S3 |
| __ LoadSImmediate(S5, 3.0); |
| __ vcmps(S3, S5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
| |
| // Check that 2.0 is back in S2 |
| __ LoadSImmediate(S5, 2.0); |
| __ vcmps(S2, S5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
| |
| // Check that 1.0 is back in S1 |
| __ LoadSImmediate(S5, 1.0); |
| __ vcmps(S1, S5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
| } |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(VstmsVldms, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
| // Make sure we can start somewhere other than D0 |
| ASSEMBLER_TEST_GENERATE(VstmdVldmd1, assembler) { |
| if (TargetCPUFeatures::vfp_supported()) { |
| __ LoadDImmediate(D1, 1.0, R0); |
| __ LoadDImmediate(D2, 2.0, R0); |
| __ LoadDImmediate(D3, 3.0, R0); |
| __ LoadDImmediate(D4, 4.0, R0); |
| __ vstmd(DB_W, SP, D1, 4); // Push D1 - D4 onto the stack, dec SP |
| __ LoadDImmediate(D1, 0.0, R0); |
| __ LoadDImmediate(D2, 0.0, R0); |
| __ LoadDImmediate(D3, 0.0, R0); |
| __ LoadDImmediate(D4, 0.0, R0); |
| __ vldmd(IA_W, SP, D1, 4); // Pop stack into D1 - D4, inc SP |
| |
| // Load success value into R0 |
| __ mov(R0, Operand(42)); |
| |
| // Check that 4.0 is back in D4 |
| __ LoadDImmediate(D5, 4.0, R1); |
| __ vcmpd(D4, D5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
| |
| // Check that 3.0 is back in D3 |
| __ LoadDImmediate(D5, 3.0, R1); |
| __ vcmpd(D3, D5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
| |
| // Check that 2.0 is back in D2 |
| __ LoadDImmediate(D5, 2.0, R1); |
| __ vcmpd(D2, D5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
| |
| // Check that 1.0 is back in D1 |
| __ LoadDImmediate(D5, 1.0, R1); |
| __ vcmpd(D1, D5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
| } |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(VstmdVldmd1, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
| // Make sure we can start somewhere other than S0 |
| ASSEMBLER_TEST_GENERATE(VstmsVldms1, assembler) { |
| if (TargetCPUFeatures::vfp_supported()) { |
| __ LoadSImmediate(S1, 1.0); |
| __ LoadSImmediate(S2, 2.0); |
| __ LoadSImmediate(S3, 3.0); |
| __ LoadSImmediate(S4, 4.0); |
| __ vstms(DB_W, SP, S1, S4); // Push S0 - S4 onto the stack, dec SP |
| __ LoadSImmediate(S1, 0.0); |
| __ LoadSImmediate(S2, 0.0); |
| __ LoadSImmediate(S3, 0.0); |
| __ LoadSImmediate(S4, 0.0); |
| __ vldms(IA_W, SP, S1, S4); // Pop stack into S0 - S4, inc SP |
| |
| // Load success value into R0 |
| __ mov(R0, Operand(42)); |
| |
| // Check that 4.0 is back in S4 |
| __ LoadSImmediate(S5, 4.0); |
| __ vcmps(S4, S5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
| |
| // Check that 3.0 is back in S3 |
| __ LoadSImmediate(S5, 3.0); |
| __ vcmps(S3, S5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
| |
| // Check that 2.0 is back in S2 |
| __ LoadSImmediate(S5, 2.0); |
| __ vcmps(S2, S5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
| |
| // Check that 1.0 is back in S1 |
| __ LoadSImmediate(S5, 1.0); |
| __ vcmps(S1, S5); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
| } |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(VstmsVldms1, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
| // Make sure we can store the D registers using vstmd and |
| // load them into a different set using vldmd |
| ASSEMBLER_TEST_GENERATE(VstmdVldmd_off, assembler) { |
| if (TargetCPUFeatures::vfp_supported()) { |
| // Save used callee-saved FPU registers. |
| __ vstmd(DB_W, SP, D8, 3); |
| __ LoadDImmediate(D0, 0.0, R0); |
| __ LoadDImmediate(D1, 1.0, R0); |
| __ LoadDImmediate(D2, 2.0, R0); |
| __ LoadDImmediate(D3, 3.0, R0); |
| __ LoadDImmediate(D4, 4.0, R0); |
| __ LoadDImmediate(D5, 5.0, R0); |
| __ vstmd(DB_W, SP, D0, 5); // Push D0 - D4 onto the stack, dec SP |
| __ vldmd(IA_W, SP, D5, 5); // Pop stack into D5 - D9, inc SP |
| |
| // Load success value into R0 |
| __ mov(R0, Operand(42)); |
| |
| // Check that 4.0 is in D9 |
| __ LoadDImmediate(D10, 4.0, R1); |
| __ vcmpd(D9, D10); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
| |
| // Check that 3.0 is in D8 |
| __ LoadDImmediate(D10, 3.0, R1); |
| __ vcmpd(D8, D10); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
| |
| // Check that 2.0 is in D7 |
| __ LoadDImmediate(D10, 2.0, R1); |
| __ vcmpd(D7, D10); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
| |
| // Check that 1.0 is in D6 |
| __ LoadDImmediate(D10, 1.0, R1); |
| __ vcmpd(D6, D10); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
| |
| // Check that 0.0 is in D5 |
| __ LoadDImmediate(D10, 0.0, R1); |
| __ vcmpd(D5, D10); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure into R0 if NE |
| |
| // Restore used callee-saved FPU registers. |
| __ vldmd(IA_W, SP, D8, 3); |
| } |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(VstmdVldmd_off, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
| // Make sure we can start somewhere other than S0 |
| ASSEMBLER_TEST_GENERATE(VstmsVldms_off, assembler) { |
| if (TargetCPUFeatures::vfp_supported()) { |
| __ LoadSImmediate(S0, 0.0); |
| __ LoadSImmediate(S1, 1.0); |
| __ LoadSImmediate(S2, 2.0); |
| __ LoadSImmediate(S3, 3.0); |
| __ LoadSImmediate(S4, 4.0); |
| __ LoadSImmediate(S5, 5.0); |
| __ vstms(DB_W, SP, S0, S4); // Push S0 - S4 onto the stack, dec SP |
| __ vldms(IA_W, SP, S5, S9); // Pop stack into S5 - S9, inc SP |
| |
| // Load success value into R0 |
| __ mov(R0, Operand(42)); |
| |
| // Check that 4.0 is in S9 |
| __ LoadSImmediate(S10, 4.0); |
| __ vcmps(S9, S10); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
| |
| // Check that 3.0 is in S8 |
| __ LoadSImmediate(S10, 3.0); |
| __ vcmps(S8, S10); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
| |
| // Check that 2.0 is in S7 |
| __ LoadSImmediate(S10, 2.0); |
| __ vcmps(S7, S10); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
| |
| // Check that 1.0 is back in S6 |
| __ LoadSImmediate(S10, 1.0); |
| __ vcmps(S6, S10); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
| |
| // Check that 0.0 is back in S5 |
| __ LoadSImmediate(S10, 0.0); |
| __ vcmps(S5, S10); |
| __ vmstat(); |
| __ mov(R0, Operand(0), NE); // Put failure value into R0 if NE |
| } |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(VstmsVldms_off, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::vfp_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
| // 3 2 1 0 |
| // 10987654321098765432109876543210 |
| static constexpr uint32_t kBfxTestBits = 0b00010000001000001000010001001011; |
| |
| static int32_t ExpectedUbfxBitPattern(uint8_t lsb, uint8_t width) { |
| ASSERT(width >= 1); |
| ASSERT(width < 32); |
| ASSERT(lsb < 32); |
| ASSERT(lsb + width <= 32); |
| return (kBfxTestBits & (Utils::NBitMask(width) << lsb)) >> lsb; |
| } |
| |
| static int32_t ExpectedSbfxBitPattern(uint8_t lsb, uint8_t width) { |
| const uint32_t no_extension = ExpectedUbfxBitPattern(lsb, width); |
| const uint32_t sign_extension = |
| Utils::TestBit(no_extension, width - 1) ? ~Utils::NBitMask(width) : 0; |
| return no_extension | sign_extension; |
| } |
| |
// (lsb, width, extracted bit field is signed)
// Each V(...) row expands into one ubfx and one sbfx test pair below.
#define BFX_TEST_CASES(V)                                                      \
  V(0, 1, true)                                                                \
  V(0, 8, false)                                                               \
  V(0, 11, true) V(0, 19, false) V(3, 20, false) V(10, 19, true) V(31, 1, false)

// Generates a GENERATE/RUN test pair for both ubfx and sbfx at (lsb L,
// width W). Each generated test extracts the field from kBfxTestBits and
// compares against the Expected{U,S}bfxBitPattern reference computed on the
// host. The ASSERTs cross-check the S flag from the table: the unsigned and
// signed extractions must differ exactly when the field is marked signed.
#define GENERATE_BFX_TEST(L, W, S)                                             \
  ASSEMBLER_TEST_GENERATE(UbfxLSB##L##Width##W, assembler) {                   \
    __ LoadImmediate(R1, kBfxTestBits);                                        \
    __ ubfx(R0, R1, L, W);                                                     \
    __ Ret();                                                                  \
  }                                                                            \
  ASSEMBLER_TEST_RUN(UbfxLSB##L##Width##W, test) {                             \
    EXPECT(test != nullptr);                                                   \
    typedef int (*Tst)() DART_UNUSED;                                          \
    ASSERT((ExpectedUbfxBitPattern(L, W) == ExpectedSbfxBitPattern(L, W)) !=   \
           S);                                                                 \
    EXPECT_EQ(ExpectedUbfxBitPattern(L, W),                                    \
              EXECUTE_TEST_CODE_INT32(Tst, test->entry()));                    \
  }                                                                            \
  ASSEMBLER_TEST_GENERATE(SbfxLSB##L##Width##W, assembler) {                   \
    __ LoadImmediate(R1, kBfxTestBits);                                        \
    __ sbfx(R0, R1, L, W);                                                     \
    __ Ret();                                                                  \
  }                                                                            \
  ASSEMBLER_TEST_RUN(SbfxLSB##L##Width##W, test) {                             \
    EXPECT(test != nullptr);                                                   \
    typedef int (*Tst)() DART_UNUSED;                                          \
    ASSERT((ExpectedUbfxBitPattern(L, W) == ExpectedSbfxBitPattern(L, W)) !=   \
           S);                                                                 \
    EXPECT_EQ(ExpectedSbfxBitPattern(L, W),                                    \
              EXECUTE_TEST_CODE_INT32(Tst, test->entry()));                    \
  }

BFX_TEST_CASES(GENERATE_BFX_TEST)

#undef GENERATE_BFX_TEST
#undef BFX_TEST_CASES
| |
| ASSEMBLER_TEST_GENERATE(Udiv, assembler) { |
| if (TargetCPUFeatures::integer_division_supported()) { |
| __ mov(R0, Operand(27)); |
| __ mov(R1, Operand(9)); |
| __ udiv(R2, R0, R1); |
| __ mov(R0, Operand(R2)); |
| } |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Udiv, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::integer_division_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Sdiv, assembler) { |
| if (TargetCPUFeatures::integer_division_supported()) { |
| __ mov(R0, Operand(27)); |
| __ LoadImmediate(R1, -9); |
| __ sdiv(R2, R0, R1); |
| __ mov(R0, Operand(R2)); |
| } |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Sdiv, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::integer_division_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Udiv_zero, assembler) { |
| if (TargetCPUFeatures::integer_division_supported()) { |
| __ mov(R0, Operand(27)); |
| __ mov(R1, Operand(0)); |
| __ udiv(R2, R0, R1); |
| __ mov(R0, Operand(R2)); |
| } |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Udiv_zero, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::integer_division_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Sdiv_zero, assembler) { |
| if (TargetCPUFeatures::integer_division_supported()) { |
| __ mov(R0, Operand(27)); |
| __ mov(R1, Operand(0)); |
| __ sdiv(R2, R0, R1); |
| __ mov(R0, Operand(R2)); |
| } |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Sdiv_zero, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::integer_division_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Udiv_corner, assembler) { |
| if (TargetCPUFeatures::integer_division_supported()) { |
| __ LoadImmediate(R0, 0x80000000); |
| __ LoadImmediate(R1, 0xffffffff); |
| __ udiv(R2, R0, R1); |
| __ mov(R0, Operand(R2)); |
| } |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Udiv_corner, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::integer_division_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(0, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Sdiv_corner, assembler) { |
| if (TargetCPUFeatures::integer_division_supported()) { |
| __ LoadImmediate(R0, 0x80000000); |
| __ LoadImmediate(R1, 0xffffffff); |
| __ sdiv(R2, R0, R1); |
| __ mov(R0, Operand(R2)); |
| } |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Sdiv_corner, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::integer_division_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(static_cast<int32_t>(0x80000000), |
| EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
| ASSEMBLER_TEST_GENERATE(IntDiv_supported, assembler) { |
| #if defined(USING_SIMULATOR) |
| bool orig = TargetCPUFeatures::integer_division_supported(); |
| HostCPUFeatures::set_integer_division_supported(true); |
| __ mov(R0, Operand(27)); |
| __ mov(R1, Operand(9)); |
| __ IntegerDivide(R0, R0, R1, D0, D1); |
| HostCPUFeatures::set_integer_division_supported(orig); |
| __ Ret(); |
| #else |
| if (TargetCPUFeatures::can_divide()) { |
| __ mov(R0, Operand(27)); |
| __ mov(R1, Operand(9)); |
| __ IntegerDivide(R0, R0, R1, D0, D1); |
| } |
| __ Ret(); |
| #endif |
| } |
| |
| ASSEMBLER_TEST_RUN(IntDiv_supported, test) { |
| EXPECT(test != NULL); |
| #if defined(USING_SIMULATOR) |
| bool orig = TargetCPUFeatures::integer_division_supported(); |
| HostCPUFeatures::set_integer_division_supported(true); |
| if (TargetCPUFeatures::can_divide()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| HostCPUFeatures::set_integer_division_supported(orig); |
| #else |
| if (TargetCPUFeatures::can_divide()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| #endif |
| } |
| |
| ASSEMBLER_TEST_GENERATE(IntDiv_unsupported, assembler) { |
| #if defined(USING_SIMULATOR) |
| if (TargetCPUFeatures::can_divide()) { |
| bool orig = TargetCPUFeatures::integer_division_supported(); |
| HostCPUFeatures::set_integer_division_supported(false); |
| __ mov(R0, Operand(27)); |
| __ mov(R1, Operand(9)); |
| __ IntegerDivide(R0, R0, R1, D0, D1); |
| HostCPUFeatures::set_integer_division_supported(orig); |
| } |
| __ Ret(); |
| #else |
| if (TargetCPUFeatures::can_divide()) { |
| __ mov(R0, Operand(27)); |
| __ mov(R1, Operand(9)); |
| __ IntegerDivide(R0, R0, R1, D0, D1); |
| } |
| __ Ret(); |
| #endif |
| } |
| |
| ASSEMBLER_TEST_RUN(IntDiv_unsupported, test) { |
| EXPECT(test != NULL); |
| #if defined(USING_SIMULATOR) |
| bool orig = TargetCPUFeatures::integer_division_supported(); |
| HostCPUFeatures::set_integer_division_supported(false); |
| if (TargetCPUFeatures::can_divide()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| HostCPUFeatures::set_integer_division_supported(orig); |
| #else |
| if (TargetCPUFeatures::can_divide()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| #endif |
| } |
| |
| ASSEMBLER_TEST_GENERATE(Muls, assembler) { |
| __ mov(R0, Operand(3)); |
| __ LoadImmediate(R1, -9); |
| __ muls(R2, R0, R1); |
| __ mov(R0, Operand(42), MI); |
| __ Ret(); |
| } |
| |
| ASSEMBLER_TEST_RUN(Muls, test) { |
| EXPECT(test != NULL); |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| |
// Generates a NEON byte-wise add of Q0 {1,2,3,4} and Q1 {5,6,7,8} (one small
// value per 32-bit lane), then returns the sum of the four result lanes in R0.
ASSEMBLER_TEST_GENERATE(Vaddqi8, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    __ mov(R0, Operand(5));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(6));
    __ vmovsr(S5, R0);
    __ mov(R0, Operand(7));
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(8));
    __ vmovsr(S7, R0);

    // Q2 = Q0 + Q1, element size 8 bits.
    __ vaddqi(kByte, Q2, Q0, Q1);

    // Reduce the four 32-bit lanes of Q2 (S8..S11) into R0.
    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vaddqi8, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(36, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON halfword-wise add of Q0 {1,2,3,4} and Q1 {5,6,7,8}, then
// returns the sum of the four 32-bit result lanes in R0.
ASSEMBLER_TEST_GENERATE(Vaddqi16, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    __ mov(R0, Operand(5));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(6));
    __ vmovsr(S5, R0);
    __ mov(R0, Operand(7));
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(8));
    __ vmovsr(S7, R0);

    // Q2 = Q0 + Q1, element size 16 bits.
    __ vaddqi(kTwoBytes, Q2, Q0, Q1);

    // Reduce the four 32-bit lanes of Q2 (S8..S11) into R0.
    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vaddqi16, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(36, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON word-wise add of Q0 {1,2,3,4} and Q1 {5,6,7,8}, then
// returns the sum of the four 32-bit result lanes in R0.
ASSEMBLER_TEST_GENERATE(Vaddqi32, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    __ mov(R0, Operand(5));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(6));
    __ vmovsr(S5, R0);
    __ mov(R0, Operand(7));
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(8));
    __ vmovsr(S7, R0);

    // Q2 = Q0 + Q1, element size 32 bits.
    __ vaddqi(kFourBytes, Q2, Q0, Q1);

    // Reduce the four 32-bit lanes of Q2 (S8..S11) into R0.
    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vaddqi32, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(36, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON 64-bit add: Q0 holds {1, 2} and Q1 holds {3, 4} as doubleword
// lanes (only the low word of each lane is set), then returns the sum of the two
// low result words (4 + 6 = 10) in R0.
ASSEMBLER_TEST_GENERATE(Vaddqi64, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S6, R0);

    // Q2 = Q0 + Q1, element size 64 bits.
    __ vaddqi(kWordPair, Q2, Q0, Q1);

    // Low words of the two 64-bit result lanes.
    __ vmovrs(R0, S8);
    __ vmovrs(R2, S10);

    __ add(R0, R0, Operand(R2));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vaddqi64, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON unsigned 64-bit shift: both lanes of Q0 hold 21 (low word)
// and both lanes of Q1 hold shift amount 1. Returns 1 in R0 iff both result
// lanes equal 42, 0 otherwise.
ASSEMBLER_TEST_GENERATE(Vshlqu64, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    Label fail;
    __ LoadImmediate(R1, 21);
    __ LoadImmediate(R0, 1);
    __ vmovsr(S0, R1);
    __ vmovsr(S2, R1);
    __ vmovsr(S4, R0);
    __ vmovsr(S6, R0);

    // Q2 = Q0 << Q1 (per-lane shift amounts), element size 64 bits.
    __ vshlqu(kWordPair, Q2, Q0, Q1);

    __ vmovrs(R0, S8);
    __ vmovrs(R1, S10);
    // R0 is preloaded with the failure value before each conditional branch;
    // the loads do not affect the flags set by CompareImmediate.
    __ CompareImmediate(R0, 42);
    __ LoadImmediate(R0, 0);
    __ b(&fail, NE);
    __ CompareImmediate(R1, 42);
    __ LoadImmediate(R0, 0);
    __ b(&fail, NE);

    __ LoadImmediate(R0, 1);
    __ Bind(&fail);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vshlqu64, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON signed 64-bit shift: both lanes of Q0 hold -84, and the low
// byte of each Q1 lane is -1, i.e. an arithmetic right shift by 1. Returns 1
// in R0 iff both result lanes equal -42, 0 otherwise.
ASSEMBLER_TEST_GENERATE(Vshlqi64, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    Label fail;
    __ LoadImmediate(R1, -84);
    __ LoadImmediate(R0, -1);
    __ vmovdrr(D0, R1, R0);  // D0 = 64-bit -84 (low = -84, high = -1).
    __ vmovdrr(D1, R1, R0);
    __ vmovsr(S4, R0);  // Shift amount byte = 0xff = -1 (shift right).
    __ vmovsr(S6, R0);

    // Q2 = Q0 << Q1 (signed; negative amounts shift right), 64-bit lanes.
    __ vshlqi(kWordPair, Q2, Q0, Q1);

    __ vmovrs(R0, S8);
    __ vmovrs(R1, S10);
    // R0 is preloaded with the failure value before each conditional branch;
    // the loads do not affect the flags set by CompareImmediate.
    __ CompareImmediate(R0, -42);
    __ LoadImmediate(R0, 0);
    __ b(&fail, NE);
    __ CompareImmediate(R1, -42);
    __ LoadImmediate(R0, 0);
    __ b(&fail, NE);

    __ LoadImmediate(R0, 1);
    __ Bind(&fail);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vshlqi64, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates the Mint shift-left overflow-check sequence used for 64-bit ints:
// shift (1 << 7) left by 32, shift the result back right, and compare with the
// original via vceqqi. Here no overflow occurs, so the check passes and R0 = 1.
ASSEMBLER_TEST_GENERATE(Mint_shl_ok, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    const QRegister value = Q0;
    const QRegister temp = Q1;
    const QRegister out = Q2;
    const Register shift = R1;
    const DRegister dtemp0 = EvenDRegisterOf(temp);
    const SRegister stemp0 = EvenSRegisterOf(dtemp0);
    const DRegister dout0 = EvenDRegisterOf(out);
    const SRegister sout0 = EvenSRegisterOf(dout0);
    const SRegister sout1 = OddSRegisterOf(dout0);
    Label fail;

    // Initialize.
    __ veorq(value, value, value);
    __ veorq(temp, temp, temp);
    __ veorq(out, out, out);
    __ LoadImmediate(shift, 32);  // In-range shift: 128 << 32 fits in 64 bits.
    __ LoadImmediate(R2, 1 << 7);
    __ vmovsr(S0, R2);

    __ vmovsr(stemp0, shift);  // Move the shift into the low S register.
    __ vshlqu(kWordPair, out, value, temp);

    // check for overflow by shifting back and comparing.
    __ rsb(shift, shift, Operand(0));  // Negate: vshlqi shifts right.
    __ vmovsr(stemp0, shift);
    __ vshlqi(kWordPair, temp, out, temp);
    __ vceqqi(kFourBytes, out, temp, value);
    // Low 64 bits of temp should be all 1's, otherwise temp != value and
    // we deopt.
    __ vmovrs(shift, sout0);
    __ CompareImmediate(shift, -1);
    __ b(&fail, NE);
    __ vmovrs(shift, sout1);
    __ CompareImmediate(shift, -1);
    __ b(&fail, NE);

    __ LoadImmediate(R0, 1);
    __ Ret();

    __ Bind(&fail);
    __ LoadImmediate(R0, 0);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Mint_shl_ok, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates the Mint shift-left overflow-check sequence with a shift of 60:
// (1 << 7) << 60 overflows 64 bits, so the shift-back comparison fails and the
// code takes the fail path, which here returns 1 (overflow detected).
ASSEMBLER_TEST_GENERATE(Mint_shl_overflow, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    const QRegister value = Q0;
    const QRegister temp = Q1;
    const QRegister out = Q2;
    const Register shift = R1;
    const DRegister dtemp0 = EvenDRegisterOf(temp);
    const SRegister stemp0 = EvenSRegisterOf(dtemp0);
    const DRegister dout0 = EvenDRegisterOf(out);
    const SRegister sout0 = EvenSRegisterOf(dout0);
    const SRegister sout1 = OddSRegisterOf(dout0);
    Label fail;

    // Initialize.
    __ veorq(value, value, value);
    __ veorq(temp, temp, temp);
    __ veorq(out, out, out);
    __ LoadImmediate(shift, 60);  // Out-of-range shift: 128 << 60 overflows.
    __ LoadImmediate(R2, 1 << 7);
    __ vmovsr(S0, R2);

    __ vmovsr(stemp0, shift);  // Move the shift into the low S register.
    __ vshlqu(kWordPair, out, value, temp);

    // check for overflow by shifting back and comparing.
    __ rsb(shift, shift, Operand(0));  // Negate: vshlqi shifts right.
    __ vmovsr(stemp0, shift);
    __ vshlqi(kWordPair, temp, out, temp);
    __ vceqqi(kFourBytes, out, temp, value);
    // Low 64 bits of temp should be all 1's, otherwise temp != value and
    // we deopt.
    __ vmovrs(shift, sout0);
    __ CompareImmediate(shift, -1);
    __ b(&fail, NE);
    __ vmovrs(shift, sout1);
    __ CompareImmediate(shift, -1);
    __ b(&fail, NE);

    __ LoadImmediate(R0, 0);
    __ Ret();

    __ Bind(&fail);
    __ LoadImmediate(R0, 1);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Mint_shl_overflow, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON byte-wise subtract Q2 = Q1 - Q0 with Q0 = {1,2,3,4} and
// Q1 = {2,4,6,8}, then returns the sum of the four result lanes in R0.
ASSEMBLER_TEST_GENERATE(Vsubqi8, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S5, R0);
    __ mov(R0, Operand(6));
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(8));
    __ vmovsr(S7, R0);

    // Q2 = Q1 - Q0, element size 8 bits.
    __ vsubqi(kByte, Q2, Q1, Q0);

    // Reduce the four 32-bit lanes of Q2 (S8..S11) into R0.
    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vsubqi8, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON halfword-wise subtract Q2 = Q1 - Q0 with Q0 = {1,2,3,4} and
// Q1 = {2,4,6,8}, then returns the sum of the four result lanes in R0.
ASSEMBLER_TEST_GENERATE(Vsubqi16, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S5, R0);
    __ mov(R0, Operand(6));
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(8));
    __ vmovsr(S7, R0);

    // Q2 = Q1 - Q0, element size 16 bits.
    __ vsubqi(kTwoBytes, Q2, Q1, Q0);

    // Reduce the four 32-bit lanes of Q2 (S8..S11) into R0.
    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vsubqi16, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON word-wise subtract Q2 = Q1 - Q0 with Q0 = {1,2,3,4} and
// Q1 = {2,4,6,8}, then returns the sum of the four result lanes in R0.
ASSEMBLER_TEST_GENERATE(Vsubqi32, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S5, R0);
    __ mov(R0, Operand(6));
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(8));
    __ vmovsr(S7, R0);

    // Q2 = Q1 - Q0, element size 32 bits.
    __ vsubqi(kFourBytes, Q2, Q1, Q0);

    // Reduce the four 32-bit lanes of Q2 (S8..S11) into R0.
    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vsubqi32, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON 64-bit subtract Q2 = Q1 - Q0 with Q0 = {1, 2} and
// Q1 = {2, 4} as doubleword lanes, then returns the sum of the two low result
// words (1 + 2 = 3) in R0.
ASSEMBLER_TEST_GENERATE(Vsubqi64, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S6, R0);

    // Q2 = Q1 - Q0, element size 64 bits.
    __ vsubqi(kWordPair, Q2, Q1, Q0);

    // Low words of the two 64-bit result lanes.
    __ vmovrs(R0, S8);
    __ vmovrs(R2, S10);

    __ add(R0, R0, Operand(R2));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vsubqi64, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(3, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON byte-wise multiply Q2 = Q1 * Q0 with Q0 = {1,2,3,4} and
// Q1 = {5,6,7,8}, then returns the sum of the four result lanes in R0.
ASSEMBLER_TEST_GENERATE(Vmulqi8, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    __ mov(R0, Operand(5));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(6));
    __ vmovsr(S5, R0);
    __ mov(R0, Operand(7));
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(8));
    __ vmovsr(S7, R0);

    // Q2 = Q1 * Q0, element size 8 bits.
    __ vmulqi(kByte, Q2, Q1, Q0);

    // Reduce the four 32-bit lanes of Q2 (S8..S11) into R0.
    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vmulqi8, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(70, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON halfword-wise multiply Q2 = Q1 * Q0 with Q0 = {1,2,3,4} and
// Q1 = {5,6,7,8}, then returns the sum of the four result lanes in R0.
ASSEMBLER_TEST_GENERATE(Vmulqi16, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    __ mov(R0, Operand(5));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(6));
    __ vmovsr(S5, R0);
    __ mov(R0, Operand(7));
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(8));
    __ vmovsr(S7, R0);

    // Q2 = Q1 * Q0, element size 16 bits.
    __ vmulqi(kTwoBytes, Q2, Q1, Q0);

    // Reduce the four 32-bit lanes of Q2 (S8..S11) into R0.
    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vmulqi16, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(70, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON word-wise multiply Q2 = Q1 * Q0 with Q0 = {1,2,3,4} and
// Q1 = {5,6,7,8}, then returns the sum of the four result lanes in R0.
ASSEMBLER_TEST_GENERATE(Vmulqi32, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    __ mov(R0, Operand(5));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(6));
    __ vmovsr(S5, R0);
    __ mov(R0, Operand(7));
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(8));
    __ vmovsr(S7, R0);

    // Q2 = Q1 * Q0, element size 32 bits.
    __ vmulqi(kFourBytes, Q2, Q1, Q0);

    // Reduce the four 32-bit lanes of Q2 (S8..S11) into R0.
    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vmulqi32, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(70, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON single-precision float add Q2 = Q0 + Q1, reduces the four
// result lanes with scalar vadds, and returns the truncated integer sum in R0.
ASSEMBLER_TEST_GENERATE(Vaddqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ LoadSImmediate(S0, 1.0);
    __ LoadSImmediate(S1, 2.0);
    __ LoadSImmediate(S2, 3.0);
    __ LoadSImmediate(S3, 4.0);
    __ LoadSImmediate(S4, 5.0);
    __ LoadSImmediate(S5, 6.0);
    __ LoadSImmediate(S6, 7.0);
    __ LoadSImmediate(S7, 8.0);

    __ vaddqs(Q2, Q0, Q1);

    // Horizontal reduction of Q2 (S8..S11).
    __ vadds(S8, S8, S9);
    __ vadds(S8, S8, S10);
    __ vadds(S8, S8, S11);

    __ vcvtis(S0, S8);  // Float -> int for the integer return convention.
    __ vmovrs(R0, S0);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vaddqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(36, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON single-precision float subtract Q2 = Q1 - Q0, reduces the
// four result lanes, and returns the truncated integer sum in R0.
ASSEMBLER_TEST_GENERATE(Vsubqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ LoadSImmediate(S0, 1.0);
    __ LoadSImmediate(S1, 2.0);
    __ LoadSImmediate(S2, 3.0);
    __ LoadSImmediate(S3, 4.0);
    __ LoadSImmediate(S4, 2.0);
    __ LoadSImmediate(S5, 4.0);
    __ LoadSImmediate(S6, 6.0);
    __ LoadSImmediate(S7, 8.0);

    __ vsubqs(Q2, Q1, Q0);

    // Horizontal reduction of Q2 (S8..S11).
    __ vadds(S8, S8, S9);
    __ vadds(S8, S8, S10);
    __ vadds(S8, S8, S11);

    __ vcvtis(S0, S8);  // Float -> int for the integer return convention.
    __ vmovrs(R0, S0);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vsubqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(10, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON single-precision float multiply Q2 = Q1 * Q0, reduces the
// four result lanes, and returns the truncated integer sum in R0.
ASSEMBLER_TEST_GENERATE(Vmulqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ LoadSImmediate(S0, 1.0);
    __ LoadSImmediate(S1, 2.0);
    __ LoadSImmediate(S2, 3.0);
    __ LoadSImmediate(S3, 4.0);
    __ LoadSImmediate(S4, 5.0);
    __ LoadSImmediate(S5, 6.0);
    __ LoadSImmediate(S6, 7.0);
    __ LoadSImmediate(S7, 8.0);

    __ vmulqs(Q2, Q1, Q0);

    // Horizontal reduction of Q2 (S8..S11).
    __ vadds(S8, S8, S9);
    __ vadds(S8, S8, S10);
    __ vadds(S8, S8, S11);

    __ vcvtis(S0, S8);  // Float -> int for the integer return convention.
    __ vmovrs(R0, S0);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vmulqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(70, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a vtbl lookup selecting table bytes 0..3 (the first word of the
// two-register table D1:D2, i.e. S2 = 1.0) into both words of D3. Returns 42
// in R0 on success, 0 otherwise.
ASSEMBLER_TEST_GENERATE(VtblX, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Index.
    __ LoadImmediate(R0, 0x03020100);
    __ vmovsr(S0, R0);
    __ vmovsr(S1, R0);

    // Table.
    __ LoadSImmediate(S2, 1.0);
    __ LoadSImmediate(S3, 2.0);
    __ LoadSImmediate(S4, 3.0);
    __ LoadSImmediate(S5, 4.0);

    // Select.
    __ vtbl(D3, D1, 2, D0);

    // Check that S6, S7 are both 1.0
    __ vcvtis(S0, S6);
    __ vcvtis(S1, S7);
    __ vmovrs(R2, S0);
    __ vmovrs(R3, S1);

    __ LoadImmediate(R0, 0);
    __ CompareImmediate(R2, 1);
    __ Ret(NE);
    __ CompareImmediate(R3, 1);
    __ Ret(NE);
    __ LoadImmediate(R0, 42);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(VtblX, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a vtbl lookup selecting table bytes 4..7 (the second word of the
// two-register table, i.e. S3 = 1.0) into both words of D3. Returns 42 in R0
// on success, 0 otherwise.
ASSEMBLER_TEST_GENERATE(VtblY, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Index.
    __ LoadImmediate(R0, 0x07060504);
    __ vmovsr(S0, R0);
    __ vmovsr(S1, R0);

    // Table.
    __ LoadSImmediate(S2, 2.0);
    __ LoadSImmediate(S3, 1.0);
    __ LoadSImmediate(S4, 3.0);
    __ LoadSImmediate(S5, 4.0);

    // Select.
    __ vtbl(D3, D1, 2, D0);

    // Check that S6, S7 are both 1.0
    __ vcvtis(S0, S6);
    __ vcvtis(S1, S7);
    __ vmovrs(R2, S0);
    __ vmovrs(R3, S1);

    __ LoadImmediate(R0, 0);
    __ CompareImmediate(R2, 1);
    __ Ret(NE);
    __ CompareImmediate(R3, 1);
    __ Ret(NE);
    __ LoadImmediate(R0, 42);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(VtblY, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a vtbl lookup selecting table bytes 8..11 (the third word of the
// two-register table, i.e. S4 = 1.0) into both words of D3. Returns 42 in R0
// on success, 0 otherwise.
ASSEMBLER_TEST_GENERATE(VtblZ, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Index.
    __ LoadImmediate(R0, 0x0b0a0908);
    __ vmovsr(S0, R0);
    __ vmovsr(S1, R0);

    // Table.
    __ LoadSImmediate(S2, 2.0);
    __ LoadSImmediate(S3, 3.0);
    __ LoadSImmediate(S4, 1.0);
    __ LoadSImmediate(S5, 4.0);

    // Select.
    __ vtbl(D3, D1, 2, D0);

    // Check that S6, S7 are both 1.0
    __ vcvtis(S0, S6);
    __ vcvtis(S1, S7);
    __ vmovrs(R2, S0);
    __ vmovrs(R3, S1);

    __ LoadImmediate(R0, 0);
    __ CompareImmediate(R2, 1);
    __ Ret(NE);
    __ CompareImmediate(R3, 1);
    __ Ret(NE);
    __ LoadImmediate(R0, 42);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(VtblZ, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a vtbl lookup selecting table bytes 12..15 (the fourth word of the
// two-register table, i.e. S5 = 1.0) into both words of D3. Returns 42 in R0
// on success, 0 otherwise.
ASSEMBLER_TEST_GENERATE(VtblW, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Index.
    __ LoadImmediate(R0, 0x0f0e0d0c);
    __ vmovsr(S0, R0);
    __ vmovsr(S1, R0);

    // Table.
    __ LoadSImmediate(S2, 2.0);
    __ LoadSImmediate(S3, 3.0);
    __ LoadSImmediate(S4, 4.0);
    __ LoadSImmediate(S5, 1.0);

    // Select.
    __ vtbl(D3, D1, 2, D0);

    // Check that S6, S7 are both 1.0
    __ vcvtis(S0, S6);
    __ vcvtis(S1, S7);
    __ vmovrs(R2, S0);
    __ vmovrs(R3, S1);

    __ LoadImmediate(R0, 0);
    __ CompareImmediate(R2, 1);
    __ Ret(NE);
    __ CompareImmediate(R3, 1);
    __ Ret(NE);
    __ LoadImmediate(R0, 42);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(VtblW, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON bitwise XOR: 0xaaaaaaab ^ 0x55555555 = 0xfffffffe (-2) per
// lane, then returns the sum of the four result lanes (-8) in R0.
ASSEMBLER_TEST_GENERATE(Veorq, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q0
    __ LoadImmediate(R0, 0xaaaaaaab);
    __ vmovsr(S0, R0);
    __ vmovsr(S1, R0);
    __ vmovsr(S2, R0);
    __ vmovsr(S3, R0);

    // Q1
    __ LoadImmediate(R0, 0x55555555);
    __ vmovsr(S4, R0);
    __ vmovsr(S5, R0);
    __ vmovsr(S6, R0);
    __ vmovsr(S7, R0);

    // Q2 = -2 -2 -2 -2
    __ veorq(Q2, Q1, Q0);

    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Veorq, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-8, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON OR-NOT: Q2 = Q1 | ~Q0 = 0 | ~0xfffffff0 = 15 per lane, then
// returns the sum of the four result lanes (60) in R0.
ASSEMBLER_TEST_GENERATE(Vornq, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q0
    __ LoadImmediate(R0, 0xfffffff0);
    __ vmovsr(S0, R0);
    __ vmovsr(S1, R0);
    __ vmovsr(S2, R0);
    __ vmovsr(S3, R0);

    // Q1
    __ LoadImmediate(R0, 0);
    __ vmovsr(S4, R0);
    __ vmovsr(S5, R0);
    __ vmovsr(S6, R0);
    __ vmovsr(S7, R0);

    // Q2 = 15 15 15 15
    __ vornq(Q2, Q1, Q0);

    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vornq, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(60, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON bitwise OR: 0xaaaaaaaa | 0x55555555 = 0xffffffff (-1) per
// lane, then returns the sum of the four result lanes (-4) in R0.
ASSEMBLER_TEST_GENERATE(Vorrq, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q0
    __ LoadImmediate(R0, 0xaaaaaaaa);
    __ vmovsr(S0, R0);
    __ vmovsr(S1, R0);
    __ vmovsr(S2, R0);
    __ vmovsr(S3, R0);

    // Q1
    __ LoadImmediate(R0, 0x55555555);
    __ vmovsr(S4, R0);
    __ vmovsr(S5, R0);
    __ vmovsr(S6, R0);
    __ vmovsr(S7, R0);

    // Q2 = -1 -1 -1 -1
    __ vorrq(Q2, Q1, Q0);

    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vorrq, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON bitwise AND: 0xaaaaaaab & 0x55555555 = 1 per lane, then
// returns the sum of the four result lanes (4) in R0.
ASSEMBLER_TEST_GENERATE(Vandq, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q0
    __ LoadImmediate(R0, 0xaaaaaaab);
    __ vmovsr(S0, R0);
    __ vmovsr(S1, R0);
    __ vmovsr(S2, R0);
    __ vmovsr(S3, R0);

    // Q1
    __ LoadImmediate(R0, 0x55555555);
    __ vmovsr(S4, R0);
    __ vmovsr(S5, R0);
    __ vmovsr(S6, R0);
    __ vmovsr(S7, R0);

    // Q2 = 1 1 1 1
    __ vandq(Q2, Q1, Q0);

    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vandq, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a quad-register move: fills Q0 with 1.0, copies Q0 into Q1 with
// vmovq, and returns the integer sum of Q1's lanes (4) in R0. Original
// comments mislabeled the second register group and the move direction.
ASSEMBLER_TEST_GENERATE(Vmovq, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q0 = {1.0, 1.0, 1.0, 1.0}.
    __ LoadSImmediate(S0, 1.0);
    __ vmovs(S1, S0);
    __ vmovs(S2, S0);
    __ vmovs(S3, S0);

    // Q1: scribble values that the vmovq below must overwrite. Note that
    // S5..S7 copy S0 (1.0), so only S4 (-1.0) actually differs from Q0.
    __ LoadSImmediate(S4, -1.0);
    __ vmovs(S5, S0);
    __ vmovs(S6, S0);
    __ vmovs(S7, S0);

    // Q1 = Q0.
    __ vmovq(Q1, Q0);

    // Sum Q1's lanes (S4..S7) and convert to int.
    __ vadds(S4, S4, S5);
    __ vadds(S4, S4, S6);
    __ vadds(S4, S4, S7);
    __ vcvtis(S0, S4);
    __ vmovrs(R0, S0);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vmovq, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a double bitwise-NOT through quad registers: ~~42 == 42, so the
// original value round-trips into R0.
ASSEMBLER_TEST_GENERATE(Vmvnq, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ LoadImmediate(R1, 42);  // R1 <- 42.
    __ vmovsr(S2, R1);         // S2 <- R1.
    __ vmvnq(Q1, Q0);          // Q1 <- ~Q0.
    __ vmvnq(Q2, Q1);          // Q2 <- ~Q1.
    __ vmovrs(R0, S10);        // Now R0 should be 42 again.
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vmvnq, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(42, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a byte-lane duplicate: byte lane 6 of D2 (the 0xff inside S5's
// 0x00ff0000) is broadcast to every byte of Q0, making each word 0xffffffff
// (-1); the four lanes sum to -4 in R0.
ASSEMBLER_TEST_GENERATE(Vdupb, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ LoadImmediate(R0, 0x00000000);
    __ LoadImmediate(R1, 0x00ff0000);
    __ vmovsr(S4, R0);
    __ vmovsr(S5, R1);

    // Should copy 0xff to each byte of Q0.
    __ vdup(kByte, Q0, D2, 6);

    __ vmovrs(R0, S0);
    __ vmovrs(R1, S1);
    __ vmovrs(R2, S2);
    __ vmovrs(R3, S3);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vdupb, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a halfword-lane duplicate: halfword lane 1 of D2 (the 0xffff in
// the upper half of S4) is broadcast to every halfword of Q0, making each word
// 0xffffffff (-1); the four lanes sum to -4 in R0.
ASSEMBLER_TEST_GENERATE(Vduph, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ LoadImmediate(R0, 0xffff0000);
    __ LoadImmediate(R1, 0x00000000);
    __ vmovsr(S4, R0);
    __ vmovsr(S5, R1);

    // Should copy 0xffff (halfword lane 1 of D2) to each halfword of Q0.
    __ vdup(kTwoBytes, Q0, D2, 1);

    __ vmovrs(R0, S0);
    __ vmovrs(R1, S1);
    __ vmovrs(R2, S2);
    __ vmovrs(R3, S3);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vduph, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a word-lane duplicate: word lane 1 of D2 (S5 = 0xffffffff) is
// broadcast to every word of Q0; the four lanes of -1 sum to -4 in R0.
ASSEMBLER_TEST_GENERATE(Vdupw, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ LoadImmediate(R0, 0x00000000);
    __ LoadImmediate(R1, 0xffffffff);
    __ vmovsr(S4, R0);
    __ vmovsr(S5, R1);

    // Should copy 0xffffffff (word lane 1 of D2) to each word of Q0.
    __ vdup(kFourBytes, Q0, D2, 1);

    __ vmovrs(R0, S0);
    __ vmovrs(R1, S1);
    __ vmovrs(R2, S2);
    __ vmovrs(R3, S3);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vdupw, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-4, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a word zip of Q0 {0,1,2,3} and Q1 {4,5,6,7}: after vzipqw,
// Q0 = {0,4,1,5} and Q1 = {2,6,3,7}, so Q1 - Q0 = {2,2,2,2}. The lanes are
// summed into S0, returning 8.0 as a float.
ASSEMBLER_TEST_GENERATE(Vzipqw, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ LoadSImmediate(S0, 0.0);
    __ LoadSImmediate(S1, 1.0);
    __ LoadSImmediate(S2, 2.0);
    __ LoadSImmediate(S3, 3.0);
    __ LoadSImmediate(S4, 4.0);
    __ LoadSImmediate(S5, 5.0);
    __ LoadSImmediate(S6, 6.0);
    __ LoadSImmediate(S7, 7.0);

    __ vzipqw(Q0, Q1);

    __ vsubqs(Q0, Q1, Q0);

    // Horizontal reduction of Q0 into S0 (the float return register).
    __ vadds(S0, S0, S1);
    __ vadds(S0, S0, S2);
    __ vadds(S0, S0, S3);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vzipqw, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef float (*Vzipqw)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(Vzipqw, test->entry()); |
| EXPECT_FLOAT_EQ(8.0, res, 0.0001f); |
| } |
| } |
| |
// Generates a NEON word compare-equal of Q0 {1,2,3,4} and Q1 {1,20,3,40}:
// matching lanes become all-ones (-1), others 0, so the lane sum is -2 in R0.
ASSEMBLER_TEST_GENERATE(Vceqqi32, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    __ mov(R0, Operand(1));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(20));
    __ vmovsr(S5, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(40));
    __ vmovsr(S7, R0);

    // Q2 = (Q1 == Q0) per 32-bit lane.
    __ vceqqi(kFourBytes, Q2, Q1, Q0);

    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vceqqi32, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON float compare-equal of Q0 {1,2,3,4} and Q1 {1,4,3,8}:
// matching lanes become all-ones (-1), others 0, so the lane sum is -2 in R0.
ASSEMBLER_TEST_GENERATE(Vceqqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ LoadSImmediate(S0, 1.0);
    __ LoadSImmediate(S1, 2.0);
    __ LoadSImmediate(S2, 3.0);
    __ LoadSImmediate(S3, 4.0);
    __ LoadSImmediate(S4, 1.0);
    __ LoadSImmediate(S5, 4.0);
    __ LoadSImmediate(S6, 3.0);
    __ LoadSImmediate(S7, 8.0);

    // Q2 = (Q1 == Q0) per single-precision lane.
    __ vceqqs(Q2, Q1, Q0);

    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vceqqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON signed word compare Q2 = (Q1 >= Q0) with Q0 {1,2,3,4} and
// Q1 {1,1,3,1}: lanes 0 and 2 hold (-1), so the lane sum is -2 in R0.
ASSEMBLER_TEST_GENERATE(Vcgeqi32, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    __ mov(R0, Operand(1));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(1));
    __ vmovsr(S5, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(1));
    __ vmovsr(S7, R0);

    // Q2 = (Q1 >= Q0), signed 32-bit lanes.
    __ vcgeqi(kFourBytes, Q2, Q1, Q0);

    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vcgeqi32, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Generates a NEON unsigned word compare Q2 = (Q1 >= Q0) with Q0 {1,2,3,4} and
// Q1 {-1,1,-3,1}: the -1 and -3 lanes compare as huge unsigned values, so
// lanes 0 and 2 are true (-1) and the lane sum is -2 in R0.
ASSEMBLER_TEST_GENERATE(Vcugeqi32, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    __ LoadImmediate(R0, -1);
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(1));
    __ vmovsr(S5, R0);
    __ LoadImmediate(R0, -3);
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(1));
    __ vmovsr(S7, R0);

    // Q2 = (Q1 >= Q0), unsigned 32-bit lanes.
    __ vcugeqi(kFourBytes, Q2, Q1, Q0);

    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vcugeqi32, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Emits NEON code for vcge.f32 (float >=) on quad registers:
// Q2 = (Q1 >= Q0) lane-wise, with Q0 = {1,2,3,4} and Q1 = {1,1,3,1}.
// Lanes 0 and 2 are true (-1 as int32), lanes 1 and 3 false (0); sum = -2.
ASSEMBLER_TEST_GENERATE(Vcgeqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q0 = {1.0, 2.0, 3.0, 4.0} (S0..S3).
    __ LoadSImmediate(S0, 1.0);
    __ LoadSImmediate(S1, 2.0);
    __ LoadSImmediate(S2, 3.0);
    __ LoadSImmediate(S3, 4.0);
    // Q1 = {1.0, 1.0, 3.0, 1.0} (S4..S7).
    __ LoadSImmediate(S4, 1.0);
    __ LoadSImmediate(S5, 1.0);
    __ LoadSImmediate(S6, 3.0);
    __ LoadSImmediate(S7, 1.0);

    __ vcgeqs(Q2, Q1, Q0);

    // Collect the four int32 lane results of Q2 (S8..S11).
    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    // R0 = sum of the four lane results.
    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vcgeqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Emits NEON code for vcgt.s32 (signed >) on quad registers:
// Q2 = (Q1 > Q0) lane-wise, with Q0 = {1,2,3,4} and Q1 = {2,1,4,1}.
// Lanes 0 and 2 are true (-1), lanes 1 and 3 false (0); sum in R0 is -2.
ASSEMBLER_TEST_GENERATE(Vcgtqi32, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q0 = {1, 2, 3, 4} (int32 lanes via S0..S3).
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    // Q1 = {2, 1, 4, 1} (int32 lanes via S4..S7).
    __ mov(R0, Operand(2));
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(1));
    __ vmovsr(S5, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(1));
    __ vmovsr(S7, R0);

    __ vcgtqi(kFourBytes, Q2, Q1, Q0);

    // Collect the four lane results of Q2 (S8..S11).
    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    // R0 = sum of the four lane results.
    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vcgtqi32, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Emits NEON code for vcgt.u32 (unsigned >) on quad registers:
// Q2 = (Q1 > Q0) lane-wise, with Q0 = {1,2,3,4} and Q1 = {-1,1,-3,1}.
// Interpreted unsigned, -1 and -3 are huge values, so lanes 0 and 2 are
// true (-1) and lanes 1 and 3 false (0); the sum in R0 is -2.
ASSEMBLER_TEST_GENERATE(Vcugtqi32, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q0 = {1, 2, 3, 4} (int32 lanes via S0..S3).
    __ mov(R0, Operand(1));
    __ vmovsr(S0, R0);
    __ mov(R0, Operand(2));
    __ vmovsr(S1, R0);
    __ mov(R0, Operand(3));
    __ vmovsr(S2, R0);
    __ mov(R0, Operand(4));
    __ vmovsr(S3, R0);
    // Q1 = {-1, 1, -3, 1}; the negatives exercise the unsigned comparison.
    __ LoadImmediate(R0, -1);
    __ vmovsr(S4, R0);
    __ mov(R0, Operand(1));
    __ vmovsr(S5, R0);
    __ LoadImmediate(R0, -3);
    __ vmovsr(S6, R0);
    __ mov(R0, Operand(1));
    __ vmovsr(S7, R0);

    __ vcugtqi(kFourBytes, Q2, Q1, Q0);

    // Collect the four lane results of Q2 (S8..S11).
    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    // R0 = sum of the four lane results.
    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vcugtqi32, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Emits NEON code for vcgt.f32 (float >) on quad registers:
// Q2 = (Q1 > Q0) lane-wise, with Q0 = {1,2,3,4} and Q1 = {2,1,4,1}.
// Lanes 0 and 2 are true (-1 as int32), lanes 1 and 3 false (0); sum = -2.
ASSEMBLER_TEST_GENERATE(Vcgtqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q0 = {1.0, 2.0, 3.0, 4.0} (S0..S3).
    __ LoadSImmediate(S0, 1.0);
    __ LoadSImmediate(S1, 2.0);
    __ LoadSImmediate(S2, 3.0);
    __ LoadSImmediate(S3, 4.0);
    // Q1 = {2.0, 1.0, 4.0, 1.0} (S4..S7).
    __ LoadSImmediate(S4, 2.0);
    __ LoadSImmediate(S5, 1.0);
    __ LoadSImmediate(S6, 4.0);
    __ LoadSImmediate(S7, 1.0);

    __ vcgtqs(Q2, Q1, Q0);

    // Collect the four int32 lane results of Q2 (S8..S11).
    __ vmovrs(R0, S8);
    __ vmovrs(R1, S9);
    __ vmovrs(R2, S10);
    __ vmovrs(R3, S11);

    // R0 = sum of the four lane results.
    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R2));
    __ add(R0, R0, Operand(R3));
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vcgtqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(-2, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Emits NEON code for vmin.f32 on quad registers: Q2 = min(Q1, Q0) lane-wise,
// with Q0 = {1,2,3,4} and Q1 = {2,1,6,3}, giving {1,1,3,3}. The lanes are
// summed (8.0), converted to int32, and returned in R0.
ASSEMBLER_TEST_GENERATE(Vminqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q0 = {1.0, 2.0, 3.0, 4.0} (S0..S3).
    __ LoadSImmediate(S0, 1.0);
    __ LoadSImmediate(S1, 2.0);
    __ LoadSImmediate(S2, 3.0);
    __ LoadSImmediate(S3, 4.0);

    // Q1 = {2.0, 1.0, 6.0, 3.0} (S4..S7).
    __ LoadSImmediate(S4, 2.0);
    __ LoadSImmediate(S5, 1.0);
    __ LoadSImmediate(S6, 6.0);
    __ LoadSImmediate(S7, 3.0);

    __ vminqs(Q2, Q1, Q0);

    // S8 = sum of the four result lanes of Q2 (S8..S11).
    __ vadds(S8, S8, S9);
    __ vadds(S8, S8, S10);
    __ vadds(S8, S8, S11);

    // Convert the float sum to int32 and return it in R0.
    __ vcvtis(S0, S8);
    __ vmovrs(R0, S0);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vminqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(8, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Emits NEON code for vmax.f32 on quad registers: Q2 = max(Q1, Q0) lane-wise,
// with Q0 = {1,2,3,4} and Q1 = {2,1,6,3}, giving {2,2,6,4}. The lanes are
// summed (14.0), converted to int32, and returned in R0.
ASSEMBLER_TEST_GENERATE(Vmaxqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q0 = {1.0, 2.0, 3.0, 4.0} (S0..S3).
    __ LoadSImmediate(S0, 1.0);
    __ LoadSImmediate(S1, 2.0);
    __ LoadSImmediate(S2, 3.0);
    __ LoadSImmediate(S3, 4.0);

    // Q1 = {2.0, 1.0, 6.0, 3.0} (S4..S7).
    __ LoadSImmediate(S4, 2.0);
    __ LoadSImmediate(S5, 1.0);
    __ LoadSImmediate(S6, 6.0);
    __ LoadSImmediate(S7, 3.0);

    __ vmaxqs(Q2, Q1, Q0);

    // S8 = sum of the four result lanes of Q2 (S8..S11).
    __ vadds(S8, S8, S9);
    __ vadds(S8, S8, S10);
    __ vadds(S8, S8, S11);

    // Convert the float sum to int32 and return it in R0.
    __ vcvtis(S0, S8);
    __ vmovrs(R0, S0);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vmaxqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef int (*Tst)() DART_UNUSED; |
| EXPECT_EQ(14, EXECUTE_TEST_CODE_INT32(Tst, test->entry())); |
| } |
| } |
| |
// Emits NEON code for vrecpe.f32: Q0 = reciprocal estimate of Q1, where all
// four lanes of Q1 are 147.0. Lane 0 of Q0 (i.e. S0) is the float return
// value, to be compared against the software ReciprocalEstimate model.
ASSEMBLER_TEST_GENERATE(Vrecpeqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q1 = {147.0, 147.0, 147.0, 147.0} (S4..S7).
    __ LoadSImmediate(S4, 147.0);
    __ vmovs(S5, S4);
    __ vmovs(S6, S4);
    __ vmovs(S7, S4);
    __ vrecpeqs(Q0, Q1);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vrecpeqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef float (*Vrecpeqs)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(Vrecpeqs, test->entry()); |
| EXPECT_FLOAT_EQ(ReciprocalEstimate(147.0), res, 0.0001f); |
| } |
| } |
| |
// Emits NEON code for vrecps.f32 (Newton-Raphson reciprocal step):
// each lane of Q0 = 2.0 - Q1[i] * Q2[i]. Lane 0 is the float return value:
// 2.0 - 5.0 * 10.0 = -48.0.
ASSEMBLER_TEST_GENERATE(Vrecpsqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q1 = {5.0, 2.0, 3.0, 4.0} (S4..S7).
    __ LoadSImmediate(S4, 5.0);
    __ LoadSImmediate(S5, 2.0);
    __ LoadSImmediate(S6, 3.0);
    __ LoadSImmediate(S7, 4.0);

    // Q2 = {10.0, 1.0, 6.0, 3.0} (S8..S11).
    __ LoadSImmediate(S8, 10.0);
    __ LoadSImmediate(S9, 1.0);
    __ LoadSImmediate(S10, 6.0);
    __ LoadSImmediate(S11, 3.0);

    __ vrecpsqs(Q0, Q1, Q2);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vrecpsqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef float (*Vrecpsqs)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(Vrecpsqs, test->entry()); |
| EXPECT_FLOAT_EQ(2.0 - 10.0 * 5.0, res, 0.0001f); |
| } |
| } |
| |
// Emits the full NEON reciprocal computation: an initial vrecpe estimate
// refined by two Newton-Raphson steps (vrecps + vmul each). All lanes of
// Q1 hold 147000.0; lane 0 of Q0 returns approximately 1.0 / 147000.0.
ASSEMBLER_TEST_GENERATE(Reciprocal, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q1 = {147000.0, ...} in all four lanes (S4..S7).
    __ LoadSImmediate(S4, 147000.0);
    __ vmovs(S5, S4);
    __ vmovs(S6, S4);
    __ vmovs(S7, S4);

    // Reciprocal estimate.
    __ vrecpeqs(Q0, Q1);
    // 2 Newton-Raphson steps.
    __ vrecpsqs(Q2, Q1, Q0);
    __ vmulqs(Q0, Q0, Q2);
    __ vrecpsqs(Q2, Q1, Q0);
    __ vmulqs(Q0, Q0, Q2);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Reciprocal, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef float (*Reciprocal)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(Reciprocal, test->entry()); |
| EXPECT_FLOAT_EQ(1.0 / 147000.0, res, 0.0001f); |
| } |
| } |
| |
// Emits NEON code for vrsqrte.f32: Q0 = reciprocal square-root estimate of
// Q1, where all four lanes of Q1 are 147.0. Lane 0 (S0) is the float return
// value, compared against the software ReciprocalSqrtEstimate model.
ASSEMBLER_TEST_GENERATE(Vrsqrteqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q1 = {147.0, 147.0, 147.0, 147.0} (S4..S7).
    __ LoadSImmediate(S4, 147.0);
    __ vmovs(S5, S4);
    __ vmovs(S6, S4);
    __ vmovs(S7, S4);

    __ vrsqrteqs(Q0, Q1);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vrsqrteqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef float (*Vrsqrteqs)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(Vrsqrteqs, test->entry()); |
| EXPECT_FLOAT_EQ(ReciprocalSqrtEstimate(147.0), res, 0.0001f); |
| } |
| } |
| |
// Emits NEON code for vrsqrts.f32 (Newton-Raphson reciprocal-sqrt step):
// each lane of Q0 = (3.0 - Q1[i] * Q2[i]) / 2.0. Lane 0 is the float return
// value: (3.0 - 5.0 * 10.0) / 2.0 = -23.5.
ASSEMBLER_TEST_GENERATE(Vrsqrtsqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q1 = {5.0, 2.0, 3.0, 4.0} (S4..S7).
    __ LoadSImmediate(S4, 5.0);
    __ LoadSImmediate(S5, 2.0);
    __ LoadSImmediate(S6, 3.0);
    __ LoadSImmediate(S7, 4.0);

    // Q2 = {10.0, 1.0, 6.0, 3.0} (S8..S11).
    __ LoadSImmediate(S8, 10.0);
    __ LoadSImmediate(S9, 1.0);
    __ LoadSImmediate(S10, 6.0);
    __ LoadSImmediate(S11, 3.0);

    __ vrsqrtsqs(Q0, Q1, Q2);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vrsqrtsqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef float (*Vrsqrtsqs)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(Vrsqrtsqs, test->entry()); |
| EXPECT_FLOAT_EQ((3.0 - 10.0 * 5.0) / 2.0, res, 0.0001f); |
| } |
| } |
| |
// Emits the full NEON reciprocal-square-root computation: a vrsqrte estimate
// refined by two Newton-Raphson steps (square, vrsqrts, multiply). All lanes
// of Q1 hold 147000.0; lane 0 of Q0 returns ~ 1.0 / sqrt(147000.0).
ASSEMBLER_TEST_GENERATE(ReciprocalSqrt, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q1 = {147000.0, ...} in all four lanes (S4..S7).
    __ LoadSImmediate(S4, 147000.0);
    __ vmovs(S5, S4);
    __ vmovs(S6, S4);
    __ vmovs(S7, S4);

    // Reciprocal square root estimate.
    __ vrsqrteqs(Q0, Q1);
    // 2 Newton-Raphson steps. xn+1 = xn * (3 - Q1*xn^2) / 2.
    // First step.
    __ vmulqs(Q2, Q0, Q0);     // Q2 <- xn^2
    __ vrsqrtsqs(Q2, Q1, Q2);  // Q2 <- (3 - Q1*Q2) / 2.
    __ vmulqs(Q0, Q0, Q2);     // xn+1 <- xn * Q2
    // Second step.
    __ vmulqs(Q2, Q0, Q0);
    __ vrsqrtsqs(Q2, Q1, Q2);
    __ vmulqs(Q0, Q0, Q2);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(ReciprocalSqrt, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef float (*ReciprocalSqrt)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(ReciprocalSqrt, test->entry()); |
| EXPECT_FLOAT_EQ(1.0 / sqrt(147000.0), res, 0.0001f); |
| } |
| } |
| |
// Emits a NEON square-root computation built from estimates only:
// sqrt(x) = 1 / (1/sqrt(x)), i.e. a refined vrsqrte followed by a refined
// vrecpe. All lanes of Q1 hold 147000.0; lane 0 returns ~ sqrt(147000.0).
ASSEMBLER_TEST_GENERATE(SIMDSqrt, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q1 = {147000.0, ...} in all four lanes (S4..S7).
    __ LoadSImmediate(S4, 147000.0);
    __ vmovs(S5, S4);
    __ vmovs(S6, S4);
    __ vmovs(S7, S4);

    // Reciprocal square root estimate.
    __ vrsqrteqs(Q0, Q1);
    // 2 Newton-Raphson steps. xn+1 = xn * (3 - Q1*xn^2) / 2.
    // First step.
    __ vmulqs(Q2, Q0, Q0);     // Q2 <- xn^2
    __ vrsqrtsqs(Q2, Q1, Q2);  // Q2 <- (3 - Q1*Q2) / 2.
    __ vmulqs(Q0, Q0, Q2);     // xn+1 <- xn * Q2
    // Second step.
    __ vmulqs(Q2, Q0, Q0);
    __ vrsqrtsqs(Q2, Q1, Q2);
    __ vmulqs(Q0, Q0, Q2);

    // Reciprocal. Q0 now holds 1/sqrt(x); invert it to get sqrt(x).
    __ vmovq(Q1, Q0);
    // Reciprocal estimate.
    __ vrecpeqs(Q0, Q1);
    // 2 Newton-Raphson steps.
    __ vrecpsqs(Q2, Q1, Q0);
    __ vmulqs(Q0, Q0, Q2);
    __ vrecpsqs(Q2, Q1, Q0);
    __ vmulqs(Q0, Q0, Q2);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(SIMDSqrt, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef float (*SIMDSqrt)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(SIMDSqrt, test->entry()); |
| EXPECT_FLOAT_EQ(sqrt(147000.0), res, 0.0001f); |
| } |
| } |
| |
// Emits the same estimate-based NEON sqrt as SIMDSqrt, but on four distinct
// lanes Q1 = {1, 4, 9, 16}. The per-lane square roots {1, 2, 3, 4} are then
// summed into S0, so the expected float return value is 10.0.
ASSEMBLER_TEST_GENERATE(SIMDSqrt2, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q1 = {1.0, 4.0, 9.0, 16.0} (S4..S7).
    __ LoadSImmediate(S4, 1.0);
    __ LoadSImmediate(S5, 4.0);
    __ LoadSImmediate(S6, 9.0);
    __ LoadSImmediate(S7, 16.0);

    // Reciprocal square root estimate.
    __ vrsqrteqs(Q0, Q1);
    // 2 Newton-Raphson steps. xn+1 = xn * (3 - Q1*xn^2) / 2.
    // First step.
    __ vmulqs(Q2, Q0, Q0);     // Q2 <- xn^2
    __ vrsqrtsqs(Q2, Q1, Q2);  // Q2 <- (3 - Q1*Q2) / 2.
    __ vmulqs(Q0, Q0, Q2);     // xn+1 <- xn * Q2
    // Second step.
    __ vmulqs(Q2, Q0, Q0);
    __ vrsqrtsqs(Q2, Q1, Q2);
    __ vmulqs(Q0, Q0, Q2);

    // Reciprocal. Q0 now holds 1/sqrt(x); invert it to get sqrt(x).
    __ vmovq(Q1, Q0);
    // Reciprocal estimate.
    __ vrecpeqs(Q0, Q1);
    // 2 Newton-Raphson steps.
    __ vrecpsqs(Q2, Q1, Q0);
    __ vmulqs(Q0, Q0, Q2);
    __ vrecpsqs(Q2, Q1, Q0);
    __ vmulqs(Q0, Q0, Q2);

    // S0 = sum of the four sqrt lanes.
    __ vadds(S0, S0, S1);
    __ vadds(S0, S0, S2);
    __ vadds(S0, S0, S3);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(SIMDSqrt2, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef float (*SIMDSqrt2)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(SIMDSqrt2, test->entry()); |
| EXPECT_FLOAT_EQ(10.0, res, 0.0001f); |
| } |
| } |
| |
// Emits a NEON division Q3 / Q1 computed as Q3 * (1/Q1) using a refined
// vrecpe estimate. With Q1 = {1, 4, 9, 16} and Q3 = {4, 16, 36, 64} every
// quotient lane is 4.0; the lanes are summed into S0 for a result of 16.0.
ASSEMBLER_TEST_GENERATE(SIMDDiv, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q1 = {1.0, 4.0, 9.0, 16.0} (S4..S7): the divisors.
    __ LoadSImmediate(S4, 1.0);
    __ LoadSImmediate(S5, 4.0);
    __ LoadSImmediate(S6, 9.0);
    __ LoadSImmediate(S7, 16.0);

    // Q3 = {4.0, 16.0, 36.0, 64.0} (S12..S15): the dividends.
    __ LoadSImmediate(S12, 4.0);
    __ LoadSImmediate(S13, 16.0);
    __ LoadSImmediate(S14, 36.0);
    __ LoadSImmediate(S15, 64.0);

    // Reciprocal estimate.
    __ vrecpeqs(Q0, Q1);
    // 2 Newton-Raphson steps.
    __ vrecpsqs(Q2, Q1, Q0);
    __ vmulqs(Q0, Q0, Q2);
    __ vrecpsqs(Q2, Q1, Q0);
    __ vmulqs(Q0, Q0, Q2);

    // Q0 = Q3 * (1/Q1), then S0 = sum of the four quotient lanes.
    __ vmulqs(Q0, Q3, Q0);
    __ vadds(S0, S0, S1);
    __ vadds(S0, S0, S2);
    __ vadds(S0, S0, S3);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(SIMDDiv, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef float (*SIMDDiv)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(SIMDDiv, test->entry()); |
| EXPECT_FLOAT_EQ(16.0, res, 0.0001f); |
| } |
| } |
| |
// Emits NEON code for vabs.f32 on quad registers: Q0 = |Q1| lane-wise, with
// Q1 = {1, -1, 1, -1} giving {1, 1, 1, 1}. The lanes are summed into S0,
// so the expected float return value is 4.0.
ASSEMBLER_TEST_GENERATE(Vabsqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q1 = {1.0, -1.0, 1.0, -1.0} (S4..S7).
    __ LoadSImmediate(S4, 1.0);
    __ LoadSImmediate(S5, -1.0);
    __ LoadSImmediate(S6, 1.0);
    __ LoadSImmediate(S7, -1.0);

    __ vabsqs(Q0, Q1);

    // S0 = sum of the four absolute-value lanes.
    __ vadds(S0, S0, S1);
    __ vadds(S0, S0, S2);
    __ vadds(S0, S0, S3);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vabsqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef float (*Vabsqs)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(Vabsqs, test->entry()); |
| EXPECT_FLOAT_EQ(4.0, res, 0.0001f); |
| } |
| } |
| |
// Emits NEON code for vneg.f32 on quad registers: Q0 = -Q1 lane-wise, with
// Q1 = {1, -2, 1, -2} giving {-1, 2, -1, 2}. The lanes are summed into S0,
// so the expected float return value is 2.0.
ASSEMBLER_TEST_GENERATE(Vnegqs, assembler) {
  if (TargetCPUFeatures::neon_supported()) {
    // Q1 = {1.0, -2.0, 1.0, -2.0} (S4..S7).
    __ LoadSImmediate(S4, 1.0);
    __ LoadSImmediate(S5, -2.0);
    __ LoadSImmediate(S6, 1.0);
    __ LoadSImmediate(S7, -2.0);

    __ vnegqs(Q0, Q1);

    // S0 = sum of the four negated lanes.
    __ vadds(S0, S0, S1);
    __ vadds(S0, S0, S2);
    __ vadds(S0, S0, S3);
  }
  __ Ret();
}
| |
| ASSEMBLER_TEST_RUN(Vnegqs, test) { |
| EXPECT(test != NULL); |
| if (TargetCPUFeatures::neon_supported()) { |
| typedef float (*Vnegqs)() DART_UNUSED; |
| float res = EXECUTE_TEST_CODE_FLOAT(Vnegqs, test->entry()); |
| EXPECT_FLOAT_EQ(2.0, res, 0.0001f); |
| } |
| } |
| |
| // Called from assembler_test.cc. |
| // LR: return address. |
| // R0: value. |
| // R1: growable array. |
| // R2: current thread. |
// Emits a store of R0 into the data field of the growable array in R1 via
// the generational write barrier. THR must point at the current thread for
// the barrier, so it is set from R2 after saving the caller's LR and THR.
ASSEMBLER_TEST_GENERATE(StoreIntoObject, assembler) {
  // Preserve the caller's LR and THR across the call.
  SPILLS_LR_TO_FRAME(__ PushList((1 << LR) | (1 << THR)));
  // The write barrier reads thread state through THR.
  __ mov(THR, Operand(R2));
  __ StoreIntoObject(R1, FieldAddress(R1, GrowableObjectArray::data_offset()),
                     R0);
  RESTORES_LR_FROM_FRAME(__ PopList((1 << LR) | (1 << THR)));
  __ Ret();
}
| |
| } // namespace compiler |
| } // namespace dart |
| |
| #endif // defined TARGET_ARCH_ARM |