[vm] Use 31-bit Smis in compressed pointer mode.

The range of Smis must be reduced so that pointer compression is not lossy: a tagged Smi has to fit in the same 32 bits as a compressed pointer, leaving 31 value bits (30 magnitude bits plus the sign).

Smis are kept in sign-extended form. In the future, we might track down all the places that perform 64-bit Smi comparisons or otherwise assume sign extension; once those are fixed, the upper half of the register could be allowed to hold garbage, which would make it safe to unconditionally add the heap base during pointer decompression.
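
As a rough sketch of the resulting Smi layout (illustrative helper names only, not VM code; the constants mirror the kSmiBits/kSmiMin/kSmiMax values this change adds to runtime_api.h):

  // Sketch: 31-bit Smi tagging under DART_COMPRESSED_POINTERS.
  #include <cstdint>

  constexpr int kSmiBits = 30;  // magnitude bits, not counting the sign bit
  constexpr int64_t kSmiMax = (INT64_C(1) << kSmiBits) - 1;  //  1073741823
  constexpr int64_t kSmiMin = -(INT64_C(1) << kSmiBits);     // -1073741824

  constexpr bool FitsInSmi(int64_t value) {
    return (value >= kSmiMin) && (value <= kSmiMax);
  }

  // Tag in the low 32 bits (kSmiTagShift == 1, kSmiTag == 0), then sign-extend
  // so the upper half of the 64-bit register stays canonical.
  inline int64_t TagSmi(int64_t value) {
    int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    return static_cast<int64_t>(tagged);  // implicit sign extension
  }

  inline int64_t UntagSmi(int64_t tagged) {
    return tagged >> 1;  // arithmetic shift restores the original value
  }

The assembler changes below follow the same shape: perform the arithmetic as a 32-bit operation so overflow is detected at the 31-bit boundary, then sign-extend the result back to 64 bits.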

Based on cf78da8a48b886cbd70e6c50dd7461a621065e31.

TEST=ci
Change-Id: If8c76da3166c170618e5b3f3991353bab5c84e6f
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/181760
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Liam Appelbe <liama@google.com>
diff --git a/runtime/vm/compiler/asm_intrinsifier_arm64.cc b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
index c2adf98..6bef2d7 100644
--- a/runtime/vm/compiler/asm_intrinsifier_arm64.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_arm64.cc
@@ -95,8 +95,14 @@
 void AsmIntrinsifier::Integer_addFromInteger(Assembler* assembler,
                                              Label* normal_ir_body) {
   TestBothArgumentsSmis(assembler, normal_ir_body);  // Checks two smis.
-  __ adds(R0, R0, Operand(R1));                      // Adds.
+#if !defined(DART_COMPRESSED_POINTERS)
+  __ adds(R0, R0, Operand(R1));  // Add.
   __ b(normal_ir_body, VS);  // Fall-through on overflow.
+#else
+  __ addsw(R0, R0, Operand(R1));  // Add (32-bit).
+  __ b(normal_ir_body, VS);       // Fall-through on overflow.
+  __ sxtw(R0, R0);                // Sign extend.
+#endif
   __ ret();
   __ Bind(normal_ir_body);
 }
@@ -108,16 +114,28 @@
 void AsmIntrinsifier::Integer_subFromInteger(Assembler* assembler,
                                              Label* normal_ir_body) {
   TestBothArgumentsSmis(assembler, normal_ir_body);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ subs(R0, R0, Operand(R1));  // Subtract.
   __ b(normal_ir_body, VS);      // Fall-through on overflow.
+#else
+  __ subsw(R0, R0, Operand(R1));  // Subtract (32-bit).
+  __ b(normal_ir_body, VS);       // Fall-through on overflow.
+  __ sxtw(R0, R0);                // Sign extend.
+#endif
   __ ret();
   __ Bind(normal_ir_body);
 }
 
 void AsmIntrinsifier::Integer_sub(Assembler* assembler, Label* normal_ir_body) {
   TestBothArgumentsSmis(assembler, normal_ir_body);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ subs(R0, R1, Operand(R0));  // Subtract.
   __ b(normal_ir_body, VS);      // Fall-through on overflow.
+#else
+  __ subsw(R0, R1, Operand(R0));  // Subtract (32-bit).
+  __ b(normal_ir_body, VS);       // Fall-through on overflow.
+  __ sxtw(R0, R0);                // Sign extend.
+#endif
   __ ret();
   __ Bind(normal_ir_body);
 }
@@ -127,9 +145,15 @@
   TestBothArgumentsSmis(assembler, normal_ir_body);  // checks two smis
   __ SmiUntag(R0);  // Untags R0. We only want the result shifted by one.
 
+#if !defined(DART_COMPRESSED_POINTERS)
   __ mul(TMP, R0, R1);
   __ smulh(TMP2, R0, R1);
   // TMP2: result bits 64..127.
+#else
+  __ smull(TMP, R0, R1);
+  __ AsrImmediate(TMP2, TMP, 31);
+  // TMP2: result bits 32..63.
+#endif
   __ cmp(TMP2, Operand(TMP, ASR, 63));
   __ b(normal_ir_body, NE);
   __ mov(R0, TMP);
@@ -246,7 +270,11 @@
 
   // Check the corner case of dividing the 'MIN_SMI' with -1, in which case we
   // cannot tag the result.
+#if !defined(DART_COMPRESSED_POINTERS)
   __ CompareImmediate(R0, 0x4000000000000000);
+#else
+  __ CompareImmediate(R0, 0x40000000);
+#endif
   __ b(normal_ir_body, EQ);
   __ SmiTag(R0);  // Not equal. Okay to tag and return.
   __ ret();       // Return.
@@ -257,8 +285,14 @@
                                      Label* normal_ir_body) {
   __ ldr(R0, Address(SP, +0 * target::kWordSize));  // Grab first argument.
   __ BranchIfNotSmi(R0, normal_ir_body);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ negs(R0, R0);
   __ b(normal_ir_body, VS);
+#else
+  __ negsw(R0, R0);
+  __ b(normal_ir_body, VS);
+  __ sxtw(R0, R0);
+#endif
   __ ret();
   __ Bind(normal_ir_body);
 }
@@ -318,9 +352,15 @@
   // Check if count too large for handling it inlined.
   __ SmiUntag(TMP, right);  // SmiUntag right into TMP.
   // Overflow test (preserve left, right, and TMP);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ lslv(temp, left, TMP);
   __ asrv(TMP2, temp, TMP);
   __ CompareRegisters(left, TMP2);
+#else
+  __ lslvw(temp, left, TMP);
+  __ asrvw(TMP2, temp, TMP);
+  __ cmpw(left, Operand(TMP2));
+#endif
   __ b(normal_ir_body, NE);  // Overflow.
   // Shift for result now we know there is no overflow.
   __ lslv(result, left, TMP);
@@ -1283,7 +1323,11 @@
   __ BranchIfNotSmi(R0, normal_ir_body);
   // Is Smi.
   __ SmiUntag(R0);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ scvtfdx(V0, R0);
+#else
+  __ scvtfdw(V0, R0);
+#endif
   const Class& double_class = DoubleClass();
   __ TryAllocate(double_class, normal_ir_body, R0, R1);
   __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
@@ -1356,11 +1400,20 @@
   __ fcmpd(V0, V0);
   __ b(normal_ir_body, VS);
 
+#if !defined(DART_COMPRESSED_POINTERS)
   __ fcvtzdsx(R0, V0);
   // Overflow is signaled with minint.
   // Check for overflow and that it fits into Smi.
   __ CompareImmediate(R0, 0xC000000000000000);
   __ b(normal_ir_body, MI);
+#else
+  __ fcvtzdsw(R0, V0);
+  // Overflow is signaled with minint.
+  // Check for overflow and that it fits into Smi.
+  __ AsrImmediate(TMP, R0, 30);
+  __ cmp(TMP, Operand(R0, ASR, 63));
+  __ b(normal_ir_body, NE);
+#endif
   __ SmiTag(R0);
   __ ret();
   __ Bind(normal_ir_body);
@@ -1378,18 +1431,31 @@
   __ fcmpd(V0, V0);
   __ b(&double_hash, VS);
 
+#if !defined(DART_COMPRESSED_POINTERS)
   // Convert double value to signed 64-bit int in R0 and back to a
   // double value in V1.
   __ fcvtzdsx(R0, V0);
   __ scvtfdx(V1, R0);
+#else
+  // Convert double value to signed 32-bit int in R0 and back to a
+  // double value in V1.
+  __ fcvtzdsw(R0, V0);
+  __ scvtfdw(V1, R0);
+#endif
 
   // Tag the int as a Smi, making sure that it fits; this checks for
   // overflow in the conversion from double to int. Conversion
   // overflow is signalled by fcvt through clamping R0 to either
   // INT64_MAX or INT64_MIN (saturation).
   ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ adds(R0, R0, Operand(R0));
   __ b(normal_ir_body, VS);
+#else
+  __ addsw(R0, R0, Operand(R0));
+  __ b(normal_ir_body, VS);
+  __ sxtw(R0, R0);  // Sign extend.
+#endif
 
   // Compare the two double values. If they are equal, we return the
   // Smi tagged result immediately as the hash code.
diff --git a/runtime/vm/compiler/asm_intrinsifier_x64.cc b/runtime/vm/compiler/asm_intrinsifier_x64.cc
index 8cf9a76..7df68fa 100644
--- a/runtime/vm/compiler/asm_intrinsifier_x64.cc
+++ b/runtime/vm/compiler/asm_intrinsifier_x64.cc
@@ -94,8 +94,14 @@
                                              Label* normal_ir_body) {
   TestBothArgumentsSmis(assembler, normal_ir_body);
   // RAX contains right argument.
+#if !defined(DART_COMPRESSED_POINTERS)
   __ addq(RAX, Address(RSP, +2 * target::kWordSize));
   __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
+#else
+  __ addl(RAX, Address(RSP, +2 * target::kWordSize));
+  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
+  __ movsxd(RAX, RAX);
+#endif
   // Result is in RAX.
   __ ret();
   __ Bind(normal_ir_body);
@@ -109,8 +115,14 @@
                                              Label* normal_ir_body) {
   TestBothArgumentsSmis(assembler, normal_ir_body);
   // RAX contains right argument, which is the actual minuend of subtraction.
+#if !defined(DART_COMPRESSED_POINTERS)
   __ subq(RAX, Address(RSP, +2 * target::kWordSize));
   __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
+#else
+  __ subl(RAX, Address(RSP, +2 * target::kWordSize));
+  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
+  __ movsxd(RAX, RAX);
+#endif
   // Result is in RAX.
   __ ret();
   __ Bind(normal_ir_body);
@@ -121,8 +133,14 @@
   // RAX contains right argument, which is the actual subtrahend of subtraction.
   __ movq(RCX, RAX);
   __ movq(RAX, Address(RSP, +2 * target::kWordSize));
+#if !defined(DART_COMPRESSED_POINTERS)
   __ subq(RAX, RCX);
   __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
+#else
+  __ subl(RAX, RCX);
+  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
+  __ movsxd(RAX, RAX);
+#endif
   // Result is in RAX.
   __ ret();
   __ Bind(normal_ir_body);
@@ -134,8 +152,14 @@
   // RAX is the right argument.
   ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
   __ SmiUntag(RAX);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ imulq(RAX, Address(RSP, +2 * target::kWordSize));
   __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
+#else
+  __ imull(RAX, Address(RSP, +2 * target::kWordSize));
+  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
+  __ movsxd(RAX, RAX);
+#endif
   // Result is in RAX.
   __ ret();
   __ Bind(normal_ir_body);
@@ -179,6 +203,7 @@
 
   __ Bind(&try_modulo);
 
+#if !defined(DART_COMPRESSED_POINTERS)
   // Check if both operands fit into 32bits as idiv with 64bit operands
   // requires twice as many cycles and has much higher latency. We are checking
   // this before untagging them to avoid corner case dividing INT_MAX by -1 that
@@ -189,6 +214,7 @@
   __ movsxd(RBX, RCX);
   __ cmpq(RBX, RCX);
   __ j(NOT_EQUAL, &not_32bit, Assembler::kNearJump);
+#endif
 
   // Both operands are 31bit smis. Divide using 32bit idiv.
   __ SmiUntag(RAX);
@@ -196,6 +222,7 @@
   __ cdq();
   __ idivl(RCX);
   __ movsxd(RAX, RDX);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ jmp(&done, Assembler::kNearJump);
 
   // Divide using 64bit idiv.
@@ -206,6 +233,7 @@
   __ idivq(RCX);
   __ movq(RAX, RDX);
   __ Bind(&done);
+#endif
 }
 
 // Implementation:
@@ -262,6 +290,7 @@
   __ movq(RAX,
           Address(RSP, +2 * target::kWordSize));  // Left argument (dividend).
 
+#if !defined(DART_COMPRESSED_POINTERS)
   // Check if both operands fit into 32bits as idiv with 64bit operands
   // requires twice as many cycles and has much higher latency. We are checking
   // this before untagging them to avoid corner case dividing INT_MAX by -1 that
@@ -296,6 +325,21 @@
   __ j(EQUAL, normal_ir_body);
   __ SmiTag(RAX);
   __ ret();
+#else
+  // Check the corner case of dividing the 'MIN_SMI' with -1, in which case we
+  // cannot tag the result.
+  __ cmpq(RAX, Immediate(target::ToRawSmi(target::kSmiMin)));
+  __ j(EQUAL, normal_ir_body);
+
+  // Both operands are 31bit smis. Divide using 32bit idiv.
+  __ SmiUntag(RAX);
+  __ SmiUntag(RCX);
+  __ cdq();
+  __ idivl(RCX);
+  __ SmiTag(RAX);  // Result is guaranteed to fit into a smi.
+  __ movsxd(RAX, RAX);
+  __ ret();
+#endif
   __ Bind(normal_ir_body);
 }
 
@@ -304,8 +348,14 @@
   __ movq(RAX, Address(RSP, +1 * target::kWordSize));
   __ testq(RAX, Immediate(kSmiTagMask));
   __ j(NOT_ZERO, normal_ir_body, Assembler::kNearJump);  // Non-smi value.
+#if !defined(DART_COMPRESSED_POINTERS)
   __ negq(RAX);
   __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
+#else
+  __ negl(RAX);
+  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
+  __ movsxd(RAX, RAX);
+#endif
   // Result is in RAX.
   __ ret();
   __ Bind(normal_ir_body);
@@ -371,8 +421,14 @@
 
   // Overflow test - all the shifted-out bits must be same as the sign bit.
   __ movq(RDI, RAX);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ shlq(RAX, RCX);
   __ sarq(RAX, RCX);
+#else
+  __ shll(RAX, RCX);
+  __ sarl(RAX, RCX);
+  __ movsxd(RAX, RAX);
+#endif
   __ cmpq(RAX, RDI);
   __ j(NOT_EQUAL, &overflow, Assembler::kNearJump);
 
@@ -383,7 +439,7 @@
 
   __ Bind(&overflow);
   // Mint is rarely used on x64 (only for integers requiring 64 bit instead of
-  // 63 bits as represented by Smi).
+  // 63 or 31 bits as represented by Smi).
   __ Bind(normal_ir_body);
 }
 
@@ -1219,7 +1275,11 @@
   __ j(NOT_ZERO, normal_ir_body);
   // Is Smi.
   __ SmiUntag(RAX);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ cvtsi2sdq(XMM0, RAX);
+#else
+  __ cvtsi2sdl(XMM0, RAX);
+#endif
   const Class& double_class = DoubleClass();
   __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
                  RAX,  // Result register.
@@ -1291,13 +1351,26 @@
                                       Label* normal_ir_body) {
   __ movq(RAX, Address(RSP, +1 * target::kWordSize));
   __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
+#if !defined(DART_COMPRESSED_POINTERS)
   __ cvttsd2siq(RAX, XMM0);
+#else
+  __ cvttsd2sil(RAX, XMM0);
+#endif
   // Overflow is signalled with minint.
   // Check for overflow and that it fits into Smi.
   __ movq(RCX, RAX);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ shlq(RCX, Immediate(1));
+#else
+  __ shll(RCX, Immediate(1));
+#endif
   __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ SmiTag(RAX);
+#else
+  ASSERT((kSmiTagShift == 1) && (kSmiTag == 0));
+  __ movsxd(RAX, RCX);
+#endif
   __ ret();
   __ Bind(normal_ir_body);
 }
@@ -1310,15 +1383,26 @@
   // back to a double in XMM1.
   __ movq(RCX, Address(RSP, +1 * target::kWordSize));
   __ movsd(XMM0, FieldAddress(RCX, target::Double::value_offset()));
+#if !defined(DART_COMPRESSED_POINTERS)
   __ cvttsd2siq(RAX, XMM0);
   __ cvtsi2sdq(XMM1, RAX);
+#else
+  __ cvttsd2sil(RAX, XMM0);
+  __ cvtsi2sdl(XMM1, RAX);
+#endif
 
   // Tag the int as a Smi, making sure that it fits; this checks for
   // overflow and NaN in the conversion from double to int. Conversion
   // overflow from cvttsd2si is signalled with an INT64_MIN value.
   ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ addq(RAX, RAX);
   __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
+#else
+  __ addl(RAX, RAX);
+  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
+  __ movsxd(RAX, RAX);
+#endif
 
   // Compare the two double values. If they are equal, we return the
   // Smi tagged result immediately as the hash code.
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index 068256d..3c44731 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -926,12 +926,16 @@
             OperandSize sz = kEightBytes) {
     EmitMiscDP3Source(MSUB, rd, rn, rm, ra, sz);
   }
+  // Signed Multiply High
+  // rd <- (rn * rm)[127:64]
   void smulh(Register rd,
              Register rn,
              Register rm,
              OperandSize sz = kEightBytes) {
     EmitMiscDP3Source(SMULH, rd, rn, rm, R31, sz);
   }
+  // Unsigned Multiply High
+  // rd <- (rn * rm)[127:64]
   void umulh(Register rd,
              Register rn,
              Register rm,
@@ -945,6 +949,8 @@
               OperandSize sz = kEightBytes) {
     EmitMiscDP3Source(UMADDL, rd, rn, rm, ra, sz);
   }
+  // Unsigned Multiply Long
+  // rd:uint64 <- rn:uint32 * rm:uint32
   void umull(Register rd,
              Register rn,
              Register rm,
@@ -958,6 +964,8 @@
               OperandSize sz = kEightBytes) {
     EmitMiscDP3Source(SMADDL, rd, rn, rm, ra, sz);
   }
+  // Signed Multiply Long
+  // rd:int64 <- rn:int32 * rm:int32
   void smull(Register rd,
              Register rn,
              Register rm,
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index b90b712..f1f8f02 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -2173,7 +2173,7 @@
 static intptr_t RepresentationBits(Representation r) {
   switch (r) {
     case kTagged:
-      return compiler::target::kBitsPerWord - 1;
+      return compiler::target::kSmiBits + 1;
     case kUnboxedInt32:
     case kUnboxedUint32:
       return 32;
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index f27870d..48ba834 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -5744,7 +5744,8 @@
   intptr_t element_count() const { return element_count_; }
 
   bool can_pack_into_smi() const {
-    return element_count() <= kSmiBits / (index_scale() * kBitsPerByte);
+    return element_count() <=
+           compiler::target::kSmiBits / (index_scale() * kBitsPerByte);
   }
 
   virtual bool ComputeCanDeoptimize() const { return false; }
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index 8a6897e..cc4cc66 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -1785,6 +1785,7 @@
       index.reg(), TMP);
   __ ldr(result, element_address, sz);
 
+  ASSERT(can_pack_into_smi());
   __ SmiTag(result);
 }
 
@@ -3358,17 +3359,29 @@
     const Object& constant = locs.in(1).constant();
     ASSERT(constant.IsSmi());
     // Immediate shift operation takes 6 bits for the count.
+#if !defined(DART_COMPRESSED_POINTERS)
     const intptr_t kCountLimit = 0x3F;
+#else
+    const intptr_t kCountLimit = 0x1F;
+#endif
     const intptr_t value = Smi::Cast(constant).Value();
     ASSERT((0 < value) && (value < kCountLimit));
     if (shift_left->can_overflow()) {
       // Check for overflow (preserve left).
+#if !defined(DART_COMPRESSED_POINTERS)
       __ LslImmediate(TMP, left, value);
       __ cmp(left, compiler::Operand(TMP, ASR, value));
+#else
+      __ LslImmediate(TMP, left, value, compiler::kFourBytes);
+      __ cmpw(left, compiler::Operand(TMP, ASR, value));
+#endif
       __ b(deopt, NE);  // Overflow.
     }
     // Shift for result now we know there is no overflow.
     __ LslImmediate(result, left, value);
+#if defined(DART_COMPRESSED_POINTERS)
+    __ sxtw(result, result);
+#endif
     return;
   }
 
@@ -3387,7 +3400,8 @@
         __ mov(result, ZR);
         return;
       }
-      const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
+      const intptr_t max_right =
+          compiler::target::kSmiBits - Utils::HighestBit(left_int);
       const bool right_needs_check =
           !RangeUtils::IsWithin(right_range, 0, max_right - 1);
       if (right_needs_check) {
@@ -3396,6 +3410,9 @@
       }
       __ SmiUntag(TMP, right);
       __ lslv(result, left, TMP);
+#if defined(DART_COMPRESSED_POINTERS)
+      __ sxtw(result, result);
+#endif
     }
     return;
   }
@@ -3430,9 +3447,15 @@
     __ SmiUntag(TMP, right);
     // Overflow test (preserve left, right, and TMP);
     const Register temp = locs.temp(0).reg();
+#if !defined(DART_COMPRESSED_POINTERS)
     __ lslv(temp, left, TMP);
     __ asrv(TMP2, temp, TMP);
     __ CompareRegisters(left, TMP2);
+#else
+    __ lslvw(temp, left, TMP);
+    __ asrvw(TMP2, temp, TMP);
+    __ cmpw(left, compiler::Operand(TMP2));
+#endif
     __ b(deopt, NE);  // Overflow.
     // Shift for result now we know there is no overflow.
     __ lslv(result, left, TMP);
@@ -3516,18 +3539,34 @@
 
   switch (op_kind()) {
     case Token::kADD:
+#if !defined(DART_COMPRESSED_POINTERS)
       __ adds(result, left, compiler::Operand(right));
+#else
+      __ addsw(result, left, compiler::Operand(right));
+      __ sxtw(result, result);
+#endif
       __ b(slow_path->entry_label(), VS);
       break;
     case Token::kSUB:
+#if !defined(DART_COMPRESSED_POINTERS)
       __ subs(result, left, compiler::Operand(right));
+#else
+      __ subsw(result, left, compiler::Operand(right));
+      __ sxtw(result, result);
+#endif
       __ b(slow_path->entry_label(), VS);
       break;
     case Token::kMUL:
       __ SmiUntag(TMP, left);
+#if !defined(DART_COMPRESSED_POINTERS)
       __ mul(result, TMP, right);
       __ smulh(TMP, TMP, right);
       // TMP: result bits 64..127.
+#else
+      __ smull(result, TMP, right);
+      __ AsrImmediate(TMP, result, 31);
+      // TMP: result bits 32..63.
+#endif
       __ cmp(TMP, compiler::Operand(result, ASR, 63));
       __ b(slow_path->entry_label(), NE);
       break;
@@ -3551,8 +3590,13 @@
 
       __ SmiUntag(TMP, right);
       __ lslv(result, left, TMP);
+#if !defined(DART_COMPRESSED_POINTERS)
       __ asrv(TMP2, result, TMP);
       __ CompareRegisters(left, TMP2);
+#else
+      __ asrvw(TMP2, result, TMP);
+      __ cmp(left, compiler::Operand(TMP2, SXTW, 0));
+#endif
       __ b(slow_path->entry_label(), NE);  // Overflow.
       break;
     case Token::kSHR:
@@ -3781,21 +3825,37 @@
     switch (op_kind()) {
       case Token::kADD: {
         if (deopt == NULL) {
+          // When we can't overflow, prefer 64-bit op to 32-bit op followed by
+          // sign extension.
           __ AddImmediate(result, left, imm);
         } else {
+#if !defined(DART_COMPRESSED_POINTERS)
           __ AddImmediateSetFlags(result, left, imm);
           __ b(deopt, VS);
+#else
+          __ AddImmediateSetFlags(result, left, imm, compiler::kFourBytes);
+          __ b(deopt, VS);
+          __ sxtw(result, result);
+#endif
         }
         break;
       }
       case Token::kSUB: {
         if (deopt == NULL) {
+          // When we can't overflow, prefer 64-bit op to 32-bit op followed by
+          // sign extension.
           __ AddImmediate(result, left, -imm);
         } else {
           // Negating imm and using AddImmediateSetFlags would not detect the
           // overflow when imm == kMinInt64.
+#if !defined(DART_COMPRESSED_POINTERS)
           __ SubImmediateSetFlags(result, left, imm);
           __ b(deopt, VS);
+#else
+          __ SubImmediateSetFlags(result, left, imm, compiler::kFourBytes);
+          __ b(deopt, VS);
+          __ sxtw(result, result);
+#endif
         }
         break;
       }
@@ -3803,10 +3863,19 @@
         // Keep left value tagged and untag right value.
         const intptr_t value = Smi::Cast(constant).Value();
         __ LoadImmediate(TMP, value);
+#if !defined(DART_COMPRESSED_POINTERS)
         __ mul(result, left, TMP);
+#else
+        __ smull(result, left, TMP);
+#endif
         if (deopt != NULL) {
+#if !defined(DART_COMPRESSED_POINTERS)
           __ smulh(TMP, left, TMP);
           // TMP: result bits 64..127.
+#else
+          __ AsrImmediate(TMP, result, 31);
+          // TMP: result bits 32..63.
+#endif
           __ cmp(TMP, compiler::Operand(result, ASR, 63));
           __ b(deopt, NE);
         }
@@ -3865,8 +3934,14 @@
       if (deopt == NULL) {
         __ add(result, left, compiler::Operand(right));
       } else {
+#if !defined(DART_COMPRESSED_POINTERS)
         __ adds(result, left, compiler::Operand(right));
         __ b(deopt, VS);
+#else
+        __ addsw(result, left, compiler::Operand(right));
+        __ b(deopt, VS);
+        __ sxtw(result, result);
+#endif
       }
       break;
     }
@@ -3874,19 +3949,32 @@
       if (deopt == NULL) {
         __ sub(result, left, compiler::Operand(right));
       } else {
+#if !defined(DART_COMPRESSED_POINTERS)
         __ subs(result, left, compiler::Operand(right));
         __ b(deopt, VS);
+#else
+        __ subsw(result, left, compiler::Operand(right));
+        __ b(deopt, VS);
+        __ sxtw(result, result);
+#endif
       }
       break;
     }
     case Token::kMUL: {
       __ SmiUntag(TMP, left);
-      if (deopt == NULL) {
-        __ mul(result, TMP, right);
-      } else {
-        __ mul(result, TMP, right);
+#if !defined(DART_COMPRESSED_POINTERS)
+      __ mul(result, TMP, right);
+#else
+      __ smull(result, TMP, right);
+#endif
+      if (deopt != NULL) {
+#if !defined(DART_COMPRESSED_POINTERS)
         __ smulh(TMP, TMP, right);
         // TMP: result bits 64..127.
+#else
+        __ AsrImmediate(TMP, result, 31);
+        // TMP: result bits 32..63.
+#endif
         __ cmp(TMP, compiler::Operand(result, ASR, 63));
         __ b(deopt, NE);
       }
@@ -3921,7 +4009,11 @@
       if (RangeUtils::Overlaps(right_range(), -1, -1)) {
         // Check the corner case of dividing the 'MIN_SMI' with -1, in which
         // case we cannot tag the result.
+#if !defined(DART_COMPRESSED_POINTERS)
         __ CompareImmediate(result, 0x4000000000000000LL);
+#else
+        __ CompareImmediate(result, 0x40000000LL);
+#endif
         __ b(deopt, EQ);
       }
       __ SmiTag(result);
@@ -4196,12 +4288,24 @@
                                                         bool opt) const {
   ASSERT((from_representation() == kUnboxedInt32) ||
          (from_representation() == kUnboxedUint32));
+#if !defined(DART_COMPRESSED_POINTERS)
+  // ValueFitsSmi() may be overly conservative and false because we only
+  // perform range analysis during optimized compilation.
+  const bool kMayAllocateMint = false;
+#else
+  const bool kMayAllocateMint = !ValueFitsSmi();
+#endif
   const intptr_t kNumInputs = 1;
-  const intptr_t kNumTemps = 0;
+  const intptr_t kNumTemps = kMayAllocateMint ? 1 : 0;
   LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+      LocationSummary(zone, kNumInputs, kNumTemps,
+                      kMayAllocateMint ? LocationSummary::kCallOnSlowPath
+                                       : LocationSummary::kNoCall);
   summary->set_in(0, Location::RequiresRegister());
   summary->set_out(0, Location::RequiresRegister());
+  if (kMayAllocateMint) {
+    summary->set_temp(0, Location::RequiresRegister());
+  }
   return summary;
 }
 
@@ -4210,6 +4314,7 @@
   Register out = locs()->out(0).reg();
   ASSERT(value != out);
 
+#if !defined(DART_COMPRESSED_POINTERS)
   ASSERT(compiler::target::kSmiBits >= 32);
   if (from_representation() == kUnboxedInt32) {
     __ sbfiz(out, value, kSmiTagSize, 32);
@@ -4217,6 +4322,41 @@
     ASSERT(from_representation() == kUnboxedUint32);
     __ ubfiz(out, value, kSmiTagSize, 32);
   }
+#else
+  compiler::Label done;
+  if (from_representation() == kUnboxedInt32) {
+    ASSERT(kSmiTag == 0);
+    // Signed Bitfield Insert in Zero: shift the low 31 bits of the value into
+    // Smi position, yielding a tagged, sign-extended Smi when the value fits.
+    __ sbfiz(out, value, kSmiTagSize, 32 - kSmiTagSize);
+    if (ValueFitsSmi()) {
+      return;
+    }
+    __ cmp(out, compiler::Operand(value, LSL, 1));
+    __ b(&done, EQ);  // Jump if the sbfiz instruction didn't lose info.
+  } else {
+    ASSERT(from_representation() == kUnboxedUint32);
+    // A 32 bit positive Smi has one tag bit and one unused sign bit,
+    // leaving only 30 bits for the payload.
+    __ ubfiz(out, value, kSmiTagSize, compiler::target::kSmiBits);
+    if (ValueFitsSmi()) {
+      return;
+    }
+    __ TestImmediate(value, 0xC0000000);
+    __ b(&done, EQ);  // Jump if both bits are zero.
+  }
+
+  Register temp = locs()->temp(0).reg();
+  BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
+                                  temp);
+  if (from_representation() == kUnboxedInt32) {
+    __ sxtw(temp, value);  // Sign-extend.
+  } else {
+    __ ubfiz(temp, value, 0, 32);  // Zero extend word.
+  }
+  __ StoreToOffset(temp, out, Mint::value_offset() - kHeapObjectTag);
+  __ Bind(&done);
+#endif
 }
 
 LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
@@ -4265,11 +4405,19 @@
     return;
   }
   ASSERT(kSmiTag == 0);
-  __ adds(out, in, compiler::Operand(in));  // SmiTag
   compiler::Label done;
+#if !defined(DART_COMPRESSED_POINTERS)
+  __ adds(out, in, compiler::Operand(in));  // SmiTag
   // If the value doesn't fit in a smi, the tagging changes the sign,
   // which causes the overflow flag to be set.
   __ b(&done, NO_OVERFLOW);
+#else
+  __ LslImmediate(out, in, kSmiTagSize,
+                  compiler::kFourBytes);  // SmiTag (32-bit);
+  __ sxtw(out, out);                      // Sign-extend.
+  __ cmp(in, compiler::Operand(out, ASR, kSmiTagSize));
+  __ b(&done, EQ);
+#endif
 
   Register temp = locs()->temp(0).reg();
   if (compiler->intrinsic_mode()) {
@@ -5026,7 +5174,12 @@
     case Token::kNEGATE: {
       compiler::Label* deopt =
           compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
+#if !defined(DART_COMPRESSED_POINTERS)
       __ subs(result, ZR, compiler::Operand(value));
+#else
+      __ subsw(result, ZR, compiler::Operand(value));
+      __ sxtw(result, result);
+#endif
       __ b(deopt, VS);
       break;
     }
@@ -5089,7 +5242,11 @@
   const Register value = locs()->in(0).reg();
   const VRegister result = locs()->out(0).fpu_reg();
   __ SmiUntag(TMP, value);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ scvtfdx(result, TMP);
+#else
+  __ scvtfdw(result, TMP);
+#endif
 }
 
 LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
@@ -5136,9 +5293,16 @@
   __ fcvtzdsx(result, VTMP);
   // Overflow is signaled with minint.
 
+#if !defined(DART_COMPRESSED_POINTERS)
   // Check for overflow and that it fits into Smi.
   __ CompareImmediate(result, 0xC000000000000000);
   __ b(&do_call, MI);
+#else
+  // Check for overflow and that it fits into Smi.
+  __ AsrImmediate(TMP, result, 30);
+  __ cmp(TMP, compiler::Operand(result, ASR, 63));
+  __ b(&do_call, NE);
+#endif
   __ SmiTag(result);
   __ b(&done);
   __ Bind(&do_call);
@@ -5181,10 +5345,18 @@
   __ fcmpd(value, value);
   __ b(deopt, VS);
 
+#if !defined(DART_COMPRESSED_POINTERS)
   __ fcvtzdsx(result, value);
   // Check for overflow and that it fits into Smi.
   __ CompareImmediate(result, 0xC000000000000000);
   __ b(deopt, MI);
+#else
+  __ fcvtzdsw(result, value);
+  // Check for overflow and that it fits into Smi.
+  __ AsrImmediate(TMP, result, 30);
+  __ cmp(TMP, compiler::Operand(result, ASR, 63));
+  __ b(deopt, NE);
+#endif
   __ SmiTag(result);
 }
 
@@ -5455,7 +5627,11 @@
 
   // Check the corner case of dividing the 'MIN_SMI' with -1, in which
   // case we cannot tag the result.
+#if !defined(DART_COMPRESSED_POINTERS)
   __ CompareImmediate(result_div, 0x4000000000000000);
+#else
+  __ CompareImmediate(result_div, 0x40000000);
+#endif
   __ b(deopt, EQ);
   // result_mod <- left - right * result_div.
   __ msub(result_mod, TMP, result_div, result_mod);
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index d81e582..bd87d93 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -1755,6 +1755,7 @@
         default:
           UNREACHABLE();
       }
+      ASSERT(can_pack_into_smi());
       __ SmiTag(result);
       break;
     case kTwoByteStringCid:
@@ -1769,6 +1770,7 @@
         default:
           UNREACHABLE();
       }
+      ASSERT(can_pack_into_smi());
       __ SmiTag(result);
       break;
     default:
@@ -3372,26 +3374,47 @@
     const Object& constant = locs.in(1).constant();
     ASSERT(constant.IsSmi());
     // shlq operation masks the count to 6 bits.
+#if !defined(DART_COMPRESSED_POINTERS)
     const intptr_t kCountLimit = 0x3F;
+#else
+    const intptr_t kCountLimit = 0x1F;
+#endif
     const intptr_t value = Smi::Cast(constant).Value();
     ASSERT((0 < value) && (value < kCountLimit));
     if (shift_left->can_overflow()) {
       if (value == 1) {
         // Use overflow flag.
+#if !defined(DART_COMPRESSED_POINTERS)
         __ shlq(left, compiler::Immediate(1));
         __ j(OVERFLOW, deopt);
+#else
+        __ shll(left, compiler::Immediate(1));
+        __ j(OVERFLOW, deopt);
+        __ movsxd(left, left);
+#endif
         return;
       }
       // Check for overflow.
       Register temp = locs.temp(0).reg();
       __ movq(temp, left);
+#if !defined(DART_COMPRESSED_POINTERS)
       __ shlq(left, compiler::Immediate(value));
       __ sarq(left, compiler::Immediate(value));
+#else
+      __ shll(left, compiler::Immediate(value));
+      __ sarl(left, compiler::Immediate(value));
+      __ movsxd(left, left);
+#endif
       __ cmpq(left, temp);
       __ j(NOT_EQUAL, deopt);  // Overflow.
     }
     // Shift for result now we know there is no overflow.
     __ shlq(left, compiler::Immediate(value));
+#if defined(DART_COMPRESSED_POINTERS)
+    if (shift_left->is_truncating()) {
+      __ movsxd(left, left);
+    }
+#endif
     return;
   }
 
@@ -3420,6 +3443,13 @@
       }
       __ SmiUntag(right);
       __ shlq(left, right);
+#if defined(DART_COMPRESSED_POINTERS)
+      if (shift_left->is_truncating()) {
+        __ movsxd(left, left);
+      }
+#endif
+    } else {
+      __ int3();  //???
     }
     return;
   }
@@ -3451,6 +3481,11 @@
       __ SmiUntag(right);
       __ shlq(left, right);
     }
+#if defined(DART_COMPRESSED_POINTERS)
+    if (shift_left->is_truncating()) {
+      __ movsxd(left, left);
+    }
+#endif
   } else {
     if (right_needs_check) {
       ASSERT(shift_left->CanDeoptimize());
@@ -3462,15 +3497,26 @@
     // Left is not a constant.
     Register temp = locs.temp(0).reg();
     // Check if count too large for handling it inlined.
+#if !defined(DART_COMPRESSED_POINTERS)
     __ movq(temp, left);
+#else
+    __ movl(temp, left);
+#endif
     __ SmiUntag(right);
     // Overflow test (preserve temp and right);
+#if !defined(DART_COMPRESSED_POINTERS)
     __ shlq(left, right);
     __ sarq(left, right);
     __ cmpq(left, temp);
+#else
+    __ shll(temp, right);
+    __ sarl(temp, right);
+    __ cmpl(temp, left);
+#endif
     __ j(NOT_EQUAL, deopt);  // Overflow.
     // Shift for result now we know there is no overflow.
     __ shlq(left, right);
+    ASSERT(!shift_left->is_truncating());
   }
 }
 
@@ -3572,19 +3618,37 @@
   switch (op_kind()) {
     case Token::kADD:
       __ movq(result, left);
+#if !defined(DART_COMPRESSED_POINTERS)
       __ addq(result, right);
       __ j(OVERFLOW, slow_path->entry_label());
+#else
+      __ addl(result, right);
+      __ j(OVERFLOW, slow_path->entry_label());
+      __ movsxd(result, result);
+#endif
       break;
     case Token::kSUB:
       __ movq(result, left);
+#if !defined(DART_COMPRESSED_POINTERS)
       __ subq(result, right);
       __ j(OVERFLOW, slow_path->entry_label());
+#else
+      __ subl(result, right);
+      __ j(OVERFLOW, slow_path->entry_label());
+      __ movsxd(result, result);
+#endif
       break;
     case Token::kMUL:
       __ movq(result, left);
       __ SmiUntag(result);
+#if !defined(DART_COMPRESSED_POINTERS)
       __ imulq(result, right);
       __ j(OVERFLOW, slow_path->entry_label());
+#else
+      __ imull(result, right);
+      __ j(OVERFLOW, slow_path->entry_label());
+      __ movsxd(result, result);
+#endif
       break;
     case Token::kBIT_OR:
       ASSERT(left == result);
@@ -3607,11 +3671,20 @@
       __ movq(RCX, right);
       __ SmiUntag(RCX);
       __ movq(result, left);
+#if !defined(DART_COMPRESSED_POINTERS)
       __ shlq(result, RCX);
       __ movq(TMP, result);
       __ sarq(TMP, RCX);
       __ cmpq(TMP, left);
       __ j(NOT_EQUAL, slow_path->entry_label());
+#else
+      __ shll(result, RCX);
+      __ movq(TMP, result);
+      __ sarl(TMP, RCX);
+      __ cmpl(TMP, left);
+      __ j(NOT_EQUAL, slow_path->entry_label());
+      __ movsxd(result, result);
+#endif
       break;
     case Token::kSHR: {
       compiler::Label shift_count_ok;
@@ -3900,20 +3973,57 @@
     const int64_t imm = static_cast<int64_t>(constant.ptr());
     switch (op_kind()) {
       case Token::kADD: {
-        __ AddImmediate(left, compiler::Immediate(imm));
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt == NULL) {
+          // When we can't overflow, prefer 64-bit op to 32-bit op followed by
+          // sign extension.
+          __ AddImmediate(left, compiler::Immediate(imm));
+        } else {
+#if !defined(DART_COMPRESSED_POINTERS)
+          __ AddImmediate(left, compiler::Immediate(imm));
+          __ j(OVERFLOW, deopt);
+#else
+          __ AddImmediate(left, compiler::Immediate(imm), compiler::kFourBytes);
+          __ j(OVERFLOW, deopt);
+          __ movsxd(left, left);
+#endif
+        }
         break;
       }
       case Token::kSUB: {
-        __ SubImmediate(left, compiler::Immediate(imm));
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt == NULL) {
+          // When we can't overflow, prefer 64-bit op to 32-bit op followed by
+          // sign extension.
+          __ SubImmediate(left, compiler::Immediate(imm));
+        } else {
+#if !defined(DART_COMPRESSED_POINTERS)
+          __ SubImmediate(left, compiler::Immediate(imm));
+          __ j(OVERFLOW, deopt);
+#else
+          __ SubImmediate(left, compiler::Immediate(imm), compiler::kFourBytes);
+          __ j(OVERFLOW, deopt);
+          __ movsxd(left, left);
+#endif
+        }
         break;
       }
       case Token::kMUL: {
         // Keep left value tagged and untag right value.
         const intptr_t value = Smi::Cast(constant).Value();
-        __ MulImmediate(left, compiler::Immediate(value));
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt == NULL) {
+          // When we can't overflow, prefer 64-bit op to 32-bit op followed by
+          // sign extension.
+          __ MulImmediate(left, compiler::Immediate(value));
+        } else {
+#if !defined(DART_COMPRESSED_POINTERS)
+          __ MulImmediate(left, compiler::Immediate(value));
+          __ j(OVERFLOW, deopt);
+#else
+          __ MulImmediate(left, compiler::Immediate(value),
+                          compiler::kFourBytes);
+          __ j(OVERFLOW, deopt);
+          __ movsxd(left, left);
+#endif
+        }
         break;
       }
       case Token::kTRUNCDIV: {
@@ -3925,7 +4035,12 @@
         ASSERT(kSmiTagSize == 1);
         Register temp = locs()->temp(0).reg();
         __ movq(temp, left);
+#if !defined(DART_COMPRESSED_POINTERS)
         __ sarq(temp, compiler::Immediate(63));
+#else
+        // Assumes Smis are sign extended.
+        __ sarq(temp, compiler::Immediate(31));
+#endif
         ASSERT(shift_count > 1);  // 1, -1 case handled above.
         __ shrq(temp, compiler::Immediate(64 - shift_count));
         __ addq(left, temp);
@@ -3955,7 +4070,11 @@
 
       case Token::kSHR: {
         // sarq operation masks the count to 6 bits.
+#if !defined(DART_COMPRESSED_POINTERS)
         const intptr_t kCountLimit = 0x3F;
+#else
+        const intptr_t kCountLimit = 0x1F;
+#endif
         const intptr_t value = Smi::Cast(constant).Value();
         __ sarq(left, compiler::Immediate(
                           Utils::Minimum(value + kSmiTagSize, kCountLimit)));
@@ -3974,19 +4093,55 @@
     const compiler::Address& right = LocationToStackSlotAddress(locs()->in(1));
     switch (op_kind()) {
       case Token::kADD: {
-        __ addq(left, right);
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt == NULL) {
+          // When we can't overflow, prefer 64-bit op to 32-bit op followed by
+          // sign extension.
+          __ addq(left, right);
+        } else {
+#if !defined(DART_COMPRESSED_POINTERS)
+          __ addq(left, right);
+          __ j(OVERFLOW, deopt);
+#else
+          __ addl(left, right);
+          __ j(OVERFLOW, deopt);
+          __ movsxd(left, left);
+#endif
+        }
         break;
       }
       case Token::kSUB: {
-        __ subq(left, right);
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt == NULL) {
+          // When we can't overflow, prefer 64-bit op to 32-bit op followed by
+          // sign extension.
+          __ subq(left, right);
+        } else {
+#if !defined(DART_COMPRESSED_POINTERS)
+          __ subq(left, right);
+          __ j(OVERFLOW, deopt);
+#else
+          __ subl(left, right);
+          __ j(OVERFLOW, deopt);
+          __ movsxd(left, left);
+#endif
+        }
         break;
       }
       case Token::kMUL: {
         __ SmiUntag(left);
-        __ imulq(left, right);
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt == NULL) {
+          // When we can't overflow, prefer 64-bit op to 32-bit op followed by
+          // sign extension.
+          __ imulq(left, right);
+        } else {
+#if !defined(DART_COMPRESSED_POINTERS)
+          __ imulq(left, right);
+          __ j(OVERFLOW, deopt);
+#else
+          __ imull(left, right);
+          __ j(OVERFLOW, deopt);
+          __ movsxd(left, left);
+#endif
+        }
         break;
       }
       case Token::kBIT_AND: {
@@ -4015,19 +4170,55 @@
   Register right = locs()->in(1).reg();
   switch (op_kind()) {
     case Token::kADD: {
+#if !defined(DART_COMPRESSED_POINTERS)
       __ addq(left, right);
       if (deopt != NULL) __ j(OVERFLOW, deopt);
+#else
+      if (deopt == NULL) {
+        // When we can't overflow, prefer 64-bit op to 32-bit op followed by
+        // sign extension.
+        __ addq(left, right);
+      } else {
+        __ addl(left, right);
+        __ j(OVERFLOW, deopt);
+        __ movsxd(left, left);
+      }
+#endif
       break;
     }
     case Token::kSUB: {
+#if !defined(DART_COMPRESSED_POINTERS)
       __ subq(left, right);
       if (deopt != NULL) __ j(OVERFLOW, deopt);
+#else
+      if (deopt == NULL) {
+        // When we can't overflow, prefer 64-bit op to 32-bit op followed by
+        // sign extension.
+        __ subq(left, right);
+      } else {
+        __ subl(left, right);
+        __ j(OVERFLOW, deopt);
+        __ movsxd(left, left);
+      }
+#endif
       break;
     }
     case Token::kMUL: {
       __ SmiUntag(left);
+#if !defined(DART_COMPRESSED_POINTERS)
       __ imulq(left, right);
       if (deopt != NULL) __ j(OVERFLOW, deopt);
+#else
+      if (deopt == NULL) {
+        // When we can't overflow, prefer 64-bit op to 32-bit op followed by
+        // sign extension.
+        __ imulq(left, right);
+      } else {
+        __ imull(left, right);
+        __ j(OVERFLOW, deopt);
+        __ movsxd(left, left);
+      }
+#endif
       break;
     }
     case Token::kBIT_AND: {
@@ -4058,6 +4249,7 @@
         __ testq(right, right);
         __ j(ZERO, deopt);
       }
+#if !defined(DART_COMPRESSED_POINTERS)
       // Check if both operands fit into 32bits as idiv with 64bit operands
       // requires twice as many cycles and has much higher latency.
       // We are checking this before untagging them to avoid corner case
@@ -4090,6 +4282,21 @@
         __ CompareImmediate(result, compiler::Immediate(0x4000000000000000));
         __ j(EQUAL, deopt);
       }
+#else
+      // Both operands are 31bit smis. Divide using 32bit idiv.
+      __ SmiUntag(left);
+      __ SmiUntag(right);
+      __ cdq();
+      __ idivl(right);
+
+      if (RangeUtils::Overlaps(right_range(), -1, -1)) {
+        // Check the corner case of dividing the 'MIN_SMI' with -1, in which
+        // case we cannot tag the result.
+        __ cmpl(result, compiler::Immediate(0x40000000));
+        __ j(EQUAL, deopt);
+      }
+      __ movsxd(result, result);
+#endif
       __ Bind(&done);
       __ SmiTag(result);
       break;
@@ -4107,6 +4314,7 @@
         __ testq(right, right);
         __ j(ZERO, deopt);
       }
+#if !defined(DART_COMPRESSED_POINTERS)
       // Check if both operands fit into 32bits as idiv with 64bit operands
       // requires twice as many cycles and has much higher latency.
       // We are checking this before untagging them to avoid corner case
@@ -4118,6 +4326,7 @@
       __ movsxd(temp, right);
       __ cmpq(temp, right);
       __ j(NOT_EQUAL, &not_32bit);
+#endif
       // Both operands are 31bit smis. Divide using 32bit idiv.
       __ SmiUntag(left);
       __ SmiUntag(right);
@@ -4125,6 +4334,7 @@
       __ cdq();
       __ idivl(right);
       __ movsxd(result, result);
+#if !defined(DART_COMPRESSED_POINTERS)
       __ jmp(&div_done);
 
       // Divide using 64bit idiv.
@@ -4135,6 +4345,7 @@
       __ cqo();         // Sign extend RAX -> RDX:RAX.
       __ idivq(right);  //  RAX: quotient, RDX: remainder.
       __ Bind(&div_done);
+#endif
       //  res = left % right;
       //  if (res < 0) {
       //    if (right < 0) {
@@ -4426,6 +4637,11 @@
     __ j(NOT_CARRY, &done, compiler::Assembler::kNearJump);
     __ movq(value, compiler::Address(value, TIMES_2, Mint::value_offset()));
     __ Bind(&done);
+#if defined(DART_COMPRESSED_POINTERS)
+    if (is_truncating()) {
+      __ movsxd(value, value);
+    }
+#endif
     return;
   } else {
     compiler::Label done;
@@ -4474,11 +4690,19 @@
   ASSERT((from_representation() == kUnboxedInt32) ||
          (from_representation() == kUnboxedUint32));
   const intptr_t kNumInputs = 1;
-  const intptr_t kNumTemps = 0;
+  const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
   LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
-  summary->set_in(0, Location::RequiresRegister());
+      LocationSummary(zone, kNumInputs, kNumTemps,
+                      ValueFitsSmi() ? LocationSummary::kNoCall
+                                     : LocationSummary::kCallOnSlowPath);
+  const bool needs_writable_input =
+      ValueFitsSmi() || (from_representation() == kUnboxedUint32) || true;
+  summary->set_in(0, needs_writable_input ? Location::WritableRegister()
+                                          : Location::RequiresRegister());
   summary->set_out(0, Location::RequiresRegister());
+  if (!ValueFitsSmi()) {
+    summary->set_temp(0, Location::RequiresRegister());
+  }
   return summary;
 }
 
@@ -4487,6 +4711,7 @@
   const Register out = locs()->out(0).reg();
   ASSERT(value != out);
 
+#if !defined(DART_COMPRESSED_POINTERS)
   ASSERT(kSmiTagSize == 1);
   if (from_representation() == kUnboxedInt32) {
     __ movsxd(out, value);
@@ -4495,6 +4720,40 @@
     __ movl(out, value);
   }
   __ SmiTag(out);
+#else
+  compiler::Label done;
+  if (from_representation() == kUnboxedInt32) {
+    __ MoveRegister(out, value);
+    __ addl(out, out);
+    __ movsxd(out, out);  // Does not affect flags.
+    if (ValueFitsSmi()) {
+      return;
+    }
+    __ j(NO_OVERFLOW, &done);
+  } else {
+    __ movl(out, value);
+    __ SmiTag(out);
+    if (ValueFitsSmi()) {
+      return;
+    }
+    __ TestImmediate(value, compiler::Immediate(0xC0000000LL));
+    __ j(ZERO, &done);
+  }
+  // Allocate a mint.
+  // Value input is a writable register and we have to inform the compiler of
+  // the type so it can be preserved untagged on the slow path.
+  locs()->live_registers()->Add(locs()->in(0), from_representation());
+  const Register temp = locs()->temp(0).reg();
+  BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
+                                  temp);
+  if (from_representation() == kUnboxedInt32) {
+    __ movsxd(temp, value);  // Sign-extend.
+  } else {
+    __ movl(temp, value);  // Zero-extend.
+  }
+  __ movq(compiler::FieldAddress(out, Mint::value_offset()), temp);
+  __ Bind(&done);
+#endif
 }
 
 LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
@@ -4538,6 +4797,7 @@
 void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
   const Register out = locs()->out(0).reg();
   const Register value = locs()->in(0).reg();
+#if !defined(DART_COMPRESSED_POINTERS)
   __ MoveRegister(out, value);
   __ SmiTag(out);
   if (ValueFitsSmi()) {
@@ -4546,9 +4806,22 @@
   // If the value doesn't fit in a smi, the tagging changes the sign,
   // which causes the overflow flag to be set.
   compiler::Label done;
-  __ j(NO_OVERFLOW, &done);
-
   const Register temp = locs()->temp(0).reg();
+  __ j(NO_OVERFLOW, &done);
+#else
+  __ leaq(out, compiler::Address(value, value, TIMES_1, 0));
+  if (ValueFitsSmi()) {
+    return;
+  }
+  compiler::Label done;
+  const Register temp = locs()->temp(0).reg();
+  __ movq(temp, value);
+  __ sarq(temp, compiler::Immediate(30));
+  __ addq(temp, compiler::Immediate(1));
+  __ cmpq(temp, compiler::Immediate(2));
+  __ j(BELOW, &done);
+#endif
+
   if (compiler->intrinsic_mode()) {
     __ TryAllocate(compiler->mint_class(),
                    compiler->intrinsic_slow_path_label(),
@@ -5133,7 +5406,12 @@
     case Token::kNEGATE: {
       compiler::Label* deopt =
           compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
+#if !defined(DART_COMPRESSED_POINTERS)
       __ negq(value);
+#else
+      __ negl(value);
+      __ movsxd(value, value);
+#endif
       __ j(OVERFLOW, deopt);
       break;
     }
@@ -5285,7 +5563,11 @@
   Register value = locs()->in(0).reg();
   FpuRegister result = locs()->out(0).fpu_reg();
   __ SmiUntag(value);
+#if !defined(DART_COMPRESSED_POINTERS)
   __ cvtsi2sdq(result, value);
+#else
+  __ cvtsi2sdl(result, value);
+#endif
 }
 
 DEFINE_BACKEND(Int64ToDouble, (FpuRegister result, Register value)) {
@@ -5314,14 +5596,25 @@
   ASSERT(result != temp);
   __ movsd(value_double,
            compiler::FieldAddress(value_obj, Double::value_offset()));
+#if !defined(DART_COMPRESSED_POINTERS)
   __ cvttsd2siq(result, value_double);
+#else
+  __ cvttsd2sil(result, value_double);
+#endif
   // Overflow is signalled with minint.
   compiler::Label do_call, done;
   // Check for overflow and that it fits into Smi.
+#if !defined(DART_COMPRESSED_POINTERS)
   __ movq(temp, result);
   __ shlq(temp, compiler::Immediate(1));
   __ j(OVERFLOW, &do_call, compiler::Assembler::kNearJump);
   __ SmiTag(result);
+#else
+  __ movl(temp, result);
+  __ shll(temp, compiler::Immediate(1));
+  __ j(OVERFLOW, &do_call, compiler::Assembler::kNearJump);
+  __ movsxd(result, temp);
+#endif
   __ jmp(&done);
   __ Bind(&do_call);
   __ pushq(value_obj);
@@ -5360,14 +5653,26 @@
   XmmRegister value = locs()->in(0).fpu_reg();
   Register temp = locs()->temp(0).reg();
 
+#if !defined(DART_COMPRESSED_POINTERS)
   __ cvttsd2siq(result, value);
+#else
+  __ cvttsd2sil(result, value);
+#endif
   // Overflow is signalled with minint.
   compiler::Label do_call, done;
   // Check for overflow and that it fits into Smi.
+#if !defined(DART_COMPRESSED_POINTERS)
   __ movq(temp, result);
   __ shlq(temp, compiler::Immediate(1));
   __ j(OVERFLOW, deopt);
   __ SmiTag(result);
+#else
+  __ movl(temp, result);
+  __ shll(temp, compiler::Immediate(1));
+  __ j(OVERFLOW, deopt);
+  ASSERT(kSmiTagShift == 1 && kSmiTag == 0);
+  __ movsxd(result, temp);
+#endif
 }
 
 LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
@@ -5694,6 +5999,7 @@
     __ testq(right, right);
     __ j(ZERO, deopt);
   }
+#if !defined(DART_COMPRESSED_POINTERS)
   // Check if both operands fit into 32bits as idiv with 64bit operands
   // requires twice as many cycles and has much higher latency.
   // We are checking this before untagging them to avoid corner case
@@ -5726,6 +6032,21 @@
   __ CompareImmediate(RAX, compiler::Immediate(0x4000000000000000));
   __ j(EQUAL, deopt);
   __ Bind(&done);
+#else
+  USE(temp);
+  // Both operands are 31bit smis. Divide using 32bit idiv.
+  __ SmiUntag(left);
+  __ SmiUntag(right);
+  __ cdq();
+  __ idivl(right);
+
+  // Check the corner case of dividing the 'MIN_SMI' with -1, in which
+  // case we cannot tag the result.
+  __ cmpl(RAX, compiler::Immediate(0x40000000));
+  __ j(EQUAL, deopt);
+  __ movsxd(RAX, RAX);
+  __ movsxd(RDX, RDX);
+#endif
 
   // Modulo correction (RDX).
   //  res = left % right;
diff --git a/runtime/vm/compiler/backend/range_analysis_test.cc b/runtime/vm/compiler/backend/range_analysis_test.cc
index 81b1467..2d42347 100644
--- a/runtime/vm/compiler/backend/range_analysis_test.cc
+++ b/runtime/vm/compiler/backend/range_analysis_test.cc
@@ -66,13 +66,14 @@
                 RangeBoundary::PositiveInfinity());
   TEST_RANGE_OP(Range::Shl, -1, 1, 63, 63, RangeBoundary(kMinInt64),
                 RangeBoundary::PositiveInfinity());
-  if (kBitsPerWord == 64) {
+  if (compiler::target::kSmiBits == 62) {
     TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62,
                       RangeBoundary(compiler::target::kSmiMin),
                       RangeBoundary(compiler::target::kSmiMax));
     TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30, RangeBoundary(-(1 << 30)),
                       RangeBoundary(1 << 30));
   } else {
+    ASSERT(compiler::target::kSmiBits == 30);
     TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30,
                       RangeBoundary(compiler::target::kSmiMin),
                       RangeBoundary(compiler::target::kSmiMax));
diff --git a/runtime/vm/compiler/backend/type_propagator.cc b/runtime/vm/compiler/backend/type_propagator.cc
index 075f0cf..fd01d69 100644
--- a/runtime/vm/compiler/backend/type_propagator.cc
+++ b/runtime/vm/compiler/backend/type_propagator.cc
@@ -665,7 +665,7 @@
 }
 
 CompileType CompileType::Int32() {
-#if defined(TARGET_ARCH_IS_64_BIT)
+#if defined(TARGET_ARCH_IS_64_BIT) && !defined(DART_COMPRESSED_POINTERS)
   return FromCid(kSmiCid);
 #else
   return Int();
diff --git a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
index f8fd87c..a106f1e 100644
--- a/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
+++ b/runtime/vm/compiler/frontend/base_flow_graph_builder.cc
@@ -425,7 +425,7 @@
 Fragment BaseFlowGraphBuilder::AddIntptrIntegers() {
   Value* right = Pop();
   Value* left = Pop();
-#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_X64)
+#if defined(TARGET_ARCH_IS_64_BIT)
   auto add = new (Z) BinaryInt64OpInstr(
       Token::kADD, left, right, DeoptId::kNone, Instruction::kNotSpeculative);
 #else
diff --git a/runtime/vm/compiler/runtime_api.h b/runtime/vm/compiler/runtime_api.h
index 0b13f75..bfc1343 100644
--- a/runtime/vm/compiler/runtime_api.h
+++ b/runtime/vm/compiler/runtime_api.h
@@ -303,7 +303,11 @@
 constexpr uword kUwordMax = static_cast<word>(-1);
 
 // The number of bits in the _magnitude_ of a Smi, not counting the sign bit.
+#if !defined(DART_COMPRESSED_POINTERS)
 constexpr int kSmiBits = kBitsPerWord - 2;
+#else
+constexpr int kSmiBits = 30;
+#endif
 constexpr word kSmiMax = (static_cast<uword>(1) << kSmiBits) - 1;
 constexpr word kSmiMin = -(static_cast<uword>(1) << kSmiBits);
 
@@ -326,7 +330,7 @@
 // calculate both the parameter flag index in the parameter names array and
 // which bit to check, kNumParameterFlagsPerElement should be a power of two.
 static constexpr intptr_t kNumParameterFlagsPerElementLog2 =
-    kBitsPerWordLog2 - kNumParameterFlags;
+    kBitsPerWordLog2 - 1 - kNumParameterFlags;
 static constexpr intptr_t kNumParameterFlagsPerElement =
     1 << kNumParameterFlagsPerElementLog2;
 static_assert(kNumParameterFlagsPerElement <= kSmiBits,
diff --git a/runtime/vm/compiler/runtime_offsets_extracted.h b/runtime/vm/compiler/runtime_offsets_extracted.h
index deb6f51..00bcf67 100644
--- a/runtime/vm/compiler/runtime_offsets_extracted.h
+++ b/runtime/vm/compiler/runtime_offsets_extracted.h
@@ -2176,8 +2176,7 @@
 static constexpr dart::compiler::target::word ObjectPool_elements_start_offset =
     16;
 static constexpr dart::compiler::target::word ObjectPool_element_size = 8;
-static constexpr dart::compiler::target::word Array_kMaxElements =
-    576460752303423487;
+static constexpr dart::compiler::target::word Array_kMaxElements = 134217727;
 static constexpr dart::compiler::target::word Array_kMaxNewSpaceElements =
     32765;
 static constexpr dart::compiler::target::word
@@ -2195,8 +2194,7 @@
 static constexpr dart::compiler::target::word OldPage_kBytesPerCardLog2 = 10;
 static constexpr dart::compiler::target::word
     NativeEntry_kNumCallWrapperArguments = 2;
-static constexpr dart::compiler::target::word String_kMaxElements =
-    2305843009213693951;
+static constexpr dart::compiler::target::word String_kMaxElements = 536870911;
 static constexpr dart::compiler::target::word
     SubtypeTestCache_kFunctionTypeArguments = 5;
 static constexpr dart::compiler::target::word
@@ -2215,7 +2213,7 @@
     SubtypeTestCache_kTestEntryLength = 8;
 static constexpr dart::compiler::target::word SubtypeTestCache_kTestResult = 0;
 static constexpr dart::compiler::target::word TypeArguments_kMaxElements =
-    576460752303423487;
+    134217727;
 static constexpr dart::compiler::target::word
     AbstractType_type_test_stub_entry_point_offset = 8;
 static constexpr dart::compiler::target::word ArgumentsDescriptor_count_offset =
@@ -2717,8 +2715,7 @@
 static constexpr dart::compiler::target::word ObjectPool_elements_start_offset =
     16;
 static constexpr dart::compiler::target::word ObjectPool_element_size = 8;
-static constexpr dart::compiler::target::word Array_kMaxElements =
-    576460752303423487;
+static constexpr dart::compiler::target::word Array_kMaxElements = 134217727;
 static constexpr dart::compiler::target::word Array_kMaxNewSpaceElements =
     32765;
 static constexpr dart::compiler::target::word
@@ -2736,8 +2733,7 @@
 static constexpr dart::compiler::target::word OldPage_kBytesPerCardLog2 = 10;
 static constexpr dart::compiler::target::word
     NativeEntry_kNumCallWrapperArguments = 2;
-static constexpr dart::compiler::target::word String_kMaxElements =
-    2305843009213693951;
+static constexpr dart::compiler::target::word String_kMaxElements = 536870911;
 static constexpr dart::compiler::target::word
     SubtypeTestCache_kFunctionTypeArguments = 5;
 static constexpr dart::compiler::target::word
@@ -2756,7 +2752,7 @@
     SubtypeTestCache_kTestEntryLength = 8;
 static constexpr dart::compiler::target::word SubtypeTestCache_kTestResult = 0;
 static constexpr dart::compiler::target::word TypeArguments_kMaxElements =
-    576460752303423487;
+    134217727;
 static constexpr dart::compiler::target::word
     AbstractType_type_test_stub_entry_point_offset = 8;
 static constexpr dart::compiler::target::word ArgumentsDescriptor_count_offset =
@@ -5381,8 +5377,7 @@
 static constexpr dart::compiler::target::word ObjectPool_elements_start_offset =
     16;
 static constexpr dart::compiler::target::word ObjectPool_element_size = 8;
-static constexpr dart::compiler::target::word Array_kMaxElements =
-    576460752303423487;
+static constexpr dart::compiler::target::word Array_kMaxElements = 134217727;
 static constexpr dart::compiler::target::word Array_kMaxNewSpaceElements =
     32765;
 static constexpr dart::compiler::target::word
@@ -5400,8 +5395,7 @@
 static constexpr dart::compiler::target::word OldPage_kBytesPerCardLog2 = 10;
 static constexpr dart::compiler::target::word
     NativeEntry_kNumCallWrapperArguments = 2;
-static constexpr dart::compiler::target::word String_kMaxElements =
-    2305843009213693951;
+static constexpr dart::compiler::target::word String_kMaxElements = 536870911;
 static constexpr dart::compiler::target::word
     SubtypeTestCache_kFunctionTypeArguments = 5;
 static constexpr dart::compiler::target::word
@@ -5420,7 +5414,7 @@
     SubtypeTestCache_kTestEntryLength = 8;
 static constexpr dart::compiler::target::word SubtypeTestCache_kTestResult = 0;
 static constexpr dart::compiler::target::word TypeArguments_kMaxElements =
-    576460752303423487;
+    134217727;
 static constexpr dart::compiler::target::word
     AbstractType_type_test_stub_entry_point_offset = 8;
 static constexpr dart::compiler::target::word ArgumentsDescriptor_count_offset =
@@ -5916,8 +5910,7 @@
 static constexpr dart::compiler::target::word ObjectPool_elements_start_offset =
     16;
 static constexpr dart::compiler::target::word ObjectPool_element_size = 8;
-static constexpr dart::compiler::target::word Array_kMaxElements =
-    576460752303423487;
+static constexpr dart::compiler::target::word Array_kMaxElements = 134217727;
 static constexpr dart::compiler::target::word Array_kMaxNewSpaceElements =
     32765;
 static constexpr dart::compiler::target::word
@@ -5935,8 +5928,7 @@
 static constexpr dart::compiler::target::word OldPage_kBytesPerCardLog2 = 10;
 static constexpr dart::compiler::target::word
     NativeEntry_kNumCallWrapperArguments = 2;
-static constexpr dart::compiler::target::word String_kMaxElements =
-    2305843009213693951;
+static constexpr dart::compiler::target::word String_kMaxElements = 536870911;
 static constexpr dart::compiler::target::word
     SubtypeTestCache_kFunctionTypeArguments = 5;
 static constexpr dart::compiler::target::word
@@ -5955,7 +5947,7 @@
     SubtypeTestCache_kTestEntryLength = 8;
 static constexpr dart::compiler::target::word SubtypeTestCache_kTestResult = 0;
 static constexpr dart::compiler::target::word TypeArguments_kMaxElements =
-    576460752303423487;
+    134217727;
 static constexpr dart::compiler::target::word
     AbstractType_type_test_stub_entry_point_offset = 8;
 static constexpr dart::compiler::target::word ArgumentsDescriptor_count_offset =
@@ -8248,7 +8240,7 @@
     AOT_ObjectPool_elements_start_offset = 16;
 static constexpr dart::compiler::target::word AOT_ObjectPool_element_size = 8;
 static constexpr dart::compiler::target::word AOT_Array_kMaxElements =
-    576460752303423487;
+    134217727;
 static constexpr dart::compiler::target::word AOT_Array_kMaxNewSpaceElements =
     32765;
 static constexpr dart::compiler::target::word
@@ -8268,7 +8260,7 @@
 static constexpr dart::compiler::target::word
     AOT_NativeEntry_kNumCallWrapperArguments = 2;
 static constexpr dart::compiler::target::word AOT_String_kMaxElements =
-    2305843009213693951;
+    536870911;
 static constexpr dart::compiler::target::word
     AOT_SubtypeTestCache_kFunctionTypeArguments = 5;
 static constexpr dart::compiler::target::word
@@ -8288,7 +8280,7 @@
 static constexpr dart::compiler::target::word AOT_SubtypeTestCache_kTestResult =
     0;
 static constexpr dart::compiler::target::word AOT_TypeArguments_kMaxElements =
-    576460752303423487;
+    134217727;
 static constexpr dart::compiler::target::word
     AOT_AbstractType_type_test_stub_entry_point_offset = 8;
 static constexpr dart::compiler::target::word
@@ -8846,7 +8838,7 @@
     AOT_ObjectPool_elements_start_offset = 16;
 static constexpr dart::compiler::target::word AOT_ObjectPool_element_size = 8;
 static constexpr dart::compiler::target::word AOT_Array_kMaxElements =
-    576460752303423487;
+    134217727;
 static constexpr dart::compiler::target::word AOT_Array_kMaxNewSpaceElements =
     32765;
 static constexpr dart::compiler::target::word
@@ -8866,7 +8858,7 @@
 static constexpr dart::compiler::target::word
     AOT_NativeEntry_kNumCallWrapperArguments = 2;
 static constexpr dart::compiler::target::word AOT_String_kMaxElements =
-    2305843009213693951;
+    536870911;
 static constexpr dart::compiler::target::word
     AOT_SubtypeTestCache_kFunctionTypeArguments = 5;
 static constexpr dart::compiler::target::word
@@ -8886,7 +8878,7 @@
 static constexpr dart::compiler::target::word AOT_SubtypeTestCache_kTestResult =
     0;
 static constexpr dart::compiler::target::word AOT_TypeArguments_kMaxElements =
-    576460752303423487;
+    134217727;
 static constexpr dart::compiler::target::word
     AOT_AbstractType_type_test_stub_entry_point_offset = 8;
 static constexpr dart::compiler::target::word
@@ -11220,7 +11212,7 @@
     AOT_ObjectPool_elements_start_offset = 16;
 static constexpr dart::compiler::target::word AOT_ObjectPool_element_size = 8;
 static constexpr dart::compiler::target::word AOT_Array_kMaxElements =
-    576460752303423487;
+    134217727;
 static constexpr dart::compiler::target::word AOT_Array_kMaxNewSpaceElements =
     32765;
 static constexpr dart::compiler::target::word
@@ -11240,7 +11232,7 @@
 static constexpr dart::compiler::target::word
     AOT_NativeEntry_kNumCallWrapperArguments = 2;
 static constexpr dart::compiler::target::word AOT_String_kMaxElements =
-    2305843009213693951;
+    536870911;
 static constexpr dart::compiler::target::word
     AOT_SubtypeTestCache_kFunctionTypeArguments = 5;
 static constexpr dart::compiler::target::word
@@ -11260,7 +11252,7 @@
 static constexpr dart::compiler::target::word AOT_SubtypeTestCache_kTestResult =
     0;
 static constexpr dart::compiler::target::word AOT_TypeArguments_kMaxElements =
-    576460752303423487;
+    134217727;
 static constexpr dart::compiler::target::word
     AOT_AbstractType_type_test_stub_entry_point_offset = 8;
 static constexpr dart::compiler::target::word
@@ -11811,7 +11803,7 @@
     AOT_ObjectPool_elements_start_offset = 16;
 static constexpr dart::compiler::target::word AOT_ObjectPool_element_size = 8;
 static constexpr dart::compiler::target::word AOT_Array_kMaxElements =
-    576460752303423487;
+    134217727;
 static constexpr dart::compiler::target::word AOT_Array_kMaxNewSpaceElements =
     32765;
 static constexpr dart::compiler::target::word
@@ -11831,7 +11823,7 @@
 static constexpr dart::compiler::target::word
     AOT_NativeEntry_kNumCallWrapperArguments = 2;
 static constexpr dart::compiler::target::word AOT_String_kMaxElements =
-    2305843009213693951;
+    536870911;
 static constexpr dart::compiler::target::word
     AOT_SubtypeTestCache_kFunctionTypeArguments = 5;
 static constexpr dart::compiler::target::word
@@ -11851,7 +11843,7 @@
 static constexpr dart::compiler::target::word AOT_SubtypeTestCache_kTestResult =
     0;
 static constexpr dart::compiler::target::word AOT_TypeArguments_kMaxElements =
-    576460752303423487;
+    134217727;
 static constexpr dart::compiler::target::word
     AOT_AbstractType_type_test_stub_entry_point_offset = 8;
 static constexpr dart::compiler::target::word
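
The replacement values are consistent with the reduced Smi maximum divided by an
8-byte element size for arrays and type arguments and a 2-byte code unit for
strings. A verification sketch, not generated code:

    #include <stdint.h>
    constexpr int64_t kOldSmiMax = (static_cast<int64_t>(1) << 62) - 1;
    constexpr int64_t kNewSmiMax = (static_cast<int64_t>(1) << 30) - 1;
    static_assert(kOldSmiMax / 8 == 576460752303423487, "old Array/TypeArguments cap");
    static_assert(kNewSmiMax / 8 == 134217727, "new Array/TypeArguments cap");
    static_assert(kOldSmiMax / 2 == 2305843009213693951, "old String cap");
    static_assert(kNewSmiMax / 2 == 536870911, "new String cap");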
diff --git a/runtime/vm/compiler/stub_code_compiler_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
index 2bbe95e..01b54d7 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -559,9 +559,18 @@
       Label length, smi_case;
 
       // The user-controlled index might not fit into a Smi.
+#if !defined(DART_COMPRESSED_POINTERS)
       __ adds(RangeErrorABI::kIndexReg, RangeErrorABI::kIndexReg,
               compiler::Operand(RangeErrorABI::kIndexReg));
       __ BranchIf(NO_OVERFLOW, &length);
+#else
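+      // Tag and sign-extend the index; if untagging does not recover the
+      // original (saved in TMP), it does not fit in a Smi: box a Mint below.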
+      __ mov(TMP, RangeErrorABI::kIndexReg);
+      __ SmiTag(RangeErrorABI::kIndexReg);
+      __ sxtw(RangeErrorABI::kIndexReg, RangeErrorABI::kIndexReg);
+      __ cmp(TMP,
+             compiler::Operand(RangeErrorABI::kIndexReg, ASR, kSmiTagSize));
+      __ BranchIf(EQ, &length);
+#endif
       {
         // Allocate a mint, reload the two registers, and populate the mint.
         __ PushRegister(NULL_REG);
@@ -2172,8 +2181,14 @@
   __ BranchIfNotSmi(TMP, not_smi_or_overflow);
   switch (kind) {
     case Token::kADD: {
-      __ adds(R0, R1, Operand(R0));   // Adds.
+#if !defined(DART_COMPRESSED_POINTERS)
+      __ adds(R0, R1, Operand(R0));   // Add.
       __ b(not_smi_or_overflow, VS);  // Branch if overflow.
+#else
+      __ addsw(R0, R1, Operand(R0));  // Add (32-bit).
+      __ b(not_smi_or_overflow, VS);  // Branch if overflow.
+      __ sxtw(R0, R0);                // Sign extend.
+#endif
       break;
     }
     case Token::kLT: {
diff --git a/runtime/vm/compiler/stub_code_compiler_x64.cc b/runtime/vm/compiler/stub_code_compiler_x64.cc
index 175ed5d..e852d5a 100644
--- a/runtime/vm/compiler/stub_code_compiler_x64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_x64.cc
@@ -502,8 +502,17 @@
       Label length, smi_case;
 
       // The user-controlled index might not fit into a Smi.
+#if !defined(DART_COMPRESSED_POINTERS)
       __ addq(RangeErrorABI::kIndexReg, RangeErrorABI::kIndexReg);
       __ BranchIf(NO_OVERFLOW, &length);
+#else
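+      // The untagged index fits in a 31-bit Smi iff (index >> 30) is 0 or
+      // -1; the add/compare below folds both cases into one unsigned check.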
+      __ movq(TMP, RangeErrorABI::kIndexReg);
+      __ SmiTag(RangeErrorABI::kIndexReg);
+      __ sarq(TMP, Immediate(30));
+      __ addq(TMP, Immediate(1));
+      __ cmpq(TMP, Immediate(2));
+      __ j(BELOW, &length);
+#endif
       {
         // Allocate a mint, reload the two registers, and populate the mint.
         __ PushImmediate(Immediate(0));
@@ -2099,8 +2108,14 @@
   __ j(NOT_ZERO, not_smi_or_overflow);
   switch (kind) {
     case Token::kADD: {
+#if !defined(DART_COMPRESSED_POINTERS)
       __ addq(RAX, RCX);
       __ j(OVERFLOW, not_smi_or_overflow);
+#else
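+      // Add in 32 bits so the overflow flag reflects the 31-bit Smi range,
+      // then re-sign-extend the result to 64 bits.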
+      __ addl(RAX, RCX);
+      __ j(OVERFLOW, not_smi_or_overflow);
+      __ movsxd(RAX, RAX);
+#endif
       break;
     }
     case Token::kLT: {
diff --git a/runtime/vm/constants_arm64.h b/runtime/vm/constants_arm64.h
index 333268d..16af371 100644
--- a/runtime/vm/constants_arm64.h
+++ b/runtime/vm/constants_arm64.h
@@ -941,14 +941,14 @@
 
 enum Extend {
   kNoExtend = -1,
-  UXTB = 0,
-  UXTH = 1,
-  UXTW = 2,
-  UXTX = 3,
-  SXTB = 4,
-  SXTH = 5,
-  SXTW = 6,
-  SXTX = 7,
+  UXTB = 0,  // Zero extend byte.
+  UXTH = 1,  // Zero extend halfword (16 bits).
+  UXTW = 2,  // Zero extend word (32 bits).
+  UXTX = 3,  // Zero extend doubleword (64 bits).
+  SXTB = 4,  // Sign extend byte.
+  SXTH = 5,  // Sign extend halfword (16 bits).
+  SXTW = 6,  // Sign extend word (32 bits).
+  SXTX = 7,  // Sign extend doubleword (64 bits).
   kMaxExtend = 8,
 };
 
diff --git a/runtime/vm/globals.h b/runtime/vm/globals.h
index 3ac014d..be0584f 100644
--- a/runtime/vm/globals.h
+++ b/runtime/vm/globals.h
@@ -20,7 +20,11 @@
 namespace dart {
 // Smi value range is from -(2^N) to (2^N)-1.
 // N=30 (32-bit build, or 64-bit build with compressed pointers) or N=62
 // (64-bit build without compressed pointers).
+#if !defined(DART_COMPRESSED_POINTERS)
 const intptr_t kSmiBits = kBitsPerWord - 2;
+#else
+const intptr_t kSmiBits = 30;
+#endif
 const intptr_t kSmiMax = (static_cast<intptr_t>(1) << kSmiBits) - 1;
 const intptr_t kSmiMin = -(static_cast<intptr_t>(1) << kSmiBits);
 
diff --git a/runtime/vm/object_test.cc b/runtime/vm/object_test.cc
index cad9d62..f09faae 100644
--- a/runtime/vm/object_test.cc
+++ b/runtime/vm/object_test.cc
@@ -374,7 +374,7 @@
   EXPECT(Smi::IsValid(-15));
   EXPECT(Smi::IsValid(0xFFu));
 // Upper two bits must be either 00 or 11.
-#if defined(ARCH_IS_64_BIT)
+#if defined(ARCH_IS_64_BIT) && !defined(DART_COMPRESSED_POINTERS)
   EXPECT(!Smi::IsValid(kMaxInt64));
   EXPECT(Smi::IsValid(0x3FFFFFFFFFFFFFFF));
   EXPECT(Smi::IsValid(-1));
@@ -502,7 +502,7 @@
 ISOLATE_UNIT_TEST_CASE(Mint) {
 // On 64-bit architectures a Smi is stored in a 64-bit word. A Mint cannot
 // be allocated if the value fits into a Smi.
-#if !defined(ARCH_IS_64_BIT)
+#if !defined(ARCH_IS_64_BIT) || defined(DART_COMPRESSED_POINTERS)
   {
     Mint& med = Mint::Handle();
     EXPECT(med.IsNull());
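
With compression, the 64-bit Smi/Mint boundary moves to 2^30. A sketch of the
boundary values in the style of the Smi validity checks above; illustrative
only, not part of the patch:

    #if defined(DART_COMPRESSED_POINTERS)
      EXPECT(Smi::IsValid(0x3FFFFFFF));    // kSmiMax is still a Smi.
      EXPECT(!Smi::IsValid(0x40000000));   // kSmiMax + 1 requires a Mint.
      EXPECT(!Smi::IsValid(-0x40000001));  // kSmiMin - 1 requires a Mint.
    #endif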
diff --git a/runtime/vm/regexp.cc b/runtime/vm/regexp.cc
index 9b880c8..782ecd1 100644
--- a/runtime/vm/regexp.cc
+++ b/runtime/vm/regexp.cc
@@ -2683,18 +2683,28 @@
                                                 intptr_t eats_at_least) {
   intptr_t preload_characters =
       Utils::Minimum(static_cast<intptr_t>(4), eats_at_least);
-  if (compiler->macro_assembler()->CanReadUnaligned()) {
-    bool one_byte = compiler->one_byte();
-    if (one_byte) {
-      if (preload_characters > 4) preload_characters = 4;
-      // We can't preload 3 characters because there is no machine instruction
-      // to do that.  We can't just load 4 because we could be reading
-      // beyond the end of the string, which could cause a memory fault.
-      if (preload_characters == 3) preload_characters = 2;
-    } else {
-      if (preload_characters > 2) preload_characters = 2;
-    }
+  if (compiler->one_byte()) {
+#if !defined(DART_COMPRESSED_POINTERS)
+    if (preload_characters > 4) preload_characters = 4;
+    // We can't preload 3 characters because there is no machine instruction
+    // to do that.  We can't just load 4 because we could be reading
+    // beyond the end of the string, which could cause a memory fault.
+    if (preload_characters == 3) preload_characters = 2;
+#else
+    // Ensure LoadCodeUnitsInstr can always produce a Smi. See
+    // https://github.com/dart-lang/sdk/issues/29951
+    if (preload_characters > 2) preload_characters = 2;
+#endif
   } else {
+#if !defined(DART_COMPRESSED_POINTERS)
+    if (preload_characters > 2) preload_characters = 2;
+#else
+    // Ensure LoadCodeUnitsInstr can always produce a Smi. See
+    // https://github.com/dart-lang/sdk/issues/29951
+    if (preload_characters > 1) preload_characters = 1;
+#endif
+  }
+  if (!compiler->macro_assembler()->CanReadUnaligned()) {
     if (preload_characters > 1) preload_characters = 1;
   }
   return preload_characters;
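
The tighter caps keep the preloaded code units representable as a single Smi. A
bit-count sketch, assuming the usual 8-bit one-byte and 16-bit two-byte string
encodings and the 30-bit kSmiBits introduced above:

    constexpr int kSmiPayloadBits = 30;  // kSmiBits under DART_COMPRESSED_POINTERS.
    static_assert(2 * 8 <= kSmiPayloadBits, "two one-byte chars load as a Smi");
    static_assert(4 * 8 > kSmiPayloadBits, "four one-byte chars would not");
    static_assert(1 * 16 <= kSmiPayloadBits, "one two-byte char loads as a Smi");
    static_assert(2 * 16 > kSmiPayloadBits, "two two-byte chars would not");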
diff --git a/runtime/vm/snapshot_test.cc b/runtime/vm/snapshot_test.cc
index 16bce51..7ee9e11 100644
--- a/runtime/vm/snapshot_test.cc
+++ b/runtime/vm/snapshot_test.cc
@@ -232,7 +232,7 @@
 // here covers most of the 64-bit range. On 32-bit platforms the smi
 // range covers most of the 32-bit range and values outside that
 // range are also represented as mints.
-#if defined(ARCH_IS_64_BIT)
+#if defined(ARCH_IS_64_BIT) && !defined(DART_COMPRESSED_POINTERS)
   EXPECT_EQ(Dart_CObject_kInt64, mint_cobject->type);
   EXPECT_EQ(value, mint_cobject->value.as_int64);
 #else