[vm/compiler/aot] Implement non-speculative int64 shift operation

Closes https://github.com/dart-lang/sdk/issues/33364

Change-Id: I9432d82a1ee58fbbdc1b54b810a71c2c7ddd8fce
Reviewed-on: https://dart-review.googlesource.com/59821
Commit-Queue: Alexander Markov <alexmarkov@google.com>
Reviewed-by: Zach Anderson <zra@google.com>
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
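
In AOT mode the compiler cannot fall back on speculative deoptimization, so
int64 shifts with a non-constant count need explicit handling of out-of-range
counts. A rough Dart-level sketch of the semantics the new non-speculative
shift implements (function names are illustrative, not part of this CL):

    int shiftRightInt64(int value, int count) {
      if (count < 0) throw ArgumentError.value(count, 'count');
      // '>>' is arithmetic: counts above 63 leave only the sign.
      return value >> (count > 63 ? 63 : count);
    }

    int shiftLeftInt64(int value, int count) {
      if (count < 0) throw ArgumentError.value(count, 'count');
      // Counts above 63 shift every bit out of an int64.
      return count > 63 ? 0 : value << count;
    }
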
diff --git a/runtime/vm/compiler/aot/aot_call_specializer.cc b/runtime/vm/compiler/aot/aot_call_specializer.cc
index 83f76d1..1b9b8e5 100644
--- a/runtime/vm/compiler/aot/aot_call_specializer.cc
+++ b/runtime/vm/compiler/aot/aot_call_specializer.cc
@@ -605,6 +605,20 @@
         break;
       }
 
+      case Token::kSHL:
+      case Token::kSHR: {
+        Value* left_value = instr->PushArgumentAt(receiver_index)->value();
+        Value* right_value = instr->PushArgumentAt(receiver_index + 1)->value();
+        CompileType* right_type = right_value->Type();
+        if (right_type->IsNullableInt()) {
+          left_value = PrepareReceiverOfDevirtualizedCall(left_value, kMintCid);
+          right_value = PrepareStaticOpInput(right_value, kMintCid, instr);
+          replacement = new (Z) ShiftInt64OpInstr(
+              op_kind, left_value, right_value, Thread::kNoDeoptId);
+        }
+        break;
+      }
+
       default:
         break;
     }
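
The hunk above lets the AOT call specializer devirtualize '<<' and '>>' when
both operands are known (nullable) integers. A hedged illustration of the kind
of Dart code this targets:

    // Illustrative only: with x and n inferred as int, the instance call
    // is replaced by a non-speculative ShiftInt64Op.
    int scaleBy(int x, int n) => x << n;
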
diff --git a/runtime/vm/compiler/backend/constant_propagator.cc b/runtime/vm/compiler/backend/constant_propagator.cc
index 55fcc83..ab5b854 100644
--- a/runtime/vm/compiler/backend/constant_propagator.cc
+++ b/runtime/vm/compiler/backend/constant_propagator.cc
@@ -933,10 +933,6 @@
   VisitBinaryIntegerOp(instr);
 }
 
-void ConstantPropagator::VisitShiftUint32Op(ShiftUint32OpInstr* instr) {
-  VisitBinaryIntegerOp(instr);
-}
-
 void ConstantPropagator::VisitBinaryInt64Op(BinaryInt64OpInstr* instr) {
   VisitBinaryIntegerOp(instr);
 }
@@ -945,6 +941,20 @@
   VisitBinaryIntegerOp(instr);
 }
 
+void ConstantPropagator::VisitSpeculativeShiftInt64Op(
+    SpeculativeShiftInt64OpInstr* instr) {
+  VisitBinaryIntegerOp(instr);
+}
+
+void ConstantPropagator::VisitShiftUint32Op(ShiftUint32OpInstr* instr) {
+  VisitBinaryIntegerOp(instr);
+}
+
+void ConstantPropagator::VisitSpeculativeShiftUint32Op(
+    SpeculativeShiftUint32OpInstr* instr) {
+  VisitBinaryIntegerOp(instr);
+}
+
 void ConstantPropagator::VisitBoxInt64(BoxInt64Instr* instr) {
   // TODO(kmillikin): Handle box operation.
   SetValue(instr, non_constant_);
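
Since all four shift variants delegate to VisitBinaryIntegerOp, fully constant
shifts should still fold away; a minimal illustration (an assumption about
intent, not stated in the CL):

    // No shift instruction survives constant folding here.
    const int kEight = 1 << 3;
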
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.cc b/runtime/vm/compiler/backend/flow_graph_compiler.cc
index 90112c2..4f48c7b 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.cc
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.cc
@@ -682,8 +682,7 @@
 // See StackFrame::VisitObjectPointers for the details of how stack map is
 // interpreted.
 void FlowGraphCompiler::RecordSafepoint(LocationSummary* locs,
-                                        intptr_t slow_path_argument_count,
-                                        Environment* env) {
+                                        intptr_t slow_path_argument_count) {
   if (is_optimizing() || locs->live_registers()->HasUntaggedValues()) {
     const intptr_t spill_area_size =
         is_optimizing() ? flow_graph_.graph_entry()->spill_slot_count() : 0;
@@ -2149,6 +2148,56 @@
 }
 #endif  // defined(DEBUG) && !defined(TARGET_ARCH_DBC)
 
+#if !defined(TARGET_ARCH_DBC)
+#define __ compiler->assembler()->
+
+void ThrowErrorSlowPathCode::EmitNativeCode(FlowGraphCompiler* compiler) {
+  if (Assembler::EmittingComments()) {
+    __ Comment("slow path %s operation", name());
+  }
+  const bool use_shared_stub =
+      instruction()->UseSharedSlowPathStub(compiler->is_optimizing());
+  const bool live_fpu_registers =
+      instruction()->locs()->live_registers()->FpuRegisterCount() > 0;
+  ASSERT(!use_shared_stub || num_args_ == 0);
+  __ Bind(entry_label());
+  EmitCodeAtSlowPathEntry(compiler);
+  LocationSummary* locs = instruction()->locs();
+  // Save registers as they are needed for lazy deopt / exception handling.
+  if (!use_shared_stub) {
+    compiler->SaveLiveRegisters(locs);
+  }
+  for (intptr_t i = 0; i < num_args_; ++i) {
+    __ PushRegister(locs->in(i).reg());
+  }
+  if (use_shared_stub) {
+    EmitSharedStubCall(compiler->assembler(), live_fpu_registers);
+  } else {
+    __ CallRuntime(runtime_entry_, num_args_);
+  }
+  // deopt_id() may only be queried for instructions that can deoptimize or
+  // can become a deoptimization target.
+  intptr_t deopt_id = Thread::kNoDeoptId;
+  if (instruction()->CanDeoptimize() ||
+      instruction()->CanBecomeDeoptimizationTarget()) {
+    deopt_id = instruction()->deopt_id();
+  }
+  compiler->AddDescriptor(RawPcDescriptors::kOther,
+                          compiler->assembler()->CodeSize(), deopt_id,
+                          instruction()->token_pos(), try_index_);
+  AddMetadataForRuntimeCall(compiler);
+  compiler->RecordSafepoint(locs, num_args_);
+  if ((try_index_ != CatchClauseNode::kInvalidTryIndex) ||
+      (compiler->CurrentTryIndex() != CatchClauseNode::kInvalidTryIndex)) {
+    Environment* env =
+        compiler->SlowPathEnvironmentFor(instruction(), num_args_);
+    compiler->EmitCatchEntryState(env, try_index_);
+  }
+  __ Breakpoint();
+}
+
+#undef __
+#endif  //  !defined(TARGET_ARCH_DBC)
+
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
 
 }  // namespace dart
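
Because the slow path throws an ordinary Dart ArgumentError, the error must
remain catchable, which is why catch entry state is emitted above whenever a
try index is present. A hedged Dart illustration:

    int guardedShift(int x, int n) {
      try {
        return x << n;
      } on ArgumentError {
        return 0; // Taken when n turns out to be negative at run time.
      }
    }
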
diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.h b/runtime/vm/compiler/backend/flow_graph_compiler.h
index 8e05c21..83f91fb 100644
--- a/runtime/vm/compiler/backend/flow_graph_compiler.h
+++ b/runtime/vm/compiler/backend/flow_graph_compiler.h
@@ -240,6 +240,42 @@
   }
 };
 
+#if !defined(TARGET_ARCH_DBC)
+
+// Slow path code that calls a runtime entry to throw an exception.
+class ThrowErrorSlowPathCode : public TemplateSlowPathCode<Instruction> {
+ public:
+  ThrowErrorSlowPathCode(Instruction* instruction,
+                         const RuntimeEntry& runtime_entry,
+                         intptr_t num_args,
+                         intptr_t try_index)
+      : TemplateSlowPathCode(instruction),
+        runtime_entry_(runtime_entry),
+        num_args_(num_args),
+        try_index_(try_index) {}
+
+  // This name appears in disassembly.
+  virtual const char* name() = 0;
+
+  // Subclasses can override these methods to customize slow path code.
+  virtual void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) {}
+  virtual void AddMetadataForRuntimeCall(FlowGraphCompiler* compiler) {}
+
+  virtual void EmitSharedStubCall(Assembler* assembler,
+                                  bool save_fpu_registers) {
+    UNREACHABLE();
+  }
+
+  virtual void EmitNativeCode(FlowGraphCompiler* compiler);
+
+ private:
+  const RuntimeEntry& runtime_entry_;
+  const intptr_t num_args_;
+  const intptr_t try_index_;
+};
+
+#endif  // !defined(TARGET_ARCH_DBC)
+
 class FlowGraphCompiler : public ValueObject {
  private:
   class BlockInfo : public ZoneAllocated {
@@ -583,10 +619,8 @@
                     TokenPosition token_pos,
                     intptr_t null_check_name_idx);
 
-  // 'environment' is required if 'using_shared_stub'.
   void RecordSafepoint(LocationSummary* locs,
-                       intptr_t slow_path_argument_count = 0,
-                       Environment* env = NULL);
+                       intptr_t slow_path_argument_count = 0);
 
   Label* AddDeoptStub(intptr_t deopt_id,
                       ICData::DeoptReasonId reason,
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index a549f9b..f773c95 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -1569,8 +1569,8 @@
   }
 }
 
-bool ShiftInt64OpInstr::IsShiftCountInRange() const {
-  return RangeUtils::IsWithin(shift_range(), 0, kMintShiftCountLimit);
+bool ShiftIntegerOpInstr::IsShiftCountInRange(int64_t max) const {
+  return RangeUtils::IsWithin(shift_range(), 0, max);
 }
 
 bool BinaryIntegerOpInstr::RightIsPowerOfTwoConstant() const {
@@ -1780,14 +1780,16 @@
   return op;
 }
 
-BinaryIntegerOpInstr* BinaryIntegerOpInstr::Make(Representation representation,
-                                                 Token::Kind op_kind,
-                                                 Value* left,
-                                                 Value* right,
-                                                 intptr_t deopt_id,
-                                                 bool can_overflow,
-                                                 bool is_truncating,
-                                                 Range* range) {
+BinaryIntegerOpInstr* BinaryIntegerOpInstr::Make(
+    Representation representation,
+    Token::Kind op_kind,
+    Value* left,
+    Value* right,
+    intptr_t deopt_id,
+    bool can_overflow,
+    bool is_truncating,
+    Range* range,
+    SpeculativeMode speculative_mode) {
   BinaryIntegerOpInstr* op = NULL;
   switch (representation) {
     case kTagged:
@@ -1801,14 +1803,23 @@
       break;
     case kUnboxedUint32:
       if ((op_kind == Token::kSHR) || (op_kind == Token::kSHL)) {
-        op = new ShiftUint32OpInstr(op_kind, left, right, deopt_id);
+        if (speculative_mode == kNotSpeculative) {
+          op = new ShiftUint32OpInstr(op_kind, left, right, deopt_id);
+        } else {
+          op =
+              new SpeculativeShiftUint32OpInstr(op_kind, left, right, deopt_id);
+        }
       } else {
         op = new BinaryUint32OpInstr(op_kind, left, right, deopt_id);
       }
       break;
     case kUnboxedInt64:
       if ((op_kind == Token::kSHR) || (op_kind == Token::kSHL)) {
-        op = new ShiftInt64OpInstr(op_kind, left, right, deopt_id);
+        if (speculative_mode == kNotSpeculative) {
+          op = new ShiftInt64OpInstr(op_kind, left, right, deopt_id);
+        } else {
+          op = new SpeculativeShiftInt64OpInstr(op_kind, left, right, deopt_id);
+        }
       } else {
         op = new BinaryInt64OpInstr(op_kind, left, right, deopt_id);
       }
@@ -2094,7 +2105,7 @@
         BinaryIntegerOpInstr* shift = BinaryIntegerOpInstr::Make(
             representation(), Token::kSHL, left()->CopyWithType(),
             new Value(constant_1), GetDeoptId(), can_overflow(),
-            is_truncating(), range());
+            is_truncating(), range(), speculative_mode());
         if (shift != NULL) {
           flow_graph->InsertBefore(this, shift, env(), FlowGraph::kValue);
           return shift;
@@ -3973,67 +3984,6 @@
   return locs;
 }
 
-class ThrowErrorSlowPathCode : public TemplateSlowPathCode<Instruction> {
- public:
-  ThrowErrorSlowPathCode(Instruction* instruction,
-                         const RuntimeEntry& runtime_entry,
-                         intptr_t num_args,
-                         intptr_t try_index)
-      : TemplateSlowPathCode(instruction),
-        runtime_entry_(runtime_entry),
-        num_args_(num_args),
-        try_index_(try_index) {}
-
-  virtual const char* name() = 0;
-
-  virtual void AddMetadataForRuntimeCall(FlowGraphCompiler* compiler) {}
-
-  virtual void EmitSharedStubCall(Assembler* assembler,
-                                  bool save_fpu_registers) {
-    UNREACHABLE();
-  }
-
-  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
-    if (Assembler::EmittingComments()) {
-      __ Comment("slow path %s operation", name());
-    }
-    bool use_shared_stub =
-        instruction()->UseSharedSlowPathStub(compiler->is_optimizing());
-    bool live_fpu_registers =
-        instruction()->locs()->live_registers()->FpuRegisterCount() > 0;
-    ASSERT(!use_shared_stub || num_args_ == 0);
-    __ Bind(entry_label());
-    LocationSummary* locs = instruction()->locs();
-    // Save registers as they are needed for lazy deopt / exception handling.
-    if (!use_shared_stub) {
-      compiler->SaveLiveRegisters(locs);
-    }
-    for (intptr_t i = 0; i < num_args_; ++i) {
-      __ PushRegister(locs->in(i).reg());
-    }
-    if (use_shared_stub) {
-      EmitSharedStubCall(compiler->assembler(), live_fpu_registers);
-    } else {
-      __ CallRuntime(runtime_entry_, num_args_);
-    }
-    compiler->AddDescriptor(
-        RawPcDescriptors::kOther, compiler->assembler()->CodeSize(),
-        instruction()->deopt_id(), instruction()->token_pos(), try_index_);
-    AddMetadataForRuntimeCall(compiler);
-    ASSERT(instruction()->env() != nullptr);
-    compiler->RecordSafepoint(locs, num_args_, instruction()->env());
-    Environment* env =
-        compiler->SlowPathEnvironmentFor(instruction(), num_args_);
-    compiler->EmitCatchEntryState(env, try_index_);
-    __ Breakpoint();
-  }
-
- private:
-  const RuntimeEntry& runtime_entry_;
-  const intptr_t num_args_;
-  const intptr_t try_index_;
-};
-
 class RangeErrorSlowPath : public ThrowErrorSlowPathCode {
  public:
   static const intptr_t kNumberOfArguments = 2;
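
Threading speculative_mode() through BinaryIntegerOpInstr::Make keeps
strength-reduced operations in the same deoptimization regime as the original
instruction; a sketch of the affected pattern (illustrative only):

    // x * 2 may be strength-reduced to x << 1, and the resulting shift
    // must not change between speculative and non-speculative forms.
    int twice(int x) => x * 2;
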
diff --git a/runtime/vm/compiler/backend/il.h b/runtime/vm/compiler/backend/il.h
index bcafcbd..656e856 100644
--- a/runtime/vm/compiler/backend/il.h
+++ b/runtime/vm/compiler/backend/il.h
@@ -547,6 +547,7 @@
   M(CaseInsensitiveCompareUC16)                                                \
   M(BinaryInt64Op)                                                             \
   M(ShiftInt64Op)                                                              \
+  M(SpeculativeShiftInt64Op)                                                   \
   M(UnaryInt64Op)                                                              \
   M(CheckArrayBound)                                                           \
   M(GenericCheckBound)                                                         \
@@ -565,6 +566,7 @@
   M(ExtractNthOutput)                                                          \
   M(BinaryUint32Op)                                                            \
   M(ShiftUint32Op)                                                             \
+  M(SpeculativeShiftUint32Op)                                                  \
   M(UnaryUint32Op)                                                             \
   M(BoxUint32)                                                                 \
   M(UnboxUint32)                                                               \
@@ -580,7 +582,8 @@
   M(UnboxInteger)                                                              \
   M(Comparison)                                                                \
   M(UnaryIntegerOp)                                                            \
-  M(BinaryIntegerOp)
+  M(BinaryIntegerOp)                                                           \
+  M(ShiftIntegerOp)
 
 #define FORWARD_DECLARATION(type) class type##Instr;
 FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
@@ -1818,8 +1821,7 @@
 
   bool HasType() const { return (type_ != NULL); }
 
-  // Does this define a mint?
-  inline bool IsMintDefinition();
+  inline bool IsInt64Definition();
 
   bool IsInt32Definition() {
     return IsBinaryInt32Op() || IsBoxInt32() || IsUnboxInt32() ||
@@ -5545,9 +5547,10 @@
   DISALLOW_COPY_AND_ASSIGN(UnboxInt64Instr);
 };
 
-bool Definition::IsMintDefinition() {
+bool Definition::IsInt64Definition() {
   return (Type()->ToCid() == kMintCid) || IsBinaryInt64Op() ||
-         IsUnaryInt64Op() || IsShiftInt64Op() || IsBoxInt64() || IsUnboxInt64();
+         IsUnaryInt64Op() || IsShiftInt64Op() || IsSpeculativeShiftInt64Op() ||
+         IsBoxInt64() || IsUnboxInt64();
 }
 
 class MathUnaryInstr : public TemplateDefinition<1, NoThrow, Pure> {
@@ -6018,14 +6021,16 @@
     SetInputAt(1, right);
   }
 
-  static BinaryIntegerOpInstr* Make(Representation representation,
-                                    Token::Kind op_kind,
-                                    Value* left,
-                                    Value* right,
-                                    intptr_t deopt_id,
-                                    bool can_overflow,
-                                    bool is_truncating,
-                                    Range* range);
+  static BinaryIntegerOpInstr* Make(
+      Representation representation,
+      Token::Kind op_kind,
+      Value* left,
+      Value* right,
+      intptr_t deopt_id,
+      bool can_overflow,
+      bool is_truncating,
+      Range* range,
+      SpeculativeMode speculative_mode = kGuardInputs);
 
   Token::Kind op_kind() const { return op_kind_; }
   Value* left() const { return inputs_[0]; }
@@ -6071,6 +6076,8 @@
 
   bool can_overflow_;
   bool is_truncating_;
+
+  DISALLOW_COPY_AND_ASSIGN(BinaryIntegerOpInstr);
 };
 
 class BinarySmiOpInstr : public BinaryIntegerOpInstr {
@@ -6180,33 +6187,6 @@
   DISALLOW_COPY_AND_ASSIGN(BinaryUint32OpInstr);
 };
 
-class ShiftUint32OpInstr : public BinaryIntegerOpInstr {
- public:
-  ShiftUint32OpInstr(Token::Kind op_kind,
-                     Value* left,
-                     Value* right,
-                     intptr_t deopt_id)
-      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id) {
-    ASSERT((op_kind == Token::kSHR) || (op_kind == Token::kSHL));
-  }
-
-  virtual bool ComputeCanDeoptimize() const { return true; }
-
-  virtual Representation representation() const { return kUnboxedUint32; }
-
-  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
-    ASSERT((idx == 0) || (idx == 1));
-    return (idx == 0) ? kUnboxedUint32 : kTagged;
-  }
-
-  virtual CompileType ComputeType() const;
-
-  DECLARE_INSTRUCTION(ShiftUint32Op)
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(ShiftUint32OpInstr);
-};
-
 class BinaryInt64OpInstr : public BinaryIntegerOpInstr {
  public:
   BinaryInt64OpInstr(Token::Kind op_kind,
@@ -6253,12 +6233,13 @@
   DISALLOW_COPY_AND_ASSIGN(BinaryInt64OpInstr);
 };
 
-class ShiftInt64OpInstr : public BinaryIntegerOpInstr {
+// Base class for integer shift operations.
+class ShiftIntegerOpInstr : public BinaryIntegerOpInstr {
  public:
-  ShiftInt64OpInstr(Token::Kind op_kind,
-                    Value* left,
-                    Value* right,
-                    intptr_t deopt_id)
+  ShiftIntegerOpInstr(Token::Kind op_kind,
+                      Value* left,
+                      Value* right,
+                      intptr_t deopt_id)
       : BinaryIntegerOpInstr(op_kind, left, right, deopt_id),
         shift_range_(NULL) {
     ASSERT((op_kind == Token::kSHR) || (op_kind == Token::kSHL));
@@ -6267,6 +6248,62 @@
 
   Range* shift_range() const { return shift_range_; }
 
+  virtual void InferRange(RangeAnalysis* analysis, Range* range);
+
+  DEFINE_INSTRUCTION_TYPE_CHECK(ShiftIntegerOp)
+
+ protected:
+  static const intptr_t kShiftCountLimit = 63;
+
+  // Returns true if the shift amount is guaranteed to be in the
+  // [0..max] range.
+  bool IsShiftCountInRange(int64_t max = kShiftCountLimit) const;
+
+ private:
+  Range* shift_range_;
+
+  DISALLOW_COPY_AND_ASSIGN(ShiftIntegerOpInstr);
+};
+
+// Non-speculative int64 shift. Takes two unboxed int64 operands.
+// Throws if the right operand is negative.
+class ShiftInt64OpInstr : public ShiftIntegerOpInstr {
+ public:
+  ShiftInt64OpInstr(Token::Kind op_kind,
+                    Value* left,
+                    Value* right,
+                    intptr_t deopt_id)
+      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id) {}
+
+  virtual SpeculativeMode speculative_mode() const { return kNotSpeculative; }
+  virtual bool ComputeCanDeoptimize() const { return false; }
+  virtual bool MayThrow() const { return true; }
+
+  virtual Representation representation() const { return kUnboxedInt64; }
+
+  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
+    ASSERT((idx == 0) || (idx == 1));
+    return kUnboxedInt64;
+  }
+
+  virtual CompileType ComputeType() const;
+
+  DECLARE_INSTRUCTION(ShiftInt64Op)
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ShiftInt64OpInstr);
+};
+
+// Speculative int64 shift. Takes an unboxed int64 and a Smi.
+// Deoptimizes if the right operand is negative or exceeds kShiftCountLimit.
+class SpeculativeShiftInt64OpInstr : public ShiftIntegerOpInstr {
+ public:
+  SpeculativeShiftInt64OpInstr(Token::Kind op_kind,
+                               Value* left,
+                               Value* right,
+                               intptr_t deopt_id)
+      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id) {}
+
   virtual bool ComputeCanDeoptimize() const {
     ASSERT(!can_overflow());
     return !IsShiftCountInRange();
@@ -6279,21 +6316,72 @@
     return (idx == 0) ? kUnboxedInt64 : kTagged;
   }
 
-  virtual void InferRange(RangeAnalysis* analysis, Range* range);
   virtual CompileType ComputeType() const;
 
-  DECLARE_INSTRUCTION(ShiftInt64Op)
+  DECLARE_INSTRUCTION(SpeculativeShiftInt64Op)
 
  private:
-  static const intptr_t kMintShiftCountLimit = 63;
+  DISALLOW_COPY_AND_ASSIGN(SpeculativeShiftInt64OpInstr);
+};
 
-  // Returns true if the shift amount is guranteed to be in
-  // [0..kMintShiftCountLimit] range.
-  bool IsShiftCountInRange() const;
+// Non-speculative uint32 shift. Takes an unboxed uint32 and an unboxed int64.
+// Throws if the right operand is negative.
+class ShiftUint32OpInstr : public ShiftIntegerOpInstr {
+ public:
+  ShiftUint32OpInstr(Token::Kind op_kind,
+                     Value* left,
+                     Value* right,
+                     intptr_t deopt_id)
+      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id) {}
 
-  Range* shift_range_;
+  virtual SpeculativeMode speculative_mode() const { return kNotSpeculative; }
+  virtual bool ComputeCanDeoptimize() const { return false; }
+  virtual bool MayThrow() const { return true; }
 
-  DISALLOW_COPY_AND_ASSIGN(ShiftInt64OpInstr);
+  virtual Representation representation() const { return kUnboxedUint32; }
+
+  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
+    ASSERT((idx == 0) || (idx == 1));
+    return (idx == 0) ? kUnboxedUint32 : kUnboxedInt64;
+  }
+
+  virtual CompileType ComputeType() const;
+
+  DECLARE_INSTRUCTION(ShiftUint32Op)
+
+ private:
+  static const intptr_t kUint32ShiftCountLimit = 31;
+
+  DISALLOW_COPY_AND_ASSIGN(ShiftUint32OpInstr);
+};
+
+// Speculative uint32 shift. Takes an unboxed uint32 and a Smi.
+// Deoptimizes if the right operand is negative.
+class SpeculativeShiftUint32OpInstr : public ShiftIntegerOpInstr {
+ public:
+  SpeculativeShiftUint32OpInstr(Token::Kind op_kind,
+                                Value* left,
+                                Value* right,
+                                intptr_t deopt_id)
+      : ShiftIntegerOpInstr(op_kind, left, right, deopt_id) {}
+
+  virtual bool ComputeCanDeoptimize() const { return !IsShiftCountInRange(); }
+
+  virtual Representation representation() const { return kUnboxedUint32; }
+
+  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
+    ASSERT((idx == 0) || (idx == 1));
+    return (idx == 0) ? kUnboxedUint32 : kTagged;
+  }
+
+  DECLARE_INSTRUCTION(SpeculativeShiftUint32Op)
+
+  virtual CompileType ComputeType() const;
+
+ private:
+  static const intptr_t kUint32ShiftCountLimit = 31;
+
+  DISALLOW_COPY_AND_ASSIGN(SpeculativeShiftUint32OpInstr);
 };
 
 // Handles only NEGATE.
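
The resulting hierarchy: ShiftIntegerOpInstr is the shared base holding the
shift range, ShiftInt64Op and ShiftUint32Op are non-speculative (unboxed
count, throw on a negative count), and the Speculative variants take a Smi
count and deoptimize when it is out of range. A rough Dart model of the uint32
contract (function name is illustrative):

    int shiftRightUint32(int value, int count) {
      if (count < 0) throw ArgumentError.value(count, 'count');
      if (count > 31) return 0; // all bits shifted out of a uint32
      return (value & 0xFFFFFFFF) >> count;
    }
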
diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc
index 42daf66..8d16db6 100644
--- a/runtime/vm/compiler/backend/il_arm.cc
+++ b/runtime/vm/compiler/backend/il_arm.cc
@@ -5807,15 +5807,192 @@
   }
 }
 
+static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
+                                     Token::Kind op_kind,
+                                     Register out_lo,
+                                     Register out_hi,
+                                     Register left_lo,
+                                     Register left_hi,
+                                     const Object& right) {
+  const int64_t shift = Integer::Cast(right).AsInt64Value();
+  ASSERT(shift >= 0);
+
+  switch (op_kind) {
+    case Token::kSHR: {
+      if (shift < 32) {
+        __ Lsl(out_lo, left_hi, Operand(32 - shift));
+        __ orr(out_lo, out_lo, Operand(left_lo, LSR, shift));
+        __ Asr(out_hi, left_hi, Operand(shift));
+      } else {
+        if (shift == 32) {
+          __ mov(out_lo, Operand(left_hi));
+        } else if (shift < 64) {
+          __ Asr(out_lo, left_hi, Operand(shift - 32));
+        } else {
+          __ Asr(out_lo, left_hi, Operand(31));
+        }
+        __ Asr(out_hi, left_hi, Operand(31));
+      }
+      break;
+    }
+    case Token::kSHL: {
+      ASSERT(shift < 64);
+      if (shift < 32) {
+        __ Lsr(out_hi, left_lo, Operand(32 - shift));
+        __ orr(out_hi, out_hi, Operand(left_hi, LSL, shift));
+        __ Lsl(out_lo, left_lo, Operand(shift));
+      } else {
+        if (shift == 32) {
+          __ mov(out_hi, Operand(left_lo));
+        } else {
+          __ Lsl(out_hi, left_lo, Operand(shift - 32));
+        }
+        __ mov(out_lo, Operand(0));
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
+                                     Token::Kind op_kind,
+                                     Register out_lo,
+                                     Register out_hi,
+                                     Register left_lo,
+                                     Register left_hi,
+                                     Register right) {
+  switch (op_kind) {
+    case Token::kSHR: {
+      __ rsbs(IP, right, Operand(32));
+      __ sub(IP, right, Operand(32), MI);
+      __ mov(out_lo, Operand(left_hi, ASR, IP), MI);
+      __ mov(out_lo, Operand(left_lo, LSR, right), PL);
+      __ orr(out_lo, out_lo, Operand(left_hi, LSL, IP), PL);
+      __ mov(out_hi, Operand(left_hi, ASR, right));
+      break;
+    }
+    case Token::kSHL: {
+      __ rsbs(IP, right, Operand(32));
+      __ sub(IP, right, Operand(32), MI);
+      __ mov(out_hi, Operand(left_lo, LSL, IP), MI);
+      __ mov(out_hi, Operand(left_hi, LSL, right), PL);
+      __ orr(out_hi, out_hi, Operand(left_lo, LSR, IP), PL);
+      __ mov(out_lo, Operand(left_lo, LSL, right));
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
+                                      Token::Kind op_kind,
+                                      Register out,
+                                      Register left,
+                                      const Object& right) {
+  const int64_t shift = Integer::Cast(right).AsInt64Value();
+  ASSERT(shift >= 0);
+  if (shift >= 32) {
+    __ LoadImmediate(out, 0);
+  } else {
+    switch (op_kind) {
+      case Token::kSHR:
+        __ Lsr(out, left, Operand(shift));
+        break;
+      case Token::kSHL:
+        __ Lsl(out, left, Operand(shift));
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+
+static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler,
+                                      Token::Kind op_kind,
+                                      Register out,
+                                      Register left,
+                                      Register right) {
+  switch (op_kind) {
+    case Token::kSHR:
+      __ Lsr(out, left, right);
+      break;
+    case Token::kSHL:
+      __ Lsl(out, left, right);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
+ public:
+  static const intptr_t kNumberOfArguments = 0;
+
+  ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction, intptr_t try_index)
+      : ThrowErrorSlowPathCode(instruction,
+                               kArgumentErrorUnboxedInt64RuntimeEntry,
+                               kNumberOfArguments,
+                               try_index) {}
+
+  const char* name() override { return "int64 shift"; }
+
+  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
+    PairLocation* left_pair = instruction()->locs()->in(0).AsPairLocation();
+    Register left_hi = left_pair->At(1).reg();
+    PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
+    Register right_lo = right_pair->At(0).reg();
+    Register right_hi = right_pair->At(1).reg();
+    PairLocation* out_pair = instruction()->locs()->out(0).AsPairLocation();
+    Register out_lo = out_pair->At(0).reg();
+    Register out_hi = out_pair->At(1).reg();
+
+    __ CompareImmediate(right_hi, 0);
+
+    switch (instruction()->AsShiftInt64Op()->op_kind()) {
+      case Token::kSHR:
+        __ Asr(out_hi, left_hi, Operand(kBitsPerWord - 1), GE);
+        __ mov(out_lo, Operand(out_hi), GE);
+        break;
+      case Token::kSHL: {
+        __ LoadImmediate(out_lo, 0, GE);
+        __ LoadImmediate(out_hi, 0, GE);
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+
+    __ b(exit_label(), GE);
+
+    // An unboxed int64 value can't be passed directly to a runtime call,
+    // as all arguments are expected to be tagged (boxed). The unboxed int64
+    // argument is therefore passed through a dedicated slot in Thread.
+    // TODO(dartbug.com/33549): Clean this up when unboxed values
+    // can be passed as arguments.
+    __ StoreToOffset(kWord, right_lo, THR,
+                     Thread::unboxed_int64_runtime_arg_offset());
+    __ StoreToOffset(kWord, right_hi, THR,
+                     Thread::unboxed_int64_runtime_arg_offset() + kWordSize);
+  }
+};
+
 LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
                                                         bool opt) const {
   const intptr_t kNumInputs = 2;
   const intptr_t kNumTemps = 0;
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
   summary->set_in(0, Location::Pair(Location::RequiresRegister(),
                                     Location::RequiresRegister()));
-  summary->set_in(1, Location::WritableRegisterOrSmiConstant(right()));
+  if (ConstantInstr* constant = right()->definition()->AsConstant()) {
+    summary->set_in(1, Location::Constant(constant));
+  } else {
+    summary->set_in(1, Location::Pair(Location::RequiresRegister(),
+                                      Location::RequiresRegister()));
+  }
   summary->set_out(0, Location::Pair(Location::RequiresRegister(),
                                      Location::RequiresRegister()));
   return summary;
@@ -5830,86 +6007,216 @@
   Register out_hi = out_pair->At(1).reg();
   ASSERT(!can_overflow());
 
-  Label* deopt = NULL;
-  if (CanDeoptimize()) {
-    deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
-  }
   if (locs()->in(1).IsConstant()) {
-    // Code for a constant shift amount.
-    ASSERT(locs()->in(1).constant().IsSmi());
-    const int32_t shift = Smi::Cast(locs()->in(1).constant()).Value();
-    ASSERT(shift >= 0);
-    switch (op_kind()) {
-      case Token::kSHR: {
-        if (shift < 32) {
-          __ Lsl(out_lo, left_hi, Operand(32 - shift));
-          __ orr(out_lo, out_lo, Operand(left_lo, LSR, shift));
-          __ Asr(out_hi, left_hi, Operand(shift));
-        } else {
-          if (shift == 32) {
-            __ mov(out_lo, Operand(left_hi));
-          } else if (shift < 64) {
-            __ Asr(out_lo, left_hi, Operand(shift - 32));
-          } else {
-            __ Asr(out_lo, left_hi, Operand(31));
-          }
-          __ Asr(out_hi, left_hi, Operand(31));
-        }
-        break;
-      }
-      case Token::kSHL: {
-        ASSERT(shift < 64);
-        if (shift < 32) {
-          __ Lsr(out_hi, left_lo, Operand(32 - shift));
-          __ orr(out_hi, out_hi, Operand(left_hi, LSL, shift));
-          __ Lsl(out_lo, left_lo, Operand(shift));
-        } else {
-          if (shift == 32) {
-            __ mov(out_hi, Operand(left_lo));
-          } else {
-            __ Lsl(out_hi, left_lo, Operand(shift - 32));
-          }
-          __ mov(out_lo, Operand(0));
-        }
-        break;
-      }
-      default:
-        UNREACHABLE();
+    EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
+                             left_hi, locs()->in(1).constant());
+  } else {
+    // Code for a variable shift amount.
+    PairLocation* right_pair = locs()->in(1).AsPairLocation();
+    Register right_lo = right_pair->At(0).reg();
+    Register right_hi = right_pair->At(1).reg();
+
+    // Jump to a slow path if shift is larger than 63 or less than 0.
+    ShiftInt64OpSlowPath* slow_path = NULL;
+    if (!IsShiftCountInRange()) {
+      slow_path =
+          new (Z) ShiftInt64OpSlowPath(this, compiler->CurrentTryIndex());
+      compiler->AddSlowPathCode(slow_path);
+      __ CompareImmediate(right_hi, 0);
+      __ b(slow_path->entry_label(), NE);
+      __ CompareImmediate(right_lo, kShiftCountLimit);
+      __ b(slow_path->entry_label(), HI);
     }
+
+    EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
+                             left_hi, right_lo);
+
+    if (slow_path != NULL) {
+      __ Bind(slow_path->exit_label());
+    }
+  }
+}
+
+LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
+    Zone* zone,
+    bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+                                    Location::RequiresRegister()));
+  summary->set_in(1, Location::WritableRegisterOrSmiConstant(right()));
+  summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                     Location::RequiresRegister()));
+  return summary;
+}
+
+void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  PairLocation* left_pair = locs()->in(0).AsPairLocation();
+  Register left_lo = left_pair->At(0).reg();
+  Register left_hi = left_pair->At(1).reg();
+  PairLocation* out_pair = locs()->out(0).AsPairLocation();
+  Register out_lo = out_pair->At(0).reg();
+  Register out_hi = out_pair->At(1).reg();
+  ASSERT(!can_overflow());
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
+                             left_hi, locs()->in(1).constant());
   } else {
     // Code for a variable shift amount.
     Register shift = locs()->in(1).reg();
-
-    // Untag shift count.
     __ SmiUntag(shift);
 
     // Deopt if shift is larger than 63 or less than 0.
     if (!IsShiftCountInRange()) {
-      __ CompareImmediate(shift, kMintShiftCountLimit);
+      ASSERT(CanDeoptimize());
+      Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
+
+      __ CompareImmediate(shift, kShiftCountLimit);
       __ b(deopt, HI);
     }
 
-    switch (op_kind()) {
-      case Token::kSHR: {
-        __ rsbs(IP, shift, Operand(32));
-        __ sub(IP, shift, Operand(32), MI);
-        __ mov(out_lo, Operand(left_hi, ASR, IP), MI);
-        __ mov(out_lo, Operand(left_lo, LSR, shift), PL);
-        __ orr(out_lo, out_lo, Operand(left_hi, LSL, IP), PL);
-        __ mov(out_hi, Operand(left_hi, ASR, shift));
-        break;
-      }
-      case Token::kSHL: {
-        __ rsbs(IP, shift, Operand(32));
-        __ sub(IP, shift, Operand(32), MI);
-        __ mov(out_hi, Operand(left_lo, LSL, IP), MI);
-        __ mov(out_hi, Operand(left_hi, LSL, shift), PL);
-        __ orr(out_hi, out_hi, Operand(left_lo, LSR, IP), PL);
-        __ mov(out_lo, Operand(left_lo, LSL, shift));
-        break;
-      }
-      default:
-        UNREACHABLE();
+    EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
+                             left_hi, shift);
+  }
+}
+
+class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
+ public:
+  static const intptr_t kNumberOfArguments = 0;
+
+  ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction, intptr_t try_index)
+      : ThrowErrorSlowPathCode(instruction,
+                               kArgumentErrorUnboxedInt64RuntimeEntry,
+                               kNumberOfArguments,
+                               try_index) {}
+
+  const char* name() override { return "uint32 shift"; }
+
+  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
+    PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
+    Register right_lo = right_pair->At(0).reg();
+    Register right_hi = right_pair->At(1).reg();
+    Register out = instruction()->locs()->out(0).reg();
+
+    __ CompareImmediate(right_hi, 0);
+    __ LoadImmediate(out, 0, GE);
+    __ b(exit_label(), GE);
+
+    // An unboxed int64 value can't be passed directly to a runtime call,
+    // as all arguments are expected to be tagged (boxed). The unboxed int64
+    // argument is therefore passed through a dedicated slot in Thread.
+    // TODO(dartbug.com/33549): Clean this up when unboxed values
+    // can be passed as arguments.
+    __ StoreToOffset(kWord, right_lo, THR,
+                     Thread::unboxed_int64_runtime_arg_offset());
+    __ StoreToOffset(kWord, right_hi, THR,
+                     Thread::unboxed_int64_runtime_arg_offset() + kWordSize);
+  }
+};
+
+LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  summary->set_in(0, Location::RequiresRegister());
+  if (ConstantInstr* constant = right()->definition()->AsConstant()) {
+    summary->set_in(1, Location::Constant(constant));
+  } else {
+    summary->set_in(1, Location::Pair(Location::RequiresRegister(),
+                                      Location::RequiresRegister()));
+  }
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  Register left = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+
+  ASSERT(left != out);
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
+                              locs()->in(1).constant());
+  } else {
+    PairLocation* right_pair = locs()->in(1).AsPairLocation();
+    Register right_lo = right_pair->At(0).reg();
+    Register right_hi = right_pair->At(1).reg();
+
+    // Jump to a slow path if shift count is > 31 or negative.
+    ShiftUint32OpSlowPath* slow_path = NULL;
+    if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
+      slow_path =
+          new (Z) ShiftUint32OpSlowPath(this, compiler->CurrentTryIndex());
+      compiler->AddSlowPathCode(slow_path);
+
+      __ CompareImmediate(right_hi, 0);
+      __ b(slow_path->entry_label(), NE);
+      __ CompareImmediate(right_lo, kUint32ShiftCountLimit);
+      __ b(slow_path->entry_label(), HI);
+    }
+
+    EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right_lo);
+
+    if (slow_path != NULL) {
+      __ Bind(slow_path->exit_label());
+    }
+  }
+}
+
+LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
+    Zone* zone,
+    bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 1;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, Location::RegisterOrSmiConstant(right()));
+  summary->set_temp(0, Location::RequiresRegister());
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void SpeculativeShiftUint32OpInstr::EmitNativeCode(
+    FlowGraphCompiler* compiler) {
+  Register left = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+  Register temp = locs()->temp(0).reg();
+  ASSERT(left != out);
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
+                              locs()->in(1).constant());
+  } else {
+    Register right = locs()->in(1).reg();
+    const bool shift_count_in_range =
+        IsShiftCountInRange(kUint32ShiftCountLimit);
+
+    __ SmiUntag(temp, right);
+    right = temp;
+
+    // Deopt if shift count is negative.
+    if (!shift_count_in_range) {
+      ASSERT(CanDeoptimize());
+      Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
+
+      __ CompareImmediate(right, 0);
+      __ b(deopt, LT);
+    }
+
+    EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
+
+    if (!shift_count_in_range) {
+      __ CompareImmediate(right, kUint32ShiftCountLimit);
+      __ LoadImmediate(out, 0, HI);
     }
   }
 }
@@ -5948,6 +6255,10 @@
   return CompileType::Int();
 }
 
+CompileType SpeculativeShiftUint32OpInstr::ComputeType() const {
+  return CompileType::Int();
+}
+
 CompileType UnaryUint32OpInstr::ComputeType() const {
   return CompileType::Int();
 }
@@ -5993,75 +6304,6 @@
   }
 }
 
-LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
-                                                         bool opt) const {
-  const intptr_t kNumInputs = 2;
-  const intptr_t kNumTemps = 1;
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
-  summary->set_in(0, Location::RequiresRegister());
-  summary->set_in(1, Location::RegisterOrSmiConstant(right()));
-  summary->set_temp(0, Location::RequiresRegister());
-  summary->set_out(0, Location::RequiresRegister());
-  return summary;
-}
-
-void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  const intptr_t kShifterLimit = 31;
-
-  Register left = locs()->in(0).reg();
-  Register out = locs()->out(0).reg();
-  Register temp = locs()->temp(0).reg();
-
-  ASSERT(left != out);
-
-  Label* deopt =
-      compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
-
-  if (locs()->in(1).IsConstant()) {
-    // Shifter is constant.
-
-    const Object& constant = locs()->in(1).constant();
-    ASSERT(constant.IsSmi());
-    const intptr_t shift_value = Smi::Cast(constant).Value();
-
-    // Do the shift: (shift_value > 0) && (shift_value <= kShifterLimit).
-    switch (op_kind()) {
-      case Token::kSHR:
-        __ Lsr(out, left, Operand(shift_value));
-        break;
-      case Token::kSHL:
-        __ Lsl(out, left, Operand(shift_value));
-        break;
-      default:
-        UNREACHABLE();
-    }
-    return;
-  }
-
-  // Non constant shift value.
-  Register shifter = locs()->in(1).reg();
-
-  __ SmiUntag(temp, shifter);
-  __ CompareImmediate(temp, 0);
-  // If shift value is < 0, deoptimize.
-  __ b(deopt, LT);
-  __ CompareImmediate(temp, kShifterLimit);
-  // > kShifterLimit, result is 0.
-  __ eor(out, out, Operand(out), HI);
-  // Do the shift.
-  switch (op_kind()) {
-    case Token::kSHR:
-      __ Lsr(out, left, temp, LS);
-      break;
-    case Token::kSHL:
-      __ Lsl(out, left, temp, LS);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
 LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
                                                          bool opt) const {
   const intptr_t kNumInputs = 1;
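
The ARM port above splits an int64 into a (lo, hi) register pair, so a
variable shift combines two 32-bit shifts with a cross-over term; the
rsbs/sub pair computes 32 - n (or n - 32 when n > 32) into IP. A hedged Dart
model of the kSHL path, assuming the count was already validated to lie in
[0, 63]:

    const int mask32 = 0xFFFFFFFF;

    // Returns [outLo, outHi]; models EmitShiftInt64ByRegister's kSHL case.
    // lo and hi are the 32-bit halves of an int64; n must be in [0, 63].
    List<int> shiftLeftPair(int lo, int hi, int n) {
      lo &= mask32;
      hi &= mask32;
      if (n < 32) {
        final cross = lo >> (32 - n); // bits that move from lo into hi
        return [(lo << n) & mask32, ((hi << n) | cross) & mask32];
      }
      return [0, (lo << (n - 32)) & mask32]; // low word fully shifted out
    }
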
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index 983914d..c6c8498 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -5078,14 +5078,140 @@
   }
 }
 
+static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
+                                     Token::Kind op_kind,
+                                     Register out,
+                                     Register left,
+                                     const Object& right) {
+  const int64_t shift = Integer::Cast(right).AsInt64Value();
+  ASSERT(shift >= 0);
+  switch (op_kind) {
+    case Token::kSHR: {
+      __ AsrImmediate(out, left,
+                      Utils::Minimum<int64_t>(shift, kBitsPerWord - 1));
+      break;
+    }
+    case Token::kSHL: {
+      ASSERT(shift < 64);
+      __ LslImmediate(out, left, shift);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
+                                     Token::Kind op_kind,
+                                     Register out,
+                                     Register left,
+                                     Register right) {
+  switch (op_kind) {
+    case Token::kSHR: {
+      __ asrv(out, left, right);
+      break;
+    }
+    case Token::kSHL: {
+      __ lslv(out, left, right);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
+                                      Token::Kind op_kind,
+                                      Register out,
+                                      Register left,
+                                      const Object& right) {
+  const int64_t shift = Integer::Cast(right).AsInt64Value();
+  ASSERT(shift >= 0);
+  if (shift >= 32) {
+    __ LoadImmediate(out, 0);
+  } else {
+    switch (op_kind) {
+      case Token::kSHR:
+        __ LsrImmediate(out, left, shift, kWord);
+        break;
+      case Token::kSHL:
+        __ LslImmediate(out, left, shift, kWord);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+
+static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler,
+                                      Token::Kind op_kind,
+                                      Register out,
+                                      Register left,
+                                      Register right) {
+  switch (op_kind) {
+    case Token::kSHR:
+      __ lsrvw(out, left, right);
+      break;
+    case Token::kSHL:
+      __ lslvw(out, left, right);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
+ public:
+  static const intptr_t kNumberOfArguments = 0;
+
+  ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction, intptr_t try_index)
+      : ThrowErrorSlowPathCode(instruction,
+                               kArgumentErrorUnboxedInt64RuntimeEntry,
+                               kNumberOfArguments,
+                               try_index) {}
+
+  const char* name() override { return "int64 shift"; }
+
+  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
+    const Register left = instruction()->locs()->in(0).reg();
+    const Register right = instruction()->locs()->in(1).reg();
+    const Register out = instruction()->locs()->out(0).reg();
+    ASSERT((out != left) && (out != right));
+
+    Label throw_error;
+    __ tbnz(&throw_error, right, kBitsPerWord - 1);
+
+    switch (instruction()->AsShiftInt64Op()->op_kind()) {
+      case Token::kSHR:
+        __ AsrImmediate(out, left, kBitsPerWord - 1);
+        break;
+      case Token::kSHL:
+        __ mov(out, ZR);
+        break;
+      default:
+        UNREACHABLE();
+    }
+    __ b(exit_label());
+
+    __ Bind(&throw_error);
+
+    // An unboxed int64 value can't be passed directly to a runtime call,
+    // as all arguments are expected to be tagged (boxed). The unboxed int64
+    // argument is therefore passed through a dedicated slot in Thread.
+    // TODO(dartbug.com/33549): Clean this up when unboxed values
+    // can be passed as arguments.
+    __ str(right, Address(THR, Thread::unboxed_int64_runtime_arg_offset()));
+  }
+};
+
 LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
                                                         bool opt) const {
   const intptr_t kNumInputs = 2;
   const intptr_t kNumTemps = 0;
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
   summary->set_in(0, Location::RequiresRegister());
-  summary->set_in(1, Location::WritableRegisterOrSmiConstant(right()));
+  summary->set_in(1, Location::RegisterOrConstant(right()));
   summary->set_out(0, Location::RequiresRegister());
   return summary;
 }
@@ -5095,52 +5221,186 @@
   const Register out = locs()->out(0).reg();
   ASSERT(!can_overflow());
 
-  Label* deopt = NULL;
-  if (CanDeoptimize()) {
-    deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
-  }
   if (locs()->in(1).IsConstant()) {
-    // Code for a constant shift amount.
-    ASSERT(locs()->in(1).constant().IsSmi());
-    const int32_t shift = Smi::Cast(locs()->in(1).constant()).Value();
-    ASSERT(shift >= 0);
-    switch (op_kind()) {
-      case Token::kSHR: {
-        __ AsrImmediate(out, left, Utils::Minimum(shift, kBitsPerWord - 1));
-        break;
-      }
-      case Token::kSHL: {
-        ASSERT(shift < 64);
-        __ LslImmediate(out, left, shift);
-        break;
-      }
-      default:
-        UNREACHABLE();
+    EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
+                             locs()->in(1).constant());
+  } else {
+    // Code for a variable shift amount.
+    Register shift = locs()->in(1).reg();
+
+    // Jump to a slow path if shift is larger than 63 or less than 0.
+    ShiftInt64OpSlowPath* slow_path = NULL;
+    if (!IsShiftCountInRange()) {
+      slow_path =
+          new (Z) ShiftInt64OpSlowPath(this, compiler->CurrentTryIndex());
+      compiler->AddSlowPathCode(slow_path);
+      __ CompareImmediate(shift, kShiftCountLimit);
+      __ b(slow_path->entry_label(), HI);
     }
+
+    EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
+
+    if (slow_path != NULL) {
+      __ Bind(slow_path->exit_label());
+    }
+  }
+}
+
+LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
+    Zone* zone,
+    bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, Location::RegisterOrSmiConstant(right()));
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register left = locs()->in(0).reg();
+  const Register out = locs()->out(0).reg();
+  ASSERT(!can_overflow());
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
+                             locs()->in(1).constant());
   } else {
     // Code for a variable shift amount.
     Register shift = locs()->in(1).reg();
 
     // Untag shift count.
     __ SmiUntag(TMP, shift);
+    shift = TMP;
 
     // Deopt if shift is larger than 63 or less than 0.
     if (!IsShiftCountInRange()) {
-      __ CompareImmediate(TMP, kMintShiftCountLimit);
+      ASSERT(CanDeoptimize());
+      Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
+
+      __ CompareImmediate(shift, kShiftCountLimit);
       __ b(deopt, HI);
     }
 
-    switch (op_kind()) {
-      case Token::kSHR: {
-        __ lsrv(out, left, TMP);
-        break;
-      }
-      case Token::kSHL: {
-        __ lslv(out, left, TMP);
-        break;
-      }
-      default:
-        UNREACHABLE();
+    EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
+  }
+}
+
+class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
+ public:
+  static const intptr_t kNumberOfArguments = 0;
+
+  ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction, intptr_t try_index)
+      : ThrowErrorSlowPathCode(instruction,
+                               kArgumentErrorUnboxedInt64RuntimeEntry,
+                               kNumberOfArguments,
+                               try_index) {}
+
+  const char* name() override { return "uint32 shift"; }
+
+  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
+    const Register right = instruction()->locs()->in(1).reg();
+
+    // An unboxed int64 value can't be passed directly to a runtime call,
+    // as all arguments are expected to be tagged (boxed). The unboxed int64
+    // argument is therefore passed through a dedicated slot in Thread.
+    // TODO(dartbug.com/33549): Clean this up when unboxed values
+    // can be passed as arguments.
+    __ str(right, Address(THR, Thread::unboxed_int64_runtime_arg_offset()));
+  }
+};
+
+LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, Location::RegisterOrConstant(right()));
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  Register left = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
+                              locs()->in(1).constant());
+  } else {
+    const Register right = locs()->in(1).reg();
+    const bool shift_count_in_range =
+        IsShiftCountInRange(kUint32ShiftCountLimit);
+
+    // Jump to a slow path if shift count is negative.
+    if (!shift_count_in_range) {
+      ShiftUint32OpSlowPath* slow_path =
+          new (Z) ShiftUint32OpSlowPath(this, compiler->CurrentTryIndex());
+      compiler->AddSlowPathCode(slow_path);
+
+      __ tbnz(slow_path->entry_label(), right, kBitsPerWord - 1);
+    }
+
+    EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
+
+    if (!shift_count_in_range) {
+      // If shift value is > 31, return zero.
+      __ CompareImmediate(right, 31);
+      __ csel(out, out, ZR, LE);
+    }
+  }
+}
+
+LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
+    Zone* zone,
+    bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, Location::RegisterOrSmiConstant(right()));
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
+}
+
+void SpeculativeShiftUint32OpInstr::EmitNativeCode(
+    FlowGraphCompiler* compiler) {
+  Register left = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
+                              locs()->in(1).constant());
+  } else {
+    Register right = locs()->in(1).reg();
+    const bool shift_count_in_range =
+        IsShiftCountInRange(kUint32ShiftCountLimit);
+
+    __ SmiUntag(TMP, right);
+    right = TMP;
+
+    // Deoptimize if the shift count is negative.
+    if (!shift_count_in_range) {
+      ASSERT(CanDeoptimize());
+      Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
+
+      __ tbnz(deopt, right, kBitsPerWord - 1);
+    }
+
+    EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
+
+    if (!shift_count_in_range) {
+      // If shift value is > 31, return zero.
+      __ CompareImmediate(right, 31);
+      __ csel(out, out, ZR, LE);
     }
   }
 }
@@ -5172,6 +5432,10 @@
   return CompileType::FromCid(kSmiCid);
 }
 
+CompileType SpeculativeShiftUint32OpInstr::ComputeType() const {
+  return CompileType::FromCid(kSmiCid);
+}
+
 CompileType UnaryUint32OpInstr::ComputeType() const {
   return CompileType::FromCid(kSmiCid);
 }
@@ -5217,75 +5481,6 @@
   }
 }
 
-LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
-                                                         bool opt) const {
-  const intptr_t kNumInputs = 2;
-  const intptr_t kNumTemps = 0;
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
-  summary->set_in(0, Location::RequiresRegister());
-  summary->set_in(1, Location::RegisterOrSmiConstant(right()));
-  summary->set_out(0, Location::RequiresRegister());
-  return summary;
-}
-
-void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  const intptr_t kShifterLimit = 31;
-
-  Register left = locs()->in(0).reg();
-  Register out = locs()->out(0).reg();
-
-  Label* deopt =
-      compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
-
-  if (locs()->in(1).IsConstant()) {
-    // Shifter is constant.
-
-    const Object& constant = locs()->in(1).constant();
-    ASSERT(constant.IsSmi());
-    const intptr_t shift_value = Smi::Cast(constant).Value();
-
-    // Do the shift: (shift_value > 0) && (shift_value <= kShifterLimit).
-    switch (op_kind()) {
-      case Token::kSHR:
-        __ LsrImmediate(out, left, shift_value, kWord);
-        break;
-      case Token::kSHL:
-        __ LslImmediate(out, left, shift_value, kWord);
-        break;
-      default:
-        UNREACHABLE();
-    }
-    return;
-  }
-
-  // Non constant shift value.
-
-  Register shifter = locs()->in(1).reg();
-
-  // TODO(johnmccutchan): Use range information to avoid these checks.
-  __ SmiUntag(TMP, shifter);
-  __ CompareImmediate(TMP, 0);
-  // If shift value is < 0, deoptimize.
-  __ b(deopt, LT);
-
-  // Do the shift.
-  switch (op_kind()) {
-    case Token::kSHR:
-      __ lsrv(out, left, TMP);
-      break;
-    case Token::kSHL:
-      __ lslvw(out, left, TMP);
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  __ CompareImmediate(TMP, kShifterLimit);
-  // If shift value is > 31, return zero.
-  __ csel(out, out, ZR, GT);
-}
-
 LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
                                                          bool opt) const {
   const intptr_t kNumInputs = 1;
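
Note: the int64 variants added in the ia32/x64 sections below follow the same
pattern; a matching C++ reference model (illustration only, hypothetical
names) of the non-speculative int64 semantics:

    #include <cstdint>
    #include <stdexcept>

    // kSHL saturates to 0 and kSHR saturates to the sign bits for counts
    // above 63; negative counts throw ArgumentError via the slow path.
    int64_t RefShiftInt64(int64_t value, int64_t count, bool is_shl) {
      if (count < 0) throw std::invalid_argument("negative shift count");
      if (count > 63) return is_shl ? 0 : value >> 63;
      return is_shl
                 ? static_cast<int64_t>(static_cast<uint64_t>(value) << count)
                 : value >> count;
    }
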
diff --git a/runtime/vm/compiler/backend/il_dbc.cc b/runtime/vm/compiler/backend/il_dbc.cc
index 0451944..dae7f80 100644
--- a/runtime/vm/compiler/backend/il_dbc.cc
+++ b/runtime/vm/compiler/backend/il_dbc.cc
@@ -42,6 +42,7 @@
   M(ExtractNthOutput)                                                          \
   M(BinaryUint32Op)                                                            \
   M(ShiftUint32Op)                                                             \
+  M(SpeculativeShiftUint32Op)                                                  \
   M(UnaryUint32Op)                                                             \
   M(UnboxedIntConverter)
 
@@ -59,6 +60,7 @@
   M(Int64ToDouble)                                                             \
   M(BinaryInt64Op)                                                             \
   M(ShiftInt64Op)                                                              \
+  M(SpeculativeShiftInt64Op)                                                   \
   M(UnaryInt64Op)                                                              \
   M(CheckedSmiOp)                                                              \
   M(CheckedSmiComparison)                                                      \
@@ -1278,6 +1280,10 @@
   return CompileType::Int();
 }
 
+CompileType SpeculativeShiftUint32OpInstr::ComputeType() const {
+  return CompileType::Int();
+}
+
 CompileType UnaryUint32OpInstr::ComputeType() const {
   return CompileType::Int();
 }
diff --git a/runtime/vm/compiler/backend/il_ia32.cc b/runtime/vm/compiler/backend/il_ia32.cc
index 97e7ac1..57f3b5d 100644
--- a/runtime/vm/compiler/backend/il_ia32.cc
+++ b/runtime/vm/compiler/backend/il_ia32.cc
@@ -5247,15 +5247,204 @@
   }
 }
 
+static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
+                                     Token::Kind op_kind,
+                                     Register left_lo,
+                                     Register left_hi,
+                                     const Object& right) {
+  const int64_t shift = Integer::Cast(right).AsInt64Value();
+  ASSERT(shift >= 0);
+  switch (op_kind) {
+    case Token::kSHR: {
+      if (shift > 31) {
+        __ movl(left_lo, left_hi);        // Shift by 32.
+        __ sarl(left_hi, Immediate(31));  // Sign extend left hi.
+        if (shift > 32) {
+          __ sarl(left_lo, Immediate(shift > 63 ? 31 : shift - 32));
+        }
+      } else {
+        __ shrdl(left_lo, left_hi, Immediate(shift));
+        __ sarl(left_hi, Immediate(shift));
+      }
+      break;
+    }
+    case Token::kSHL: {
+      ASSERT(shift < 64);
+      if (shift > 31) {
+        __ movl(left_hi, left_lo);  // Shift by 32.
+        __ xorl(left_lo, left_lo);  // Zero left_lo.
+        if (shift > 32) {
+          __ shll(left_hi, Immediate(shift - 32));
+        }
+      } else {
+        __ shldl(left_hi, left_lo, Immediate(shift));
+        __ shll(left_lo, Immediate(shift));
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+static void EmitShiftInt64ByECX(FlowGraphCompiler* compiler,
+                                Token::Kind op_kind,
+                                Register left_lo,
+                                Register left_hi) {
+  // The sarl operation masks the count to 5 bits, and shrdl is undefined
+  // for counts greater than the operand size (32).
+  Label done, large_shift;
+  switch (op_kind) {
+    case Token::kSHR: {
+      __ cmpl(ECX, Immediate(31));
+      __ j(ABOVE, &large_shift);
+
+      __ shrdl(left_lo, left_hi, ECX);  // Shift count in CL.
+      __ sarl(left_hi, ECX);            // Shift count in CL.
+      __ jmp(&done, Assembler::kNearJump);
+
+      __ Bind(&large_shift);
+      // No need to subtract 32 from CL, only 5 bits used by sarl.
+      __ movl(left_lo, left_hi);        // Shift by 32.
+      __ sarl(left_hi, Immediate(31));  // Sign extend left hi.
+      __ sarl(left_lo, ECX);            // Shift count: CL % 32.
+      break;
+    }
+    case Token::kSHL: {
+      __ cmpl(ECX, Immediate(31));
+      __ j(ABOVE, &large_shift);
+
+      __ shldl(left_hi, left_lo, ECX);  // Shift count in CL.
+      __ shll(left_lo, ECX);            // Shift count in CL.
+      __ jmp(&done, Assembler::kNearJump);
+
+      __ Bind(&large_shift);
+      // No need to subtract 32 from CL, only 5 bits used by shll.
+      __ movl(left_hi, left_lo);  // Shift by 32.
+      __ xorl(left_lo, left_lo);  // Zero left_lo.
+      __ shll(left_hi, ECX);      // Shift count: CL % 32.
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+  __ Bind(&done);
+}
+
+static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
+                                      Token::Kind op_kind,
+                                      Register left,
+                                      const Object& right) {
+  const int64_t shift = Integer::Cast(right).AsInt64Value();
+  if (shift >= 32) {
+    __ xorl(left, left);
+  } else {
+    switch (op_kind) {
+      case Token::kSHR: {
+        __ shrl(left, Immediate(shift));
+        break;
+      }
+      case Token::kSHL: {
+        __ shll(left, Immediate(shift));
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+
+static void EmitShiftUint32ByECX(FlowGraphCompiler* compiler,
+                                 Token::Kind op_kind,
+                                 Register left) {
+  switch (op_kind) {
+    case Token::kSHR: {
+      __ shrl(left, ECX);
+      break;
+    }
+    case Token::kSHL: {
+      __ shll(left, ECX);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
+ public:
+  static const intptr_t kNumberOfArguments = 0;
+
+  ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction, intptr_t try_index)
+      : ThrowErrorSlowPathCode(instruction,
+                               kArgumentErrorUnboxedInt64RuntimeEntry,
+                               kNumberOfArguments,
+                               try_index) {}
+
+  const char* name() override { return "int64 shift"; }
+
+  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
+    PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
+    Register right_lo = right_pair->At(0).reg();
+    Register right_hi = right_pair->At(1).reg();
+    PairLocation* out_pair = instruction()->locs()->out(0).AsPairLocation();
+    Register out_lo = out_pair->At(0).reg();
+    Register out_hi = out_pair->At(1).reg();
+#if defined(DEBUG)
+    PairLocation* left_pair = instruction()->locs()->in(0).AsPairLocation();
+    Register left_lo = left_pair->At(0).reg();
+    Register left_hi = left_pair->At(1).reg();
+    ASSERT(out_lo == left_lo);
+    ASSERT(out_hi == left_hi);
+#endif  // defined(DEBUG)
+
+    Label throw_error;
+    __ testl(right_hi, right_hi);
+    __ j(NEGATIVE, &throw_error);
+
+    switch (instruction()->AsShiftInt64Op()->op_kind()) {
+      case Token::kSHR:
+        __ sarl(out_hi, Immediate(31));
+        __ movl(out_lo, out_hi);
+        break;
+      case Token::kSHL: {
+        __ xorl(out_lo, out_lo);
+        __ xorl(out_hi, out_hi);
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+    __ jmp(exit_label());
+
+    __ Bind(&throw_error);
+
+    // Can't pass an unboxed int64 value directly to a runtime call, as all
+    // arguments are expected to be tagged (boxed).
+    // The unboxed int64 argument is passed through a dedicated slot in Thread.
+    // TODO(dartbug.com/33549): Clean this up when unboxed values
+    // can be passed as arguments.
+    __ movl(Address(THR, Thread::unboxed_int64_runtime_arg_offset()), right_lo);
+    __ movl(
+        Address(THR, Thread::unboxed_int64_runtime_arg_offset() + kWordSize),
+        right_hi);
+  }
+};
+
 LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
                                                         bool opt) const {
   const intptr_t kNumInputs = 2;
   const intptr_t kNumTemps = 0;
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
   summary->set_in(0, Location::Pair(Location::RequiresRegister(),
                                     Location::RequiresRegister()));
-  summary->set_in(1, Location::FixedRegisterOrSmiConstant(right(), ECX));
+  if (ConstantInstr* constant = right()->definition()->AsConstant()) {
+    summary->set_in(1, Location::Constant(constant));
+  } else {
+    summary->set_in(1, Location::Pair(Location::RegisterLocation(ECX),
+                                      Location::RequiresRegister()));
+  }
   summary->set_out(0, Location::SameAsFirstInput());
   return summary;
 }
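
Note: on ia32 an int64 value occupies a lo/hi register pair, so
EmitShiftInt64ByECX splits each shift into a small-count case (shrdl/shldl
pair shift) and a large-count case ("shift by 32" first). A self-contained
C++ model of the kSHR case (illustration only, hypothetical names):

    #include <cstdint>

    // 64-bit arithmetic right shift of a value stored as 32-bit halves.
    void SarPair(uint32_t* lo, int32_t* hi, int count /* 0..63 */) {
      if (count <= 31) {
        *lo = (*lo >> count) |
              (count == 0 ? 0u : (uint32_t)*hi << (32 - count));  // shrdl
        *hi >>= count;                                            // sarl
      } else {
        // movl lo, hi; then sarl lo, CL -- sarl masks the count to 5 bits,
        // so shifting by count is shifting by count % 32 after the move.
        *lo = (uint32_t)(*hi >> (count & 31));
        *hi >>= 31;  // sign-extend the high word
      }
    }
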
@@ -5271,92 +5460,217 @@
   ASSERT(out_hi == left_hi);
   ASSERT(!can_overflow());
 
-  Label* deopt = NULL;
-  if (CanDeoptimize()) {
-    deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
-  }
   if (locs()->in(1).IsConstant()) {
-    // Code for a constant shift amount.
-    ASSERT(locs()->in(1).constant().IsSmi());
-    const int32_t shift = Smi::Cast(locs()->in(1).constant()).Value();
-    ASSERT(shift >= 0);
-    switch (op_kind()) {
-      case Token::kSHR: {
-        if (shift > 31) {
-          __ movl(left_lo, left_hi);        // Shift by 32.
-          __ sarl(left_hi, Immediate(31));  // Sign extend left hi.
-          if (shift > 32) {
-            __ sarl(left_lo, Immediate(shift > 63 ? 31 : shift - 32));
-          }
-        } else {
-          __ shrdl(left_lo, left_hi, Immediate(shift));
-          __ sarl(left_hi, Immediate(shift));
-        }
-        break;
-      }
-      case Token::kSHL: {
-        ASSERT(shift < 64);
-        if (shift > 31) {
-          __ movl(left_hi, left_lo);  // Shift by 32.
-          __ xorl(left_lo, left_lo);  // Zero left_lo.
-          if (shift > 32) {
-            __ shll(left_hi, Immediate(shift - 32));
-          }
-        } else {
-          __ shldl(left_hi, left_lo, Immediate(shift));
-          __ shll(left_lo, Immediate(shift));
-        }
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
+    EmitShiftInt64ByConstant(compiler, op_kind(), left_lo, left_hi,
+                             locs()->in(1).constant());
   } else {
     // Code for a variable shift amount.
-    // Deoptimize if shift count is > 63.
-    // sarl operation masks the count to 5 bits and
-    // shrdl is undefined with count > operand size (32)
-    __ SmiUntag(ECX);
+    ASSERT(locs()->in(1).AsPairLocation()->At(0).reg() == ECX);
+    Register right_hi = locs()->in(1).AsPairLocation()->At(1).reg();
+
+    // Jump to a slow path if shift count is > 63 or negative.
+    ShiftInt64OpSlowPath* slow_path = NULL;
     if (!IsShiftCountInRange()) {
-      __ cmpl(ECX, Immediate(kMintShiftCountLimit));
+      slow_path =
+          new (Z) ShiftInt64OpSlowPath(this, compiler->CurrentTryIndex());
+      compiler->AddSlowPathCode(slow_path);
+      __ testl(right_hi, right_hi);
+      __ j(NOT_ZERO, slow_path->entry_label());
+      __ cmpl(ECX, Immediate(kShiftCountLimit));
+      __ j(ABOVE, slow_path->entry_label());
+    }
+
+    EmitShiftInt64ByECX(compiler, op_kind(), left_lo, left_hi);
+
+    if (slow_path != NULL) {
+      __ Bind(slow_path->exit_label());
+    }
+  }
+}
+
+LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
+    Zone* zone,
+    bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+                                    Location::RequiresRegister()));
+  summary->set_in(1, Location::FixedRegisterOrSmiConstant(right(), ECX));
+  summary->set_out(0, Location::SameAsFirstInput());
+  return summary;
+}
+
+void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  PairLocation* left_pair = locs()->in(0).AsPairLocation();
+  Register left_lo = left_pair->At(0).reg();
+  Register left_hi = left_pair->At(1).reg();
+  PairLocation* out_pair = locs()->out(0).AsPairLocation();
+  Register out_lo = out_pair->At(0).reg();
+  Register out_hi = out_pair->At(1).reg();
+  ASSERT(out_lo == left_lo);
+  ASSERT(out_hi == left_hi);
+  ASSERT(!can_overflow());
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftInt64ByConstant(compiler, op_kind(), left_lo, left_hi,
+                             locs()->in(1).constant());
+  } else {
+    ASSERT(locs()->in(1).reg() == ECX);
+    __ SmiUntag(ECX);
+
+    // Deoptimize if shift count is > 63 or negative.
+    if (!IsShiftCountInRange()) {
+      ASSERT(CanDeoptimize());
+      Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
+      __ cmpl(ECX, Immediate(kShiftCountLimit));
       __ j(ABOVE, deopt);
     }
-    Label done, large_shift;
-    switch (op_kind()) {
-      case Token::kSHR: {
-        __ cmpl(ECX, Immediate(31));
-        __ j(ABOVE, &large_shift);
 
-        __ shrdl(left_lo, left_hi, ECX);  // Shift count in CL.
-        __ sarl(left_hi, ECX);            // Shift count in CL.
-        __ jmp(&done, Assembler::kNearJump);
+    EmitShiftInt64ByECX(compiler, op_kind(), left_lo, left_hi);
+  }
+}
 
-        __ Bind(&large_shift);
-        // No need to subtract 32 from CL, only 5 bits used by sarl.
-        __ movl(left_lo, left_hi);        // Shift by 32.
-        __ sarl(left_hi, Immediate(31));  // Sign extend left hi.
-        __ sarl(left_lo, ECX);            // Shift count: CL % 32.
-        break;
-      }
-      case Token::kSHL: {
-        __ cmpl(ECX, Immediate(31));
-        __ j(ABOVE, &large_shift);
+class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
+ public:
+  static const intptr_t kNumberOfArguments = 0;
 
-        __ shldl(left_hi, left_lo, ECX);  // Shift count in CL.
-        __ shll(left_lo, ECX);            // Shift count in CL.
-        __ jmp(&done, Assembler::kNearJump);
+  ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction, intptr_t try_index)
+      : ThrowErrorSlowPathCode(instruction,
+                               kArgumentErrorUnboxedInt64RuntimeEntry,
+                               kNumberOfArguments,
+                               try_index) {}
 
-        __ Bind(&large_shift);
-        // No need to subtract 32 from CL, only 5 bits used by shll.
-        __ movl(left_hi, left_lo);  // Shift by 32.
-        __ xorl(left_lo, left_lo);  // Zero left_lo.
-        __ shll(left_hi, ECX);      // Shift count: CL % 32.
-        break;
-      }
-      default:
-        UNREACHABLE();
+  const char* name() override { return "uint32 shift"; }
+
+  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
+    PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
+    Register right_lo = right_pair->At(0).reg();
+    Register right_hi = right_pair->At(1).reg();
+    const Register out = instruction()->locs()->out(0).reg();
+    ASSERT(out == instruction()->locs()->in(0).reg());
+
+    Label throw_error;
+    __ testl(right_hi, right_hi);
+    __ j(NEGATIVE, &throw_error);
+
+    __ xorl(out, out);
+    __ jmp(exit_label());
+
+    __ Bind(&throw_error);
+
+    // Can't pass an unboxed int64 value directly to a runtime call, as all
+    // arguments are expected to be tagged (boxed).
+    // The unboxed int64 argument is passed through a dedicated slot in Thread.
+    // TODO(dartbug.com/33549): Clean this up when unboxed values
+    // can be passed as arguments.
+    __ movl(Address(THR, Thread::unboxed_int64_runtime_arg_offset()), right_lo);
+    __ movl(
+        Address(THR, Thread::unboxed_int64_runtime_arg_offset() + kWordSize),
+        right_hi);
+  }
+};
+
+LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  summary->set_in(0, Location::RequiresRegister());
+  if (ConstantInstr* constant = right()->definition()->AsConstant()) {
+    summary->set_in(1, Location::Constant(constant));
+  } else {
+    summary->set_in(1, Location::Pair(Location::RegisterLocation(ECX),
+                                      Location::RequiresRegister()));
+  }
+  summary->set_out(0, Location::SameAsFirstInput());
+  return summary;
+}
+
+void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  Register left = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+  ASSERT(left == out);
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftUint32ByConstant(compiler, op_kind(), left,
+                              locs()->in(1).constant());
+  } else {
+    // Code for a variable shift amount.
+    ASSERT(locs()->in(1).AsPairLocation()->At(0).reg() == ECX);
+    Register right_hi = locs()->in(1).AsPairLocation()->At(1).reg();
+
+    // Jump to a slow path if shift count is > 31 or negative.
+    ShiftUint32OpSlowPath* slow_path = NULL;
+    if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
+      slow_path =
+          new (Z) ShiftUint32OpSlowPath(this, compiler->CurrentTryIndex());
+      compiler->AddSlowPathCode(slow_path);
+
+      __ testl(right_hi, right_hi);
+      __ j(NOT_ZERO, slow_path->entry_label());
+      __ cmpl(ECX, Immediate(kUint32ShiftCountLimit));
+      __ j(ABOVE, slow_path->entry_label());
     }
-    __ Bind(&done);
+
+    EmitShiftUint32ByECX(compiler, op_kind(), left);
+
+    if (slow_path != NULL) {
+      __ Bind(slow_path->exit_label());
+    }
+  }
+}
+
+LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
+    Zone* zone,
+    bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, Location::FixedRegisterOrSmiConstant(right(), ECX));
+  summary->set_out(0, Location::SameAsFirstInput());
+  return summary;
+}
+
+void SpeculativeShiftUint32OpInstr::EmitNativeCode(
+    FlowGraphCompiler* compiler) {
+  Register left = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+  ASSERT(left == out);
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftUint32ByConstant(compiler, op_kind(), left,
+                              locs()->in(1).constant());
+  } else {
+    ASSERT(locs()->in(1).reg() == ECX);
+    __ SmiUntag(ECX);
+
+    if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
+      if (!IsShiftCountInRange()) {
+        // Deoptimize if shift count is negative.
+        ASSERT(CanDeoptimize());
+        Label* deopt =
+            compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
+
+        __ testl(ECX, ECX);
+        __ j(LESS, deopt);
+      }
+
+      Label cont;
+      __ cmpl(ECX, Immediate(kUint32ShiftCountLimit));
+      __ j(LESS_EQUAL, &cont);
+
+      __ xorl(left, left);
+
+      __ Bind(&cont);
+    }
+
+    EmitShiftUint32ByECX(compiler, op_kind(), left);
   }
 }
 
@@ -5394,90 +5708,12 @@
   return CompileType::Int();
 }
 
-CompileType UnaryUint32OpInstr::ComputeType() const {
+CompileType SpeculativeShiftUint32OpInstr::ComputeType() const {
   return CompileType::Int();
 }
 
-LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
-                                                         bool opt) const {
-  const intptr_t kNumInputs = 2;
-  const intptr_t kNumTemps = 0;
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
-  summary->set_in(0, Location::RequiresRegister());
-  summary->set_in(1, Location::FixedRegisterOrSmiConstant(right(), ECX));
-  summary->set_out(0, Location::SameAsFirstInput());
-  return summary;
-}
-
-void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  const intptr_t kShifterLimit = 31;
-
-  Register left = locs()->in(0).reg();
-  Register out = locs()->out(0).reg();
-  ASSERT(left == out);
-
-  Label* deopt =
-      compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
-
-  if (locs()->in(1).IsConstant()) {
-    // Shifter is constant.
-
-    const Object& constant = locs()->in(1).constant();
-    ASSERT(constant.IsSmi());
-    const intptr_t shift_value = Smi::Cast(constant).Value();
-
-    // Do the shift: (shift_value > 0) && (shift_value <= kShifterLimit).
-    switch (op_kind()) {
-      case Token::kSHR:
-        __ shrl(left, Immediate(shift_value));
-        break;
-      case Token::kSHL:
-        __ shll(left, Immediate(shift_value));
-        break;
-      default:
-        UNREACHABLE();
-    }
-    return;
-  }
-
-  // Non constant shift value.
-
-  Register shifter = locs()->in(1).reg();
-  ASSERT(shifter == ECX);
-
-  Label done;
-  Label zero;
-
-  // TODO(johnmccutchan): Use range information to avoid these checks.
-  __ SmiUntag(shifter);
-  __ cmpl(shifter, Immediate(0));
-  // If shift value is < 0, deoptimize.
-  __ j(NEGATIVE, deopt);
-  __ cmpl(shifter, Immediate(kShifterLimit));
-  // If shift value is >= 32, return zero.
-  __ j(ABOVE, &zero);
-
-  // Do the shift.
-  switch (op_kind()) {
-    case Token::kSHR:
-      __ shrl(left, shifter);
-      __ jmp(&done);
-      break;
-    case Token::kSHL:
-      __ shll(left, shifter);
-      __ jmp(&done);
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  __ Bind(&zero);
-  // Shift was greater than 31 bits, just return zero.
-  __ xorl(left, left);
-
-  // Exit path.
-  __ Bind(&done);
+CompileType UnaryUint32OpInstr::ComputeType() const {
+  return CompileType::Int();
 }
 
 LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index f66d347..c520bfc 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -5237,14 +5237,136 @@
   __ notq(left);
 }
 
+static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
+                                     Token::Kind op_kind,
+                                     Register left,
+                                     const Object& right) {
+  const int64_t shift = Integer::Cast(right).AsInt64Value();
+  ASSERT(shift >= 0);
+  switch (op_kind) {
+    case Token::kSHR:
+      __ sarq(left,
+              Immediate(Utils::Minimum<int64_t>(shift, kBitsPerWord - 1)));
+      break;
+    case Token::kSHL: {
+      ASSERT(shift < 64);
+      __ shlq(left, Immediate(shift));
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+static void EmitShiftInt64ByRCX(FlowGraphCompiler* compiler,
+                                Token::Kind op_kind,
+                                Register left) {
+  switch (op_kind) {
+    case Token::kSHR: {
+      __ sarq(left, RCX);
+      break;
+    }
+    case Token::kSHL: {
+      __ shlq(left, RCX);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
+                                      Token::Kind op_kind,
+                                      Register left,
+                                      const Object& right) {
+  const int64_t shift = Integer::Cast(right).AsInt64Value();
+  ASSERT(shift >= 0);
+  if (shift >= 32) {
+    __ xorl(left, left);
+  } else {
+    switch (op_kind) {
+      case Token::kSHR: {
+        __ shrl(left, Immediate(shift));
+        break;
+      }
+      case Token::kSHL: {
+        __ shll(left, Immediate(shift));
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+
+static void EmitShiftUint32ByRCX(FlowGraphCompiler* compiler,
+                                 Token::Kind op_kind,
+                                 Register left) {
+  switch (op_kind) {
+    case Token::kSHR: {
+      __ shrl(left, RCX);
+      break;
+    }
+    case Token::kSHL: {
+      __ shll(left, RCX);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
+ public:
+  static const intptr_t kNumberOfArguments = 0;
+
+  ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction, intptr_t try_index)
+      : ThrowErrorSlowPathCode(instruction,
+                               kArgumentErrorUnboxedInt64RuntimeEntry,
+                               kNumberOfArguments,
+                               try_index) {}
+
+  const char* name() override { return "int64 shift"; }
+
+  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
+    const Register out = instruction()->locs()->out(0).reg();
+    ASSERT(out == instruction()->locs()->in(0).reg());
+
+    Label throw_error;
+    __ testq(RCX, RCX);
+    __ j(LESS, &throw_error);
+
+    switch (instruction()->AsShiftInt64Op()->op_kind()) {
+      case Token::kSHR:
+        __ sarq(out, Immediate(kBitsPerInt64 - 1));
+        break;
+      case Token::kSHL:
+        __ xorq(out, out);
+        break;
+      default:
+        UNREACHABLE();
+    }
+    __ jmp(exit_label());
+
+    __ Bind(&throw_error);
+
+    // Can't pass an unboxed int64 value directly to a runtime call, as all
+    // arguments are expected to be tagged (boxed).
+    // The unboxed int64 argument is passed through a dedicated slot in Thread.
+    // TODO(dartbug.com/33549): Clean this up when unboxed values
+    // can be passed as arguments.
+    __ movq(Address(THR, Thread::unboxed_int64_runtime_arg_offset()), RCX);
+  }
+};
+
 LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
                                                         bool opt) const {
   const intptr_t kNumInputs = 2;
   const intptr_t kNumTemps = 0;
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
   summary->set_in(0, Location::RequiresRegister());
-  summary->set_in(1, Location::FixedRegisterOrSmiConstant(right(), RCX));
+  summary->set_in(1, Location::FixedRegisterOrConstant(right(), RCX));
   summary->set_out(0, Location::SameAsFirstInput());
   return summary;
 }
@@ -5255,50 +5377,195 @@
   ASSERT(left == out);
   ASSERT(!can_overflow());
 
-  Label* deopt = NULL;
-  if (CanDeoptimize()) {
-    deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
-  }
   if (locs()->in(1).IsConstant()) {
-    // Code for a constant shift amount.
-    ASSERT(locs()->in(1).constant().IsSmi());
-    const int64_t shift = Smi::Cast(locs()->in(1).constant()).Value();
-    ASSERT(shift >= 0);
-    switch (op_kind()) {
-      case Token::kSHR:
-        __ sarq(left,
-                Immediate(Utils::Minimum<int64_t>(shift, kBitsPerWord - 1)));
-        break;
-      case Token::kSHL: {
-        ASSERT(shift < 64);
-        __ shlq(left, Immediate(shift));
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
+    EmitShiftInt64ByConstant(compiler, op_kind(), left,
+                             locs()->in(1).constant());
   } else {
     // Code for a variable shift amount.
-    // Deoptimize if shift count is > 63 or negative.
-    // Sarq and shlq instructions mask the count to 6 bits.
-    __ SmiUntag(RCX);
+    ASSERT(locs()->in(1).reg() == RCX);
+
+    // Jump to a slow path if shift count is > 63 or negative.
+    ShiftInt64OpSlowPath* slow_path = NULL;
     if (!IsShiftCountInRange()) {
-      __ cmpq(RCX, Immediate(kMintShiftCountLimit));
+      slow_path =
+          new (Z) ShiftInt64OpSlowPath(this, compiler->CurrentTryIndex());
+      compiler->AddSlowPathCode(slow_path);
+
+      __ cmpq(RCX, Immediate(kShiftCountLimit));
+      __ j(ABOVE, slow_path->entry_label());
+    }
+
+    EmitShiftInt64ByRCX(compiler, op_kind(), left);
+
+    if (slow_path != NULL) {
+      __ Bind(slow_path->exit_label());
+    }
+  }
+}
+
+LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
+    Zone* zone,
+    bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, Location::FixedRegisterOrSmiConstant(right(), RCX));
+  summary->set_out(0, Location::SameAsFirstInput());
+  return summary;
+}
+
+void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  const Register left = locs()->in(0).reg();
+  const Register out = locs()->out(0).reg();
+  ASSERT(left == out);
+  ASSERT(!can_overflow());
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftInt64ByConstant(compiler, op_kind(), left,
+                             locs()->in(1).constant());
+  } else {
+    ASSERT(locs()->in(1).reg() == RCX);
+    __ SmiUntag(RCX);
+
+    // Deoptimize if shift count is > 63 or negative.
+    if (!IsShiftCountInRange()) {
+      ASSERT(CanDeoptimize());
+      Label* deopt =
+          compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
+
+      __ cmpq(RCX, Immediate(kShiftCountLimit));
       __ j(ABOVE, deopt);
     }
-    Label done, large_shift;
-    switch (op_kind()) {
-      case Token::kSHR: {
-        __ sarq(left, RCX);
-        break;
-      }
-      case Token::kSHL: {
-        __ shlq(left, RCX);
-        break;
-      }
-      default:
-        UNREACHABLE();
+
+    EmitShiftInt64ByRCX(compiler, op_kind(), left);
+  }
+}
+
+class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
+ public:
+  static const intptr_t kNumberOfArguments = 0;
+
+  ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction, intptr_t try_index)
+      : ThrowErrorSlowPathCode(instruction,
+                               kArgumentErrorUnboxedInt64RuntimeEntry,
+                               kNumberOfArguments,
+                               try_index) {}
+
+  const char* name() override { return "uint32 shift"; }
+
+  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
+    const Register out = instruction()->locs()->out(0).reg();
+    ASSERT(out == instruction()->locs()->in(0).reg());
+
+    Label throw_error;
+    __ testq(RCX, RCX);
+    __ j(LESS, &throw_error);
+
+    __ xorl(out, out);
+    __ jmp(exit_label());
+
+    __ Bind(&throw_error);
+
+    // Can't pass an unboxed int64 value directly to a runtime call, as all
+    // arguments are expected to be tagged (boxed).
+    // The unboxed int64 argument is passed through a dedicated slot in Thread.
+    // TODO(dartbug.com/33549): Clean this up when unboxed values
+    // can be passed as arguments.
+    __ movq(Address(THR, Thread::unboxed_int64_runtime_arg_offset()), RCX);
+  }
+};
+
+LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
+                                                         bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, Location::FixedRegisterOrConstant(right(), RCX));
+  summary->set_out(0, Location::SameAsFirstInput());
+  return summary;
+}
+
+void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+  Register left = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+  ASSERT(left == out);
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftUint32ByConstant(compiler, op_kind(), left,
+                              locs()->in(1).constant());
+  } else {
+    ASSERT(locs()->in(1).reg() == RCX);
+
+    // Jump to a slow path if shift count is > 31 or negative.
+    ShiftUint32OpSlowPath* slow_path = NULL;
+    if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
+      slow_path =
+          new (Z) ShiftUint32OpSlowPath(this, compiler->CurrentTryIndex());
+      compiler->AddSlowPathCode(slow_path);
+
+      __ cmpq(RCX, Immediate(kUint32ShiftCountLimit));
+      __ j(ABOVE, slow_path->entry_label());
     }
+
+    EmitShiftUint32ByRCX(compiler, op_kind(), left);
+
+    if (slow_path != NULL) {
+      __ Bind(slow_path->exit_label());
+    }
+  }
+}
+
+LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
+    Zone* zone,
+    bool opt) const {
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary = new (zone)
+      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_in(1, Location::FixedRegisterOrSmiConstant(right(), RCX));
+  summary->set_out(0, Location::SameAsFirstInput());
+  return summary;
+}
+
+void SpeculativeShiftUint32OpInstr::EmitNativeCode(
+    FlowGraphCompiler* compiler) {
+  Register left = locs()->in(0).reg();
+  Register out = locs()->out(0).reg();
+  ASSERT(left == out);
+
+  if (locs()->in(1).IsConstant()) {
+    EmitShiftUint32ByConstant(compiler, op_kind(), left,
+                              locs()->in(1).constant());
+  } else {
+    ASSERT(locs()->in(1).reg() == RCX);
+    __ SmiUntag(RCX);
+
+    if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
+      if (!IsShiftCountInRange()) {
+        // Deoptimize if shift count is negative.
+        ASSERT(CanDeoptimize());
+        Label* deopt =
+            compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
+
+        __ testq(RCX, RCX);
+        __ j(LESS, deopt);
+      }
+
+      Label cont;
+      __ cmpq(RCX, Immediate(kUint32ShiftCountLimit));
+      __ j(LESS_EQUAL, &cont);
+
+      __ xorl(left, left);
+
+      __ Bind(&cont);
+    }
+
+    EmitShiftUint32ByRCX(compiler, op_kind(), left);
   }
 }
 
@@ -5310,6 +5577,10 @@
   return CompileType::FromCid(kSmiCid);
 }
 
+CompileType SpeculativeShiftUint32OpInstr::ComputeType() const {
+  return CompileType::FromCid(kSmiCid);
+}
+
 CompileType UnaryUint32OpInstr::ComputeType() const {
   return CompileType::FromCid(kSmiCid);
 }
@@ -5374,88 +5645,6 @@
   }
 }
 
-LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
-                                                         bool opt) const {
-  const intptr_t kNumInputs = 2;
-  const intptr_t kNumTemps = 0;
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
-  summary->set_in(0, Location::RequiresRegister());
-  summary->set_in(1, Location::FixedRegisterOrSmiConstant(right(), RCX));
-  summary->set_out(0, Location::SameAsFirstInput());
-  return summary;
-}
-
-void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  const intptr_t kShifterLimit = 31;
-
-  Register left = locs()->in(0).reg();
-  Register out = locs()->out(0).reg();
-  ASSERT(left == out);
-
-  Label* deopt =
-      compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
-
-  if (locs()->in(1).IsConstant()) {
-    // Shifter is constant.
-
-    const Object& constant = locs()->in(1).constant();
-    ASSERT(constant.IsSmi());
-    const intptr_t shift_value = Smi::Cast(constant).Value();
-
-    // Do the shift: (shift_value > 0) && (shift_value <= kShifterLimit).
-    switch (op_kind()) {
-      case Token::kSHR:
-        __ shrl(left, Immediate(shift_value));
-        break;
-      case Token::kSHL:
-        __ shll(left, Immediate(shift_value));
-        break;
-      default:
-        UNREACHABLE();
-    }
-    return;
-  }
-
-  // Non constant shift value.
-
-  Register shifter = locs()->in(1).reg();
-  ASSERT(shifter == RCX);
-
-  Label done;
-  Label zero;
-
-  // TODO(johnmccutchan): Use range information to avoid these checks.
-  __ SmiUntag(shifter);
-  __ cmpq(shifter, Immediate(0));
-  // If shift value is < 0, deoptimize.
-  __ j(NEGATIVE, deopt);
-  __ cmpq(shifter, Immediate(kShifterLimit));
-  // If shift value is >= 32, return zero.
-  __ j(ABOVE, &zero);
-
-  // Do the shift.
-  switch (op_kind()) {
-    case Token::kSHR:
-      __ shrl(left, shifter);
-      __ jmp(&done);
-      break;
-    case Token::kSHL:
-      __ shll(left, shifter);
-      __ jmp(&done);
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  __ Bind(&zero);
-  // Shift was greater than 31 bits, just return zero.
-  __ xorq(left, left);
-
-  // Exit path.
-  __ Bind(&done);
-}
-
 DEFINE_BACKEND(UnaryUint32Op, (SameAsFirstInput, Register value)) {
   __ notl(value);
 }
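
Note: for a constant kSHR count, EmitShiftInt64ByConstant in this file emits
a single sarq clamped with Utils::Minimum(shift, 63). This works because an
arithmetic shift by 63 already yields the fully sign-extended result, so any
larger count gives the same value; a C++ illustration (hypothetical name):

    #include <cstdint>

    int64_t SarSaturating(int64_t value, int64_t shift /* >= 0 */) {
      const int64_t n = shift < 63 ? shift : 63;  // Utils::Minimum(shift, 63)
      return value >> n;  // arithmetic shift of a signed value
    }
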
diff --git a/runtime/vm/compiler/backend/range_analysis.cc b/runtime/vm/compiler/backend/range_analysis.cc
index b39e2be..fd08f03 100644
--- a/runtime/vm/compiler/backend/range_analysis.cc
+++ b/runtime/vm/compiler/backend/range_analysis.cc
@@ -254,8 +254,9 @@
           values_.Add(defn);
           if (defn->IsBinaryInt64Op()) {
             binary_int64_ops_.Add(defn->AsBinaryInt64Op());
-          } else if (defn->IsShiftInt64Op()) {
-            shift_int64_ops_.Add(defn->AsShiftInt64Op());
+          } else if (defn->IsShiftInt64Op() ||
+                     defn->IsSpeculativeShiftInt64Op()) {
+            shift_int64_ops_.Add(defn->AsShiftIntegerOp());
           }
         }
       } else if (current->IsCheckArrayBound()) {
@@ -1497,7 +1498,7 @@
   }
 }
 
-static void NarrowShiftInt64Op(ShiftInt64OpInstr* int64_op) {
+static void NarrowShiftInt64Op(ShiftIntegerOpInstr* int64_op) {
   if (RangeUtils::Fits(int64_op->range(), RangeBoundary::kRangeBoundaryInt32) &&
       RangeUtils::Fits(int64_op->left()->definition()->range(),
                        RangeBoundary::kRangeBoundaryInt32) &&
@@ -1552,7 +1553,8 @@
   // & untagged of intermediate results.
   // TODO(johnmccutchan): Consider phis.
   return def->IsBoxInt64() || def->IsUnboxInt64() || def->IsBinaryInt64Op() ||
-         def->IsShiftInt64Op() || def->IsUnaryInt64Op();
+         def->IsShiftInt64Op() || def->IsSpeculativeShiftInt64Op() ||
+         def->IsUnaryInt64Op();
 }
 
 void IntegerInstructionSelector::FindPotentialUint32Definitions() {
@@ -1625,6 +1627,13 @@
         !selected_uint32_defs_->Contains(defn->ssa_temp_index())) {
       return false;
     }
+    // The right-hand side operand of a shift operation is not narrowed, as
+    // all of its bits should be taken into account.
+    if (ShiftIntegerOpInstr* shift = defn->AsShiftIntegerOp()) {
+      if (use == shift->right()) {
+        return false;
+      }
+    }
   }
   return true;
 }
@@ -1638,8 +1647,8 @@
   }
   // A right shift with an input outside of Uint32 range cannot be converted
   // because we need the high bits.
-  if (def->IsShiftInt64Op()) {
-    ShiftInt64OpInstr* op = def->AsShiftInt64Op();
+  if (def->IsShiftInt64Op() || def->IsSpeculativeShiftInt64Op()) {
+    ShiftIntegerOpInstr* op = def->AsShiftIntegerOp();
     if (op->op_kind() == Token::kSHR) {
       Definition* shift_input = op->left()->definition();
       ASSERT(shift_input != NULL);
@@ -1699,13 +1708,22 @@
   ASSERT(IsPotentialUint32Definition(def));
   // Should not see constant instructions.
   ASSERT(!def->IsConstant());
-  if (def->IsBinaryInt64Op()) {
-    BinaryInt64OpInstr* op = def->AsBinaryInt64Op();
+  if (def->IsBinaryIntegerOp()) {
+    BinaryIntegerOpInstr* op = def->AsBinaryIntegerOp();
     Token::Kind op_kind = op->op_kind();
     Value* left = op->left()->CopyWithType();
     Value* right = op->right()->CopyWithType();
     intptr_t deopt_id = op->DeoptimizationTarget();
-    return new (Z) BinaryUint32OpInstr(op_kind, left, right, deopt_id);
+    if (def->IsBinaryInt64Op()) {
+      return new (Z) BinaryUint32OpInstr(op_kind, left, right, deopt_id);
+    } else if (def->IsShiftInt64Op()) {
+      return new (Z) ShiftUint32OpInstr(op_kind, left, right, deopt_id);
+    } else if (def->IsSpeculativeShiftInt64Op()) {
+      return new (Z)
+          SpeculativeShiftUint32OpInstr(op_kind, left, right, deopt_id);
+    } else {
+      UNREACHABLE();
+    }
   } else if (def->IsBoxInt64()) {
     Value* value = def->AsBoxInt64()->value()->CopyWithType();
     return new (Z) BoxUint32Instr(value);
@@ -1720,13 +1738,6 @@
     Value* value = op->value()->CopyWithType();
     intptr_t deopt_id = op->DeoptimizationTarget();
     return new (Z) UnaryUint32OpInstr(op_kind, value, deopt_id);
-  } else if (def->IsShiftInt64Op()) {
-    ShiftInt64OpInstr* op = def->AsShiftInt64Op();
-    Token::Kind op_kind = op->op_kind();
-    Value* left = op->left()->CopyWithType();
-    Value* right = op->right()->CopyWithType();
-    intptr_t deopt_id = op->DeoptimizationTarget();
-    return new (Z) ShiftUint32OpInstr(op_kind, left, right, deopt_id);
   }
   UNREACHABLE();
   return NULL;
@@ -2504,7 +2515,7 @@
 void Definition::InferRange(RangeAnalysis* analysis, Range* range) {
   if (Type()->ToCid() == kSmiCid) {
     *range = Range::Full(RangeBoundary::kRangeBoundarySmi);
-  } else if (IsMintDefinition()) {
+  } else if (IsInt64Definition()) {
     *range = Range::Full(RangeBoundary::kRangeBoundaryInt64);
   } else if (IsInt32Definition()) {
     *range = Range::Full(RangeBoundary::kRangeBoundaryInt32);
@@ -2844,7 +2855,7 @@
                    right()->definition()->range(), range);
 }
 
-void ShiftInt64OpInstr::InferRange(RangeAnalysis* analysis, Range* range) {
+void ShiftIntegerOpInstr::InferRange(RangeAnalysis* analysis, Range* range) {
   CacheRange(&shift_range_, right()->definition()->range(),
              RangeBoundary::kRangeBoundaryInt64);
   InferRangeHelper(left()->definition()->range(),
@@ -2903,7 +2914,7 @@
   const Range* value_range = value()->definition()->range();
   if (value_range != NULL) {
     *range = *value_range;
-  } else if (!value()->definition()->IsMintDefinition() &&
+  } else if (!value()->definition()->IsInt64Definition() &&
              (value()->definition()->Type()->ToCid() != kSmiCid)) {
     *range = Range::Full(RangeBoundary::kRangeBoundaryInt64);
   }
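
Note: the new ShiftIntegerOpInstr::right() exclusion above matters because
truncating a shift count to 32 bits can change the result. A standalone C++
counterexample (illustration only):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t count = int64_t{1} << 32;     // 2^32: x << count must be 0
      const uint32_t narrowed = (uint32_t)count;  // truncates to 0
      assert(narrowed == 0);  // x << narrowed would wrongly return x itself
      return 0;
    }
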
diff --git a/runtime/vm/compiler/backend/range_analysis.h b/runtime/vm/compiler/backend/range_analysis.h
index e8140f8..0b25eaa 100644
--- a/runtime/vm/compiler/backend/range_analysis.h
+++ b/runtime/vm/compiler/backend/range_analysis.h
@@ -605,7 +605,7 @@
 
   GrowableArray<BinaryInt64OpInstr*> binary_int64_ops_;
 
-  GrowableArray<ShiftInt64OpInstr*> shift_int64_ops_;
+  GrowableArray<ShiftIntegerOpInstr*> shift_int64_ops_;
 
   // All CheckArrayBound instructions.
   GrowableArray<CheckArrayBoundInstr*> bounds_checks_;
diff --git a/runtime/vm/compiler/backend/type_propagator.cc b/runtime/vm/compiler/backend/type_propagator.cc
index 1f0f476..e4f7d2e 100644
--- a/runtime/vm/compiler/backend/type_propagator.cc
+++ b/runtime/vm/compiler/backend/type_propagator.cc
@@ -1328,6 +1328,10 @@
   return CompileType::Int();
 }
 
+CompileType SpeculativeShiftInt64OpInstr::ComputeType() const {
+  return CompileType::Int();
+}
+
 CompileType UnaryInt64OpInstr::ComputeType() const {
   return CompileType::Int();
 }
diff --git a/runtime/vm/compiler/call_specializer.cc b/runtime/vm/compiler/call_specializer.cc
index 41b3027..4faa42a 100644
--- a/runtime/vm/compiler/call_specializer.cc
+++ b/runtime/vm/compiler/call_specializer.cc
@@ -760,8 +760,9 @@
   } else if (operands_type == kMintCid) {
     if (!FlowGraphCompiler::SupportsUnboxedInt64()) return false;
     if ((op_kind == Token::kSHR) || (op_kind == Token::kSHL)) {
-      ShiftInt64OpInstr* shift_op = new (Z) ShiftInt64OpInstr(
-          op_kind, new (Z) Value(left), new (Z) Value(right), call->deopt_id());
+      SpeculativeShiftInt64OpInstr* shift_op = new (Z)
+          SpeculativeShiftInt64OpInstr(op_kind, new (Z) Value(left),
+                                       new (Z) Value(right), call->deopt_id());
       ReplaceCall(call, shift_op);
     } else {
       BinaryInt64OpInstr* bin_op = new (Z) BinaryInt64OpInstr(
diff --git a/runtime/vm/dart.cc b/runtime/vm/dart.cc
index afedf3b..ddce593 100644
--- a/runtime/vm/dart.cc
+++ b/runtime/vm/dart.cc
@@ -91,7 +91,7 @@
   // These offsets are embedded in precompiled instructions. We need simarm
   // (compiler) and arm (runtime) to agree.
   CHECK_OFFSET(Thread::stack_limit_offset(), 4);
-  CHECK_OFFSET(Thread::object_null_offset(), 48);
+  CHECK_OFFSET(Thread::object_null_offset(), 56);
   CHECK_OFFSET(SingleTargetCache::upper_limit_offset(), 14);
   CHECK_OFFSET(Isolate::object_store_offset(), 28);
   NOT_IN_PRODUCT(CHECK_OFFSET(sizeof(ClassHeapStats), 168));
@@ -100,7 +100,7 @@
   // These offsets are embedded in precompiled instructions. We need simarm64
   // (compiler) and arm64 (runtime) to agree.
   CHECK_OFFSET(Thread::stack_limit_offset(), 8);
-  CHECK_OFFSET(Thread::object_null_offset(), 96);
+  CHECK_OFFSET(Thread::object_null_offset(), 104);
   CHECK_OFFSET(SingleTargetCache::upper_limit_offset(), 26);
   CHECK_OFFSET(Isolate::object_store_offset(), 56);
   NOT_IN_PRODUCT(CHECK_OFFSET(sizeof(ClassHeapStats), 288));
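
Note: both object_null offsets move by exactly 8 because the new 8-byte
unboxed_int64_runtime_arg_ field (see thread.h below) is laid out before the
cached members. A C++ illustration with a hypothetical layout, not the real
Thread class:

    #include <cstddef>
    #include <cstdint>

    struct ThreadModel {
      int64_t unboxed_int64_runtime_arg_;  // newly inserted 8-byte slot
      void* object_null_;                  // every later member shifts by 8
    };
    static_assert(offsetof(ThreadModel, object_null_) == sizeof(int64_t),
                  "members after the new slot move by 8 bytes");
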
diff --git a/runtime/vm/runtime_entry.cc b/runtime/vm/runtime_entry.cc
index 9a87566..4ffa330 100644
--- a/runtime/vm/runtime_entry.cc
+++ b/runtime/vm/runtime_entry.cc
@@ -218,6 +218,13 @@
   Exceptions::ThrowByType(Exceptions::kNoSuchMethod, args);
 }
 
+DEFINE_RUNTIME_ENTRY(ArgumentErrorUnboxedInt64, 0) {
+  // The unboxed value is passed through a dedicated slot in Thread.
+  int64_t unboxed_value = arguments.thread()->unboxed_int64_runtime_arg();
+  const Integer& value = Integer::Handle(zone, Integer::New(unboxed_value));
+  Exceptions::ThrowArgumentError(value);
+}
+
 // Allocation of a fixed length array of given element type.
 // This runtime entry is never called for allocating a List of a generic type,
 // because a prior run time call instantiates the element type if necessary.
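
Note: this entry completes the hand-off started by the ShiftInt64Op and
ShiftUint32Op slow paths above: runtime call arguments must be tagged
objects, so the raw int64 travels through the dedicated Thread slot instead.
A sketch with hypothetical types (not CL code):

    #include <cstdint>

    struct ThreadModel {
      int64_t unboxed_int64_runtime_arg;  // written by generated code
    };

    void ArgumentErrorUnboxedInt64Model(ThreadModel* thread) {
      const int64_t raw = thread->unboxed_int64_runtime_arg;  // read it back
      // ... box `raw` into an Integer and throw ArgumentError(value) ...
      (void)raw;
    }
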
diff --git a/runtime/vm/runtime_entry_list.h b/runtime/vm/runtime_entry_list.h
index 2c5f7f5..65c2c0a 100644
--- a/runtime/vm/runtime_entry_list.h
+++ b/runtime/vm/runtime_entry_list.h
@@ -35,6 +35,7 @@
   V(PatchStaticCall)                                                           \
   V(RangeError)                                                                \
   V(NullError)                                                                 \
+  V(ArgumentErrorUnboxedInt64)                                                 \
   V(ReThrow)                                                                   \
   V(StackOverflow)                                                             \
   V(Throw)                                                                     \
diff --git a/runtime/vm/thread.cc b/runtime/vm/thread.cc
index 5d62829..c04d527 100644
--- a/runtime/vm/thread.cc
+++ b/runtime/vm/thread.cc
@@ -69,6 +69,7 @@
       vm_tag_(0),
       task_kind_(kUnknownTask),
       async_stack_trace_(StackTrace::null()),
+      unboxed_int64_runtime_arg_(0),
       dart_stream_(NULL),
       os_thread_(NULL),
       thread_lock_(new Monitor()),
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index eb81ca54..d8f2792 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -564,6 +564,16 @@
   void set_vm_tag(uword tag) { vm_tag_ = tag; }
   static intptr_t vm_tag_offset() { return OFFSET_OF(Thread, vm_tag_); }
 
+  int64_t unboxed_int64_runtime_arg() const {
+    return unboxed_int64_runtime_arg_;
+  }
+  void set_unboxed_int64_runtime_arg(int64_t value) {
+    unboxed_int64_runtime_arg_ = value;
+  }
+  static intptr_t unboxed_int64_runtime_arg_offset() {
+    return OFFSET_OF(Thread, unboxed_int64_runtime_arg_);
+  }
+
   RawGrowableObjectArray* pending_functions();
   void clear_pending_functions();
 
@@ -788,6 +798,12 @@
   uword vm_tag_;
   TaskKind task_kind_;
   RawStackTrace* async_stack_trace_;
+  // Memory location dedicated to passing unboxed int64 values from
+  // generated code to the runtime.
+  // TODO(dartbug.com/33549): Clean this up when unboxed values
+  // can be passed as arguments.
+  int64_t unboxed_int64_runtime_arg_;
+
 // State that is cached in the TLS for fast access in generated code.
 #define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value)      \
   type_name member_name;
diff --git a/tests/language_2/vm/shift_special_cases_test.dart b/tests/language_2/vm/shift_special_cases_test.dart
new file mode 100644
index 0000000..604a0a3
--- /dev/null
+++ b/tests/language_2/vm/shift_special_cases_test.dart
@@ -0,0 +1,345 @@
+// Copyright (c) 2018, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+// VMOptions=--optimization-counter-threshold=10 --no-background-compilation
+
+// Test for special cases of << and >> integer operations with int64.
+
+import "package:expect/expect.dart";
+
+// int64 values that do not fit in a Smi
+int v1 = 0x778899aabbccddee;
+int v2 = 0x6000000000000000;
+int v3 = -0x778899aabbccddee;
+int negativeInt64 = -0x7000000000000000;
+int smi = 128;
+int negativeSmi = -3;
+
+int shl(int a, int b) => a << b;
+int shr(int a, int b) => a >> b;
+
+int shlUint32(int a, int b) => ((a & 0xffff) << b) & 0xffff;
+int shrUint32(int a, int b) => (a & 0xffff) >> b;
+
+int testInt64ShlByNegative1(int a, int b) {
+  int x = a + 1;
+  int y = a - 2;
+  try {
+    x = a << b;
+    Expect.fail('Shift by negative count should throw an error');
+  } on ArgumentError {
+    Expect.equals(0x778899aabbccddef, x);
+    Expect.equals(0x778899aabbccddec, y);
+  }
+}
+
+int testInt64ShlByNegative2(int a, int b) {
+  int x = a + 1;
+  int y = a - 2;
+  try {
+    x = shl(a, b);
+    Expect.fail('Shift by negative count should throw an error');
+  } on ArgumentError {
+    Expect.equals(0x778899aabbccddef, x);
+    Expect.equals(0x778899aabbccddec, y);
+  }
+}
+
+int testInt64ShlByNegative3(int a) {
+  int x = a + 1;
+  int y = a - 2;
+  try {
+    int i = -64;
+    x = a << i;
+    Expect.fail('Shift by negative count should throw an error');
+  } on ArgumentError {
+    Expect.equals(0x778899aabbccddef, x);
+    Expect.equals(0x778899aabbccddec, y);
+  }
+}
+
+int testInt64ShrByNegative1(int a, int b) {
+  int x = a + 1;
+  int y = a - 2;
+  try {
+    x = a >> b;
+    Expect.fail('Shift by negative count should throw an error');
+  } on ArgumentError {
+    Expect.equals(0x778899aabbccddef, x);
+    Expect.equals(0x778899aabbccddec, y);
+  }
+}
+
+int testInt64ShrByNegative2(int a, int b) {
+  int x = a + 1;
+  int y = a - 2;
+  try {
+    x = shr(a, b);
+    Expect.fail('Shift by negative count should throw an error');
+  } on ArgumentError {
+    Expect.equals(0x778899aabbccddef, x);
+    Expect.equals(0x778899aabbccddec, y);
+  }
+}
+
+int testInt64ShrByNegative3(int a) {
+  int x = a + 1;
+  int y = a - 2;
+  try {
+    int i = -64;
+    x = a >> i;
+    Expect.fail('Shift by negative count should throw an error');
+  } on ArgumentError {
+    Expect.equals(0x778899aabbccddef, x);
+    Expect.equals(0x778899aabbccddec, y);
+  }
+}
+
+int testInt64ShlByLarge1(int a, int b) {
+  int x = a << b;
+  Expect.equals(0, x);
+}
+
+int testInt64ShlByLarge2(int a) {
+  int i = 64;
+  int x = a << i;
+  Expect.equals(0, x);
+}
+
+int testInt64ShlByLarge3(int a) {
+  int i = 0x7fffffffffffffff;
+  int x = a << i;
+  Expect.equals(0, x);
+}
+
+int testInt64ShrByLarge1a(int a, int b) {
+  int x = a >> b;
+  Expect.equals(0, x);
+}
+
+int testInt64ShrByLarge1b(int a, int b) {
+  int x = a >> b;
+  Expect.equals(-1, x);
+}
+
+int testInt64ShrByLarge2a(int a) {
+  int i = 64;
+  int x = a >> i;
+  Expect.equals(0, x);
+}
+
+int testInt64ShrByLarge2b(int a) {
+  int i = 64;
+  int x = a >> i;
+  Expect.equals(-1, x);
+}
+
+int testInt64ShrByLarge3a(int a) {
+  int i = 0x7fffffffffffffff;
+  int x = a >> i;
+  Expect.equals(0, x);
+}
+
+int testInt64ShrByLarge3b(int a) {
+  int i = 0x7fffffffffffffff;
+  int x = a >> i;
+  Expect.equals(-1, x);
+}
+
+int testUint32ShlByNegative1(int a, int b) {
+  int x = (a & 0xfff) + 1;
+  int y = (a & 0xfff) - 2;
+  try {
+    x = ((a & 0xffff) << b) & 0xffff;
+    Expect.fail('Shift by negative count should throw an error');
+  } on ArgumentError {
+    Expect.equals(0xdef, x);
+    Expect.equals(0xdec, y);
+  }
+  return x;
+}
+
+int testUint32ShlByNegative2(int a, int b) {
+  int x = (a & 0xfff) + 1;
+  int y = (a & 0xfff) - 2;
+  try {
+    x = shlUint32(a, b);
+    Expect.fail('Shift by negative count should throw an error');
+  } on ArgumentError {
+    Expect.equals(0xdef, x);
+    Expect.equals(0xdec, y);
+  }
+}
+
+int testUint32ShlByNegative3(int a) {
+  int x = (a & 0xfff) + 1;
+  int y = (a & 0xfff) - 2;
+  try {
+    int i = -64;
+    x = ((a & 0xffff) << i) & 0xffff;
+    Expect.fail('Shift by negative count should throw an error');
+  } on ArgumentError {
+    Expect.equals(0xdef, x);
+    Expect.equals(0xdec, y);
+  }
+}
+
+int testUint32ShrByNegative1(int a, int b) {
+  int x = (a & 0xfff) + 1;
+  int y = (a & 0xfff) - 2;
+  try {
+    x = ((a & 0xffff) >> b) & 0xffff;
+    Expect.fail('Shift by negative count should throw an error');
+  } on ArgumentError {
+    Expect.equals(0xdef, x);
+    Expect.equals(0xdec, y);
+  }
+}
+
+int testUint32ShrByNegative2(int a, int b) {
+  int x = (a & 0xfff) + 1;
+  int y = (a & 0xfff) - 2;
+  try {
+    x = shrUint32(a, b);
+    Expect.fail('Shift by negative count should throw an error');
+  } on ArgumentError {
+    Expect.equals(0xdef, x);
+    Expect.equals(0xdec, y);
+  }
+}
+
+int testUint32ShrByNegative3(int a) {
+  int x = (a & 0xfff) + 1;
+  int y = (a & 0xfff) - 2;
+  try {
+    int i = -64;
+    x = ((a & 0xffff) >> i) & 0xffff;
+    Expect.fail('Shift by negative count should throw an error');
+  } on ArgumentError {
+    Expect.equals(0xdef, x);
+    Expect.equals(0xdec, y);
+  }
+}
+
+int testUint32ShlByLarge1(int a, int b) {
+  int x = ((a & 0xffff) << b) & 0xffff;
+  Expect.equals(0, x);
+}
+
+int testUint32ShlByLarge2(int a) {
+  int i = 64;
+  int x = ((a & 0xffff) << i) & 0xffff;
+  Expect.equals(0, x);
+}
+
+int testUint32ShlByLarge3(int a) {
+  int i = 0x7fffffffffffffff;
+  int x = ((a & 0xffff) << i) & 0xffff;
+  Expect.equals(0, x);
+}
+
+int testUint32ShrByLarge1(int a, int b) {
+  int x = ((a & 0xffff) >> b) & 0xffff;
+  Expect.equals(0, x);
+}
+
+int testUint32ShrByLarge2(int a) {
+  int i = 64;
+  int x = ((a & 0xffff) >> i) & 0xffff;
+  Expect.equals(0, x);
+}
+
+int testUint32ShrByLarge3(int a) {
+  int i = 0x7fffffffffffffff;
+  int x = ((a & 0xffff) >> i) & 0xffff;
+  Expect.equals(0, x);
+}
+
+doTests1() {
+  testInt64ShlByNegative1(v1, negativeSmi);
+  testInt64ShlByNegative2(v1, negativeSmi);
+  testInt64ShlByNegative3(v1);
+
+  testInt64ShrByNegative1(v1, negativeSmi);
+  testInt64ShrByNegative2(v1, negativeSmi);
+  testInt64ShrByNegative3(v1);
+
+  testInt64ShlByLarge1(v1, smi);
+  testInt64ShlByLarge1(v3, smi);
+
+  testInt64ShlByLarge2(v1);
+  testInt64ShlByLarge2(v3);
+
+  testInt64ShlByLarge3(v1);
+  testInt64ShlByLarge3(v3);
+
+  testInt64ShrByLarge1a(v1, smi);
+  testInt64ShrByLarge1b(v3, smi);
+
+  testInt64ShrByLarge2a(v1);
+  testInt64ShrByLarge2b(v3);
+
+  testInt64ShrByLarge3a(v1);
+  testInt64ShrByLarge3b(v3);
+
+  testUint32ShlByNegative1(v1, negativeSmi);
+  testUint32ShlByNegative2(v1, negativeSmi);
+  testUint32ShlByNegative3(v1);
+
+  testUint32ShrByNegative1(v1, negativeSmi);
+  testUint32ShrByNegative2(v1, negativeSmi);
+  testUint32ShrByNegative3(v1);
+
+  testUint32ShlByLarge1(v1, smi);
+  testUint32ShlByLarge1(v3, smi);
+
+  testUint32ShlByLarge2(v1);
+  testUint32ShlByLarge2(v3);
+
+  testUint32ShlByLarge3(v1);
+  testUint32ShlByLarge3(v3);
+
+  testUint32ShrByLarge1(v1, smi);
+  testUint32ShrByLarge1(v3, smi);
+
+  testUint32ShrByLarge2(v1);
+  testUint32ShrByLarge2(v3);
+
+  testUint32ShrByLarge3(v1);
+  testUint32ShrByLarge3(v3);
+}
+
+doTests2() {
+  testInt64ShlByNegative1(v1, negativeInt64);
+  testInt64ShlByNegative2(v1, negativeInt64);
+
+  testInt64ShrByNegative1(v1, negativeInt64);
+  testInt64ShrByNegative2(v1, negativeInt64);
+
+  testInt64ShlByLarge1(v1, v2);
+  testInt64ShlByLarge1(v3, v2);
+
+  testInt64ShrByLarge1a(v1, v2);
+  testInt64ShrByLarge1b(v3, v2);
+
+  testUint32ShlByNegative1(v1, negativeInt64);
+  testUint32ShlByNegative2(v1, negativeInt64);
+
+  testUint32ShrByNegative1(v1, negativeInt64);
+  testUint32ShrByNegative2(v1, negativeInt64);
+
+  testUint32ShlByLarge1(v1, v2);
+  testUint32ShlByLarge1(v3, v2);
+
+  testUint32ShrByLarge1(v1, v2);
+  testUint32ShrByLarge1(v3, v2);
+}
+
+main() {
+  for (var i = 0; i < 20; ++i) {
+    doTests1();
+  }
+  for (var i = 0; i < 50; ++i) {
+    doTests2();
+  }
+}