Version 1.1.0-dev.5.7

svn merge -c 31536 https://dart.googlecode.com/svn/branches/bleeding_edge trunk
svn merge -c 31575 https://dart.googlecode.com/svn/branches/bleeding_edge trunk
svn merge -c 31576 https://dart.googlecode.com/svn/branches/bleeding_edge trunk
svn merge -c 31593 https://dart.googlecode.com/svn/branches/bleeding_edge trunk
svn merge -c 31616 https://dart.googlecode.com/svn/branches/bleeding_edge trunk
svn merge -c 31618 https://dart.googlecode.com/svn/branches/bleeding_edge trunk
svn merge -c 31628 https://dart.googlecode.com/svn/branches/bleeding_edge trunk
svn merge -c 31528 https://dart.googlecode.com/svn/branches/bleeding_edge trunk
svn merge -c 31565 https://dart.googlecode.com/svn/branches/bleeding_edge trunk
svn merge -c 31592 https://dart.googlecode.com/svn/branches/bleeding_edge trunk
svn merge -c 31595 https://dart.googlecode.com/svn/branches/bleeding_edge trunk

R=ricow@google.com

Review URL: https://codereview.chromium.org//132163005

git-svn-id: http://dart.googlecode.com/svn/trunk@31689 260f80e4-7a28-3924-810f-c04153c831b5
diff --git a/runtime/bin/builtin.dart b/runtime/bin/builtin.dart
index 2a0e678..51e6dce 100644
--- a/runtime/bin/builtin.dart
+++ b/runtime/bin/builtin.dart
@@ -85,6 +85,9 @@
               .fold(new BytesBuilder(), (b, d) => b..add(d))
               .then((builder) {
                 _requestCompleted(builder.takeBytes(), response);
+                // This client is only used for a single request. Force-close
+                // it now; otherwise it lingers until the idle timeout.
+                _client.close(force: true);
               });
         }).catchError((error) {
           _requestFailed(error);
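Note on the builtin.dart change above: the loader's HttpClient services exactly one request, so it is now force-closed once the body is drained. A minimal standalone sketch of the same pattern (names are illustrative; dart:io API of this era):

    import 'dart:io';

    // Fetch one URI, hand the bytes to a callback, then force-close the
    // client so the process does not linger until the idle connection
    // times out on its own.
    void fetchOnce(Uri uri, void onBytes(List<int> bytes)) {
      var client = new HttpClient();
      client.getUrl(uri)
          .then((request) => request.close())
          .then((response) =>
              response.fold(new BytesBuilder(), (b, d) => b..add(d)))
          .then((builder) {
            onBytes(builder.takeBytes());
            // Single-request client: close(force: true) drops the
            // kept-alive connection instead of waiting for its timeout.
            client.close(force: true);
          });
    }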
diff --git a/runtime/lib/typed_data.cc b/runtime/lib/typed_data.cc
index 703a7f9..e08a75fb 100644
--- a/runtime/lib/typed_data.cc
+++ b/runtime/lib/typed_data.cc
@@ -65,34 +65,29 @@
   return Integer::null();
 }
 
+
 template <typename DstType, typename SrcType>
 static RawBool* CopyData(const Instance& dst, const Instance& src,
                          const Smi& dst_start, const Smi& src_start,
                          const Smi& length) {
   const DstType& dst_array = DstType::Cast(dst);
   const SrcType& src_array = SrcType::Cast(src);
-  intptr_t element_size_in_bytes = dst_array.ElementSizeInBytes();
-  intptr_t dst_offset_in_bytes = dst_start.Value() * element_size_in_bytes;
-  intptr_t src_offset_in_bytes = src_start.Value() * element_size_in_bytes;
-  intptr_t length_in_bytes = length.Value() * element_size_in_bytes;
+  const intptr_t dst_offset_in_bytes = dst_start.Value();
+  const intptr_t src_offset_in_bytes = src_start.Value();
+  const intptr_t length_in_bytes = length.Value();
   if (dst_array.ElementType() != src_array.ElementType()) {
     return Bool::False().raw();
   }
-  RangeCheck(src_offset_in_bytes,
-             length_in_bytes,
-             src_array.LengthInBytes(),
-             element_size_in_bytes);
-  RangeCheck(dst_offset_in_bytes,
-             length_in_bytes,
-             dst_array.LengthInBytes(),
-             element_size_in_bytes);
+  ASSERT(Utils::RangeCheck(
+      src_offset_in_bytes, length_in_bytes, src_array.LengthInBytes()));
+  ASSERT(Utils::RangeCheck(
+      dst_offset_in_bytes, length_in_bytes, dst_array.LengthInBytes()));
   TypedData::Copy<DstType, SrcType>(dst_array, dst_offset_in_bytes,
                                     src_array, src_offset_in_bytes,
                                     length_in_bytes);
   return Bool::True().raw();
 }
 
-
 DEFINE_NATIVE_ENTRY(TypedData_setRange, 5) {
   GET_NON_NULL_NATIVE_ARGUMENT(Instance, dst, arguments->NativeArgAt(0));
   GET_NON_NULL_NATIVE_ARGUMENT(Smi, dst_start, arguments->NativeArgAt(1));
@@ -122,10 +117,10 @@
           dst, src, dst_start, src_start, length);
     }
   }
+  UNREACHABLE();
   return Bool::False().raw();
 }
 
-
 // We check the length parameter against a possible maximum length for the
 // array based on available physical addressable memory on the system. The
 // maximum possible length is a scaled value of kSmiMax which is set up based
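The CopyData rewrite above now takes byte offsets directly from the Dart caller (which has already range-checked them) and only performs the raw copy when both arrays share an element type. A hedged Dart-level illustration of the two outcomes:

    import 'dart:typed_data';

    main() {
      var a = new Uint8List.fromList([1, 2, 3, 4]);
      var b = new Uint8List(4);
      b.setRange(0, 4, a);   // Same element type: the native memory copy
      print(b);              // succeeds. Prints [1, 2, 3, 4].

      var c = new Int8List.fromList([-1, -2, -3, -4]);
      var d = new Uint8List(4);
      d.setRange(0, 4, c);   // Same element size but different element
      print(d);              // type: CopyData returns false and the generic
    }                        // path runs. Prints [255, 254, 253, 252].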
diff --git a/runtime/lib/typed_data.dart b/runtime/lib/typed_data.dart
index 2a71553..376acb3 100644
--- a/runtime/lib/typed_data.dart
+++ b/runtime/lib/typed_data.dart
@@ -62,6 +62,8 @@
                                              int length]) {
     return new _Uint8ClampedArrayView(buffer, offsetInBytes, length);
   }
+
+  bool _isClamped() { return true; }
 }
 
 
@@ -523,15 +525,63 @@
     return IterableMixinWorkaround.getRangeList(this, start, end);
   }
 
-  void setRange(int start, int end, Iterable iterable, [int skipCount = 0]) {
-    if (!_setRange(start, end - start, iterable, skipCount)) {
-      IterableMixinWorkaround.setRangeList(this, start,
-                                           end, iterable, skipCount);
+  bool _isClamped() { return false; }
+
+  void setRange(int start, int end, Iterable from, [int skipCount = 0]) {
+    // Check ranges.
+    if ((start < 0) || (start > length)) {
+      _throwRangeError(start, length + 1);
     }
+    if ((end < 0) || (end > length)) {
+      _throwRangeError(end, length + 1);
+    }
+    if (start > end) {
+      _throwRangeError(start, end + 1);
+    }
+    if (skipCount < 0) {
+      throw new ArgumentError(skipCount);
+    }
+
+    final count = end - start;
+    if ((from.length - skipCount) < count) {
+      throw new StateError("Not enough elements");
+    }
+
+    if (from is _TypedListBase) {
+      final needsClamping =
+          this._isClamped() && (this._isClamped() != from._isClamped());
+      if (this.elementSizeInBytes == from.elementSizeInBytes) {
+        if (needsClamping) {
+          Lists.copy(from, skipCount, this, start, count);
+          return;
+        } else if (this.buffer._setRange(
+                     start * elementSizeInBytes + this.offsetInBytes,
+                     count * elementSizeInBytes,
+                     from.buffer,
+                     skipCount * elementSizeInBytes + from.offsetInBytes)) {
+          return;
+        }
+      } else if (from.buffer == this.buffer) {
+        // Different element sizes but the same buffer: we need an
+        // intermediate copy.
+        // TODO(srdjan): Optimize to skip copying if the range does not overlap.
+        final temp_buffer = new List(count);
+        for (int i = 0; i < count; i++) {
+          temp_buffer[i] = from[skipCount + i];
+        }
+        for (int i = start; i < end; i++) {
+          this[i] = temp_buffer[i - start];
+        }
+        return;
+      }
+    }
+    IterableMixinWorkaround.setRangeList(this, start,
+                                         end, from, skipCount);
   }
 
   void setAll(int index, Iterable iterable) {
-    IterableMixinWorkaround.setAllList(this, index, iterable);
+    final end = iterable.length + index;
+    setRange(index, end, iterable);
   }
 
   void fillRange(int start, int end, [fillValue]) {
@@ -548,7 +598,11 @@
 
   // Internal utility methods.
 
-  bool _setRange(int start, int length, Iterable from, int startFrom)
+  // Returns true if the operation succeeds.
+  // Returns false if 'from' and 'this' do not have the same element type.
+  // The copy is a raw memory copy (no clamping or conversion).
+  bool _setRange(int startInBytes, int lengthInBytes,
+                 _TypedListBase from, int startFromInBytes)
       native "TypedData_setRange";
 }
 
@@ -567,12 +621,10 @@
     return this;
   }
 
-
   // Methods implementing the collection interface.
 
   int get length native "TypedData_length";
 
-
   // Internal utility methods.
 
   int _getInt8(int offsetInBytes) native "TypedData_GetInt8";
@@ -738,6 +790,7 @@
     return new _Uint8ClampedArrayView(buffer, offsetInBytes, length);
   }
 
+  bool _isClamped() { return true; }
 
   // Methods implementing List interface.
 
@@ -1503,6 +1556,7 @@
     return _new(length);
   }
 
+  bool _isClamped() { return true; }
 
   // Method(s) implementing the List interface.
 
@@ -2286,7 +2340,6 @@
       length = _length {
   }
 
-
   // Method(s) implementing the TypedData interface.
 
   int get lengthInBytes {
@@ -2419,6 +2472,8 @@
   }
 
 
+  bool _isClamped() { return true; }
+
   // Method(s) implementing List interface.
 
   int operator[](int index) {
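The clamping corner the new Dart setRange handles, shown concretely (this mirrors the setRange_4_test added later in this patch): when the destination is clamped and the source is not, same-size typed lists are copied element-wise via Lists.copy so values clamp instead of being byte-copied:

    import 'dart:typed_data';

    main() {
      var a1 = new Int8List(8);
      var a2 = new Uint8ClampedList.view(a1.buffer);
      a1[0] = -1;             // 0xFF when read as an unsigned byte.
      a2.setRange(0, 2, a1);  // Clamped destination: -1 clamps to 0
      print(a2[0]);           // rather than arriving as the raw byte
    }                         // 255. Prints 0.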
diff --git a/runtime/tests/vm/vm.status b/runtime/tests/vm/vm.status
index 669168b..200b6e8 100644
--- a/runtime/tests/vm/vm.status
+++ b/runtime/tests/vm/vm.status
@@ -27,6 +27,11 @@
 cc/ThreadInterrupterMedium: Skip
 cc/ThreadInterrupterLow: Skip
 
+[ $system == linux ]
+cc/ThreadInterrupterHigh: Skip
+cc/ThreadInterrupterMedium: Skip
+cc/ThreadInterrupterLow: Skip
+
 [ $system == macos ]
 cc/ThreadInterrupterHigh: Skip
 cc/ThreadInterrupterMedium: Skip
diff --git a/runtime/vm/bootstrap_natives.h b/runtime/vm/bootstrap_natives.h
index 5747777..0a054bc 100644
--- a/runtime/vm/bootstrap_natives.h
+++ b/runtime/vm/bootstrap_natives.h
@@ -145,7 +145,7 @@
   V(TypedData_Float32Array_new, 1)                                             \
   V(TypedData_Float64Array_new, 1)                                             \
   V(TypedData_Float32x4Array_new, 1)                                           \
-  V(TypedData_Int32x4Array_new, 1)                                            \
+  V(TypedData_Int32x4Array_new, 1)                                             \
   V(ExternalTypedData_Int8Array_new, 1)                                        \
   V(ExternalTypedData_Uint8Array_new, 1)                                       \
   V(ExternalTypedData_Uint8ClampedArray_new, 1)                                \
@@ -158,7 +158,7 @@
   V(ExternalTypedData_Float32Array_new, 1)                                     \
   V(ExternalTypedData_Float64Array_new, 1)                                     \
   V(ExternalTypedData_Float32x4Array_new, 1)                                   \
-  V(ExternalTypedData_Int32x4Array_new, 1)                                    \
+  V(ExternalTypedData_Int32x4Array_new, 1)                                     \
   V(TypedData_length, 1)                                                       \
   V(TypedData_setRange, 5)                                                     \
   V(TypedData_GetInt8, 2)                                                      \
@@ -183,8 +183,8 @@
   V(TypedData_SetFloat64, 3)                                                   \
   V(TypedData_GetFloat32x4, 2)                                                 \
   V(TypedData_SetFloat32x4, 3)                                                 \
-  V(TypedData_GetInt32x4, 2)                                                  \
-  V(TypedData_SetInt32x4, 3)                                                  \
+  V(TypedData_GetInt32x4, 2)                                                   \
+  V(TypedData_SetInt32x4, 3)                                                   \
   V(ByteData_ToEndianInt16, 2)                                                 \
   V(ByteData_ToEndianUint16, 2)                                                \
   V(ByteData_ToEndianInt32, 2)                                                 \
@@ -195,7 +195,7 @@
   V(ByteData_ToEndianFloat64, 2)                                               \
   V(Float32x4_fromDoubles, 5)                                                  \
   V(Float32x4_splat, 2)                                                        \
-  V(Float32x4_fromInt32x4Bits, 2)                                             \
+  V(Float32x4_fromInt32x4Bits, 2)                                              \
   V(Float32x4_zero, 1)                                                         \
   V(Float32x4_add, 2)                                                          \
   V(Float32x4_negate, 1)                                                       \
@@ -227,34 +227,34 @@
   V(Float32x4_sqrt, 1)                                                         \
   V(Float32x4_reciprocal, 1)                                                   \
   V(Float32x4_reciprocalSqrt, 1)                                               \
-  V(Int32x4_fromInts, 5)                                                      \
-  V(Int32x4_fromBools, 5)                                                     \
-  V(Int32x4_fromFloat32x4Bits, 2)                                             \
-  V(Int32x4_or, 2)                                                            \
-  V(Int32x4_and, 2)                                                           \
-  V(Int32x4_xor, 2)                                                           \
-  V(Int32x4_add, 2)                                                           \
-  V(Int32x4_sub, 2)                                                           \
-  V(Int32x4_getX, 1)                                                          \
-  V(Int32x4_getY, 1)                                                          \
-  V(Int32x4_getZ, 1)                                                          \
-  V(Int32x4_getW, 1)                                                          \
-  V(Int32x4_setX, 2)                                                          \
-  V(Int32x4_setY, 2)                                                          \
-  V(Int32x4_setZ, 2)                                                          \
-  V(Int32x4_setW, 2)                                                          \
-  V(Int32x4_getSignMask, 1)                                                   \
-  V(Int32x4_shuffle, 2)                                                       \
-  V(Int32x4_shuffleMix, 3)                                                    \
-  V(Int32x4_getFlagX, 1)                                                      \
-  V(Int32x4_getFlagY, 1)                                                      \
-  V(Int32x4_getFlagZ, 1)                                                      \
-  V(Int32x4_getFlagW, 1)                                                      \
-  V(Int32x4_setFlagX, 2)                                                      \
-  V(Int32x4_setFlagY, 2)                                                      \
-  V(Int32x4_setFlagZ, 2)                                                      \
-  V(Int32x4_setFlagW, 2)                                                      \
-  V(Int32x4_select, 3)                                                        \
+  V(Int32x4_fromInts, 5)                                                       \
+  V(Int32x4_fromBools, 5)                                                      \
+  V(Int32x4_fromFloat32x4Bits, 2)                                              \
+  V(Int32x4_or, 2)                                                             \
+  V(Int32x4_and, 2)                                                            \
+  V(Int32x4_xor, 2)                                                            \
+  V(Int32x4_add, 2)                                                            \
+  V(Int32x4_sub, 2)                                                            \
+  V(Int32x4_getX, 1)                                                           \
+  V(Int32x4_getY, 1)                                                           \
+  V(Int32x4_getZ, 1)                                                           \
+  V(Int32x4_getW, 1)                                                           \
+  V(Int32x4_setX, 2)                                                           \
+  V(Int32x4_setY, 2)                                                           \
+  V(Int32x4_setZ, 2)                                                           \
+  V(Int32x4_setW, 2)                                                           \
+  V(Int32x4_getSignMask, 1)                                                    \
+  V(Int32x4_shuffle, 2)                                                        \
+  V(Int32x4_shuffleMix, 3)                                                     \
+  V(Int32x4_getFlagX, 1)                                                       \
+  V(Int32x4_getFlagY, 1)                                                       \
+  V(Int32x4_getFlagZ, 1)                                                       \
+  V(Int32x4_getFlagW, 1)                                                       \
+  V(Int32x4_setFlagX, 2)                                                       \
+  V(Int32x4_setFlagY, 2)                                                       \
+  V(Int32x4_setFlagZ, 2)                                                       \
+  V(Int32x4_setFlagW, 2)                                                       \
+  V(Int32x4_select, 3)                                                         \
   V(Isolate_mainPort, 0)                                                       \
   V(Isolate_spawnFunction, 1)                                                  \
   V(Isolate_spawnUri, 1)                                                       \
diff --git a/runtime/vm/class_finalizer.cc b/runtime/vm/class_finalizer.cc
index 6519cc2..1ac53e7 100644
--- a/runtime/vm/class_finalizer.cc
+++ b/runtime/vm/class_finalizer.cc
@@ -1273,6 +1273,9 @@
       // A constructor cannot override anything.
       for (intptr_t i = 0; i < interfaces.Length(); i++) {
         super_class ^= interfaces.At(i);
+        // Finalize the superclass first: the override check relies on
+        // all of its members being finalized.
+        FinalizeClass(super_class);
         overridden_function = super_class.LookupDynamicFunction(name);
         if (!overridden_function.IsNull() &&
             !function.HasCompatibleParametersWith(overridden_function,
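For context, a hedged sketch of the kind of program this ordering fix serves (class names are illustrative): checking that C.m is a compatible override requires the interface B to be finalized first, which is now guaranteed explicitly:

    class B {
      m(int x) => x;
    }

    class C implements B {
      // Validating this override against B.m needs B's members to be
      // finalized before the compatibility check runs.
      m(int x) => x + 1;
    }

    main() => print(new C().m(1));  // 2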
diff --git a/runtime/vm/deferred_objects.cc b/runtime/vm/deferred_objects.cc
index 5a6412d..defd14e 100644
--- a/runtime/vm/deferred_objects.cc
+++ b/runtime/vm/deferred_objects.cc
@@ -13,7 +13,7 @@
 DECLARE_FLAG(bool, trace_deoptimization_verbose);
 
 
-void DeferredDouble::Materialize() {
+void DeferredDouble::Materialize(DeoptContext* deopt_context) {
   RawDouble** double_slot = reinterpret_cast<RawDouble**>(slot());
   *double_slot = Double::New(value());
 
@@ -24,7 +24,7 @@
 }
 
 
-void DeferredMint::Materialize() {
+void DeferredMint::Materialize(DeoptContext* deopt_context) {
   RawMint** mint_slot = reinterpret_cast<RawMint**>(slot());
   ASSERT(!Smi::IsValid64(value()));
   Mint& mint = Mint::Handle();
@@ -38,7 +38,7 @@
 }
 
 
-void DeferredFloat32x4::Materialize() {
+void DeferredFloat32x4::Materialize(DeoptContext* deopt_context) {
   RawFloat32x4** float32x4_slot = reinterpret_cast<RawFloat32x4**>(slot());
   RawFloat32x4* raw_float32x4 = Float32x4::New(value());
   *float32x4_slot = raw_float32x4;
@@ -54,7 +54,7 @@
 }
 
 
-void DeferredInt32x4::Materialize() {
+void DeferredInt32x4::Materialize(DeoptContext* deopt_context) {
   RawInt32x4** int32x4_slot = reinterpret_cast<RawInt32x4**>(slot());
   RawInt32x4* raw_int32x4 = Int32x4::New(value());
   *int32x4_slot = raw_int32x4;
@@ -70,13 +70,8 @@
 }
 
 
-void DeferredObjectRef::Materialize() {
-  // TODO(turnidge): Consider passing the deopt_context to materialize
-  // instead of accessing it through the current isolate.  It would
-  // make it easier to test deferred object materialization in a unit
-  // test eventually.
-  DeferredObject* obj =
-      Isolate::Current()->deopt_context()->GetDeferredObject(index());
+void DeferredObjectRef::Materialize(DeoptContext* deopt_context) {
+  DeferredObject* obj = deopt_context->GetDeferredObject(index());
   *slot() = obj->object();
   if (FLAG_trace_deoptimization_verbose) {
     OS::PrintErr("writing instance ref at %" Px ": %s\n",
diff --git a/runtime/vm/deferred_objects.h b/runtime/vm/deferred_objects.h
index ae2e29f..809a2ad 100644
--- a/runtime/vm/deferred_objects.h
+++ b/runtime/vm/deferred_objects.h
@@ -13,6 +13,7 @@
 class Instance;
 class RawInstance;
 class RawObject;
+class DeoptContext;
 
 // Used by the deoptimization infrastructure to defer allocation of
 // unboxed objects until frame is fully rewritten and GC is safe.
@@ -27,7 +28,7 @@
   RawInstance** slot() const { return slot_; }
   DeferredSlot* next() const { return next_; }
 
-  virtual void Materialize() = 0;
+  virtual void Materialize(DeoptContext* deopt_context) = 0;
 
  private:
   RawInstance** const slot_;
@@ -42,7 +43,7 @@
   DeferredDouble(double value, RawInstance** slot, DeferredSlot* next)
       : DeferredSlot(slot, next), value_(value) { }
 
-  virtual void Materialize();
+  virtual void Materialize(DeoptContext* deopt_context);
 
   double value() const { return value_; }
 
@@ -58,7 +59,7 @@
   DeferredMint(int64_t value, RawInstance** slot, DeferredSlot* next)
       : DeferredSlot(slot, next), value_(value) { }
 
-  virtual void Materialize();
+  virtual void Materialize(DeoptContext* deopt_context);
 
   int64_t value() const { return value_; }
 
@@ -75,7 +76,7 @@
                     DeferredSlot* next)
       : DeferredSlot(slot, next), value_(value) { }
 
-  virtual void Materialize();
+  virtual void Materialize(DeoptContext* deopt_context);
 
   simd128_value_t value() const { return value_; }
 
@@ -92,7 +93,7 @@
                    DeferredSlot* next)
       : DeferredSlot(slot, next), value_(value) { }
 
-  virtual void Materialize();
+  virtual void Materialize(DeoptContext* deopt_context);
 
   simd128_value_t value() const { return value_; }
 
@@ -111,7 +112,7 @@
   DeferredObjectRef(intptr_t index, RawInstance** slot, DeferredSlot* next)
       : DeferredSlot(slot, next), index_(index) { }
 
-  virtual void Materialize();
+  virtual void Materialize(DeoptContext* deopt_context);
 
   intptr_t index() const { return index_; }
 
diff --git a/runtime/vm/deopt_instructions.cc b/runtime/vm/deopt_instructions.cc
index 6a1bad4..874c314 100644
--- a/runtime/vm/deopt_instructions.cc
+++ b/runtime/vm/deopt_instructions.cc
@@ -301,7 +301,8 @@
 }
 
 
-static void FillDeferredSlots(DeferredSlot** slot_list) {
+static void FillDeferredSlots(DeoptContext* deopt_context,
+                              DeferredSlot** slot_list) {
   DeferredSlot* slot = *slot_list;
   *slot_list = NULL;
 
@@ -309,7 +310,7 @@
     DeferredSlot* current = slot;
     slot = slot->next();
 
-    current->Materialize();
+    current->Materialize(deopt_context);
 
     delete current;
   }
@@ -325,8 +326,8 @@
   // objects can't be referencing other deferred objects because storing
   // an object into a field is always conservatively treated as escaping by
   // allocation sinking and load forwarding.
-  FillDeferredSlots(&deferred_boxes_);
-  FillDeferredSlots(&deferred_object_refs_);
+  FillDeferredSlots(this, &deferred_boxes_);
+  FillDeferredSlots(this, &deferred_object_refs_);
 
   // Compute total number of artificial arguments used during deoptimization.
   intptr_t deopt_arg_count = 0;
diff --git a/runtime/vm/disassembler.cc b/runtime/vm/disassembler.cc
index 121b01e..7803188 100644
--- a/runtime/vm/disassembler.cc
+++ b/runtime/vm/disassembler.cc
@@ -73,4 +73,30 @@
   free(p);
 }
 
+
+class FindAddrVisitor : public FindObjectVisitor {
+ public:
+  explicit FindAddrVisitor(uword addr)
+      : FindObjectVisitor(Isolate::Current()), addr_(addr) { }
+  virtual ~FindAddrVisitor() { }
+
+  virtual uword filter_addr() const { return addr_; }
+
+  // Check if object matches find condition.
+  virtual bool FindObject(RawObject* obj) const {
+    return obj == reinterpret_cast<RawObject*>(addr_);
+  }
+
+ private:
+  const uword addr_;
+
+  DISALLOW_COPY_AND_ASSIGN(FindAddrVisitor);
+};
+
+
+bool Disassembler::CanFindOldObject(uword addr) {
+  FindAddrVisitor visitor(addr);
+  return Isolate::Current()->heap()->FindOldObject(&visitor) != Object::null();
+}
+
 }  // namespace dart
diff --git a/runtime/vm/disassembler.h b/runtime/vm/disassembler.h
index c7772a5..ec7aec5 100644
--- a/runtime/vm/disassembler.h
+++ b/runtime/vm/disassembler.h
@@ -128,6 +128,8 @@
                                 char* human_buffer, intptr_t human_size,
                                 int* out_instr_len, uword pc);
 
+  static bool CanFindOldObject(uword addr);
+
  private:
   static const int kHexadecimalBufferSize = 32;
   static const int kUserReadableBufferSize = 256;
diff --git a/runtime/vm/disassembler_ia32.cc b/runtime/vm/disassembler_ia32.cc
index 69e5ed0..150e451 100644
--- a/runtime/vm/disassembler_ia32.cc
+++ b/runtime/vm/disassembler_ia32.cc
@@ -472,8 +472,9 @@
   Print(addr_buffer);
   // Try to print as heap object or stub name
   if (((addr & kSmiTagMask) == kHeapObjectTag) &&
+      reinterpret_cast<RawObject*>(addr)->IsOldObject() &&
       !Isolate::Current()->heap()->CodeContains(addr) &&
-      Isolate::Current()->heap()->Contains(addr - kHeapObjectTag)) {
+      Disassembler::CanFindOldObject(addr)) {
     const Object& obj = Object::Handle(reinterpret_cast<RawObject*>(addr));
     if (obj.IsArray()) {
       const Array& arr = Array::Cast(obj);
diff --git a/runtime/vm/disassembler_x64.cc b/runtime/vm/disassembler_x64.cc
index 0055f6f..86f8d25 100644
--- a/runtime/vm/disassembler_x64.cc
+++ b/runtime/vm/disassembler_x64.cc
@@ -807,8 +807,9 @@
   AppendToBuffer("%#" Px "", addr);
   // Try to print as heap object or stub name
   if (((addr & kSmiTagMask) == kHeapObjectTag) &&
+      reinterpret_cast<RawObject*>(addr)->IsOldObject() &&
       !Isolate::Current()->heap()->CodeContains(addr) &&
-      Isolate::Current()->heap()->Contains(addr - kHeapObjectTag)) {
+      Disassembler::CanFindOldObject(addr)) {
     const Object& obj = Object::Handle(reinterpret_cast<RawObject*>(addr));
     if (obj.IsArray()) {
       const Array& arr = Array::Cast(obj);
diff --git a/runtime/vm/flow_graph_builder.cc b/runtime/vm/flow_graph_builder.cc
index 7a915ba..0cdaac5 100644
--- a/runtime/vm/flow_graph_builder.cc
+++ b/runtime/vm/flow_graph_builder.cc
@@ -967,11 +967,15 @@
   // Call to stub that checks whether the debugger is in single
   // step mode. This call must happen before the contexts are
   // unchained so that captured variables can be inspected.
-  AddInstruction(new DebugStepCheckInstr(node->token_pos()));
+  // No debugger check is done in native functions.
+  const Function& function = owner()->parsed_function()->function();
+  if (!function.is_native()) {
+    AddInstruction(new DebugStepCheckInstr(node->token_pos(),
+                                           PcDescriptors::kReturn));
+  }
 
   Value* return_value = for_value.value();
   if (FLAG_enable_type_checks) {
-    const Function& function = owner()->parsed_function()->function();
     const bool is_implicit_dynamic_getter =
         (!function.is_static() &&
         ((function.kind() == RawFunction::kImplicitGetter) ||
@@ -983,8 +987,7 @@
     // However, factories may create an instance of the wrong type.
     if (!is_implicit_dynamic_getter && !function.IsConstructor()) {
       const AbstractType& dst_type =
-          AbstractType::ZoneHandle(
-              owner()->parsed_function()->function().result_type());
+          AbstractType::ZoneHandle(function.result_type());
       return_value = BuildAssignableValue(node->value()->token_pos(),
                                           return_value,
                                           dst_type,
@@ -3207,6 +3210,15 @@
 //                               value: <Expression> }
 void EffectGraphVisitor::HandleStoreLocal(StoreLocalNode* node,
                                           bool result_is_needed) {
+  // If the right-hand side is an expression that contains no safe
+  // point where the debugger could stop, add an explicit stub
+  // call.
+  if (node->value()->IsLiteralNode() ||
+      node->value()->IsLoadLocalNode()) {
+    AddInstruction(new DebugStepCheckInstr(node->token_pos(),
+                                           PcDescriptors::kRuntimeCall));
+  }
+
   ValueGraphVisitor for_value(owner());
   node->value()->Visit(&for_value);
   Append(for_value);
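A trivial Dart illustration of the store-local case handled above: neither assignment below otherwise contains a call or any other safe point, so the builder now inserts an explicit DebugStepCheck stub call before each (variable names are arbitrary):

    main() {
      var a = 42;  // Literal right-hand side: no implicit safe point.
      var b = a;   // Load-local right-hand side: likewise, so single
      print(b);    // stepping would skip both without the explicit check.
    }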
diff --git a/runtime/vm/flow_graph_compiler.cc b/runtime/vm/flow_graph_compiler.cc
index 2e90989b..5ad7edb 100644
--- a/runtime/vm/flow_graph_compiler.cc
+++ b/runtime/vm/flow_graph_compiler.cc
@@ -17,6 +17,7 @@
 #include "vm/longjump.h"
 #include "vm/object_store.h"
 #include "vm/parser.h"
+#include "vm/stack_frame.h"
 #include "vm/stub_code.h"
 #include "vm/symbols.h"
 
@@ -297,6 +298,51 @@
 }
 
 
+void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) {
+  ASSERT(is_optimizing());
+  Environment* env = instr->env();
+  CatchBlockEntryInstr* catch_block =
+      flow_graph().graph_entry()->GetCatchEntry(try_index);
+  const GrowableArray<Definition*>* idefs = catch_block->initial_definitions();
+
+  // Construct a ParallelMove instruction for parameters and locals. Skip the
+  // special locals exception_var and stacktrace_var since they will be filled
+  // when an exception is thrown. Constant locations are known to be the same
+  // at all instructions that may throw, and do not need to be materialized.
+
+  // Parameters first.
+  intptr_t i = 0;
+  const intptr_t num_non_copied_params = flow_graph().num_non_copied_params();
+  ParallelMoveInstr* move_instr = new ParallelMoveInstr();
+  for (; i < num_non_copied_params; ++i) {
+    if ((*idefs)[i]->IsConstant()) continue;  // Common constants
+    Location src = env->LocationAt(i);
+    intptr_t dest_index = i - num_non_copied_params;
+    Location dest = Location::StackSlot(dest_index);
+    move_instr->AddMove(dest, src);
+  }
+
+  // Process locals. Skip exception_var and stacktrace_var.
+  intptr_t local_base = kFirstLocalSlotFromFp + num_non_copied_params;
+  intptr_t ex_idx = local_base - catch_block->exception_var().index();
+  intptr_t st_idx = local_base - catch_block->stacktrace_var().index();
+  for (; i < flow_graph().variable_count(); ++i) {
+    if (i == ex_idx || i == st_idx) continue;
+    if ((*idefs)[i]->IsConstant()) continue;
+    Location src = env->LocationAt(i);
+    ASSERT(!src.IsFpuRegister());
+    ASSERT(!src.IsDoubleStackSlot());
+    intptr_t dest_index = i - num_non_copied_params;
+    Location dest = Location::StackSlot(dest_index);
+    move_instr->AddMove(dest, src);
+    // Update safepoint bitmap to indicate that the target location
+    // now contains a pointer.
+    instr->locs()->stack_bitmap()->Set(dest_index, true);
+  }
+  parallel_move_resolver()->EmitNativeCode(move_instr);
+}
+
+
 intptr_t FlowGraphCompiler::StackSize() const {
   if (is_optimizing_) {
     return flow_graph_.graph_entry()->spill_slot_count();
diff --git a/runtime/vm/flow_graph_compiler.h b/runtime/vm/flow_graph_compiler.h
index e86da7e..56468e7 100644
--- a/runtime/vm/flow_graph_compiler.h
+++ b/runtime/vm/flow_graph_compiler.h
@@ -473,8 +473,6 @@
 
   void EmitFrameEntry();
 
-  void EmitTrySyncMove(intptr_t dest_offset, Location loc, bool* push_emitted);
-
   void AddStaticCallTarget(const Function& function);
 
   void GenerateDeferredCode();
diff --git a/runtime/vm/flow_graph_compiler_arm.cc b/runtime/vm/flow_graph_compiler_arm.cc
index f8d4182..082c46c 100644
--- a/runtime/vm/flow_graph_compiler_arm.cc
+++ b/runtime/vm/flow_graph_compiler_arm.cc
@@ -718,74 +718,6 @@
 }
 
 
-void FlowGraphCompiler::EmitTrySyncMove(intptr_t dest_offset,
-                                        Location loc,
-                                        bool* push_emitted) {
-  if (loc.IsConstant()) {
-    if (!*push_emitted) {
-      __ Push(R0);
-      *push_emitted = true;
-    }
-    __ LoadObject(R0, loc.constant());
-    __ StoreToOffset(kWord, R0, FP, dest_offset);
-  } else if (loc.IsRegister()) {
-    if (*push_emitted && (loc.reg() == R0)) {
-      __ ldr(R0, Address(SP, 0));
-      __ StoreToOffset(kWord, R0, FP, dest_offset);
-    } else {
-      __ StoreToOffset(kWord, loc.reg(), FP, dest_offset);
-    }
-  } else {
-    const intptr_t src_offset = loc.ToStackSlotOffset();
-    if (src_offset != dest_offset) {
-      if (!*push_emitted) {
-        __ Push(R0);
-        *push_emitted = true;
-      }
-      __ LoadFromOffset(kWord, R0, FP, src_offset);
-      __ StoreToOffset(kWord, R0, FP, dest_offset);
-    }
-  }
-}
-
-
-void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) {
-  ASSERT(is_optimizing());
-  Environment* env = instr->env();
-  CatchBlockEntryInstr* catch_block =
-      flow_graph().graph_entry()->GetCatchEntry(try_index);
-  const GrowableArray<Definition*>* idefs = catch_block->initial_definitions();
-  // Parameters.
-  intptr_t i = 0;
-  bool push_emitted = false;
-  const intptr_t num_non_copied_params = flow_graph().num_non_copied_params();
-  const intptr_t param_base =
-      kParamEndSlotFromFp + num_non_copied_params;
-  for (; i < num_non_copied_params; ++i) {
-    if ((*idefs)[i]->IsConstant()) continue;  // Common constants
-    Location loc = env->LocationAt(i);
-    EmitTrySyncMove((param_base - i) * kWordSize, loc, &push_emitted);
-  }
-
-  // Process locals. Skip exception_var and stacktrace_var.
-  intptr_t local_base = kFirstLocalSlotFromFp + num_non_copied_params;
-  intptr_t ex_idx = local_base - catch_block->exception_var().index();
-  intptr_t st_idx = local_base - catch_block->stacktrace_var().index();
-  for (; i < flow_graph().variable_count(); ++i) {
-    if (i == ex_idx || i == st_idx) continue;
-    if ((*idefs)[i]->IsConstant()) continue;
-    Location loc = env->LocationAt(i);
-    EmitTrySyncMove((local_base - i) * kWordSize, loc, &push_emitted);
-    // Update safepoint bitmap to indicate that the target location
-    // now contains a pointer.
-    instr->locs()->stack_bitmap()->Set(i - num_non_copied_params, true);
-  }
-  if (push_emitted) {
-    __ Pop(R0);
-  }
-}
-
-
 void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
   if (is_optimizing()) return;
   Definition* defn = instr->AsDefinition();
diff --git a/runtime/vm/flow_graph_compiler_ia32.cc b/runtime/vm/flow_graph_compiler_ia32.cc
index 807e6e4..cfbdaed 100644
--- a/runtime/vm/flow_graph_compiler_ia32.cc
+++ b/runtime/vm/flow_graph_compiler_ia32.cc
@@ -742,75 +742,6 @@
 }
 
 
-void FlowGraphCompiler::EmitTrySyncMove(intptr_t dest_offset,
-                                        Location loc,
-                                        bool* push_emitted) {
-  const Address dest(EBP, dest_offset);
-  if (loc.IsConstant()) {
-    if (!*push_emitted) {
-      __ pushl(EAX);
-      *push_emitted = true;
-    }
-    __ LoadObject(EAX, loc.constant());
-    __ movl(dest, EAX);
-  } else if (loc.IsRegister()) {
-    if (*push_emitted && loc.reg() == EAX) {
-      __ movl(EAX, Address(ESP, 0));
-      __ movl(dest, EAX);
-    } else {
-      __ movl(dest, loc.reg());
-    }
-  } else {
-    Address src = loc.ToStackSlotAddress();
-    if (!src.Equals(dest)) {
-      if (!*push_emitted) {
-        __ pushl(EAX);
-        *push_emitted = true;
-      }
-      __ movl(EAX, src);
-      __ movl(dest, EAX);
-    }
-  }
-}
-
-
-void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) {
-  ASSERT(is_optimizing());
-  Environment* env = instr->env();
-  CatchBlockEntryInstr* catch_block =
-      flow_graph().graph_entry()->GetCatchEntry(try_index);
-  const GrowableArray<Definition*>* idefs = catch_block->initial_definitions();
-  // Parameters.
-  intptr_t i = 0;
-  bool push_emitted = false;
-  const intptr_t num_non_copied_params = flow_graph().num_non_copied_params();
-  const intptr_t param_base =
-      kParamEndSlotFromFp + num_non_copied_params;
-  for (; i < num_non_copied_params; ++i) {
-    if ((*idefs)[i]->IsConstant()) continue;  // Common constants
-    Location loc = env->LocationAt(i);
-    EmitTrySyncMove((param_base - i) * kWordSize, loc, &push_emitted);
-  }
-
-  // Process locals. Skip exception_var and stacktrace_var.
-  intptr_t local_base = kFirstLocalSlotFromFp + num_non_copied_params;
-  intptr_t ex_idx = local_base - catch_block->exception_var().index();
-  intptr_t st_idx = local_base - catch_block->stacktrace_var().index();
-  for (; i < flow_graph().variable_count(); ++i) {
-    if (i == ex_idx || i == st_idx) continue;
-    if ((*idefs)[i]->IsConstant()) continue;
-    Location loc = env->LocationAt(i);
-    EmitTrySyncMove((local_base - i) * kWordSize, loc, &push_emitted);
-    // Update safepoint bitmap to indicate that the target location
-    // now contains a pointer.
-    instr->locs()->stack_bitmap()->Set(i - num_non_copied_params, true);
-  }
-  if (push_emitted) {
-    __ popl(EAX);
-  }
-}
-
-
 void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
   if (is_optimizing()) return;
   Definition* defn = instr->AsDefinition();
diff --git a/runtime/vm/flow_graph_compiler_mips.cc b/runtime/vm/flow_graph_compiler_mips.cc
index f524363..49e259d 100644
--- a/runtime/vm/flow_graph_compiler_mips.cc
+++ b/runtime/vm/flow_graph_compiler_mips.cc
@@ -741,74 +741,6 @@
 }
 
 
-void FlowGraphCompiler::EmitTrySyncMove(intptr_t dest_offset,
-                                        Location loc,
-                                        bool* push_emitted) {
-  if (loc.IsConstant()) {
-    if (!*push_emitted) {
-      __ Push(T0);
-      *push_emitted = true;
-    }
-    __ LoadObject(T0, loc.constant());
-    __ StoreToOffset(T0, FP, dest_offset);
-  } else if (loc.IsRegister()) {
-    if (*push_emitted && loc.reg() == T0) {
-      __ lw(T0, Address(SP, 0));
-      __ StoreToOffset(T0, FP, dest_offset);
-    } else {
-      __ StoreToOffset(loc.reg(), FP, dest_offset);
-    }
-  } else {
-    const intptr_t src_offset = loc.ToStackSlotOffset();
-    if (src_offset != dest_offset) {
-      if (!*push_emitted) {
-        __ Push(T0);
-        *push_emitted = true;
-      }
-      __ LoadFromOffset(T0, FP, src_offset);
-      __ StoreToOffset(T0, FP, dest_offset);
-    }
-  }
-}
-
-
-void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) {
-  ASSERT(is_optimizing());
-  Environment* env = instr->env();
-  CatchBlockEntryInstr* catch_block =
-      flow_graph().graph_entry()->GetCatchEntry(try_index);
-  const GrowableArray<Definition*>* idefs = catch_block->initial_definitions();
-  // Parameters.
-  intptr_t i = 0;
-  bool push_emitted = false;
-  const intptr_t num_non_copied_params = flow_graph().num_non_copied_params();
-  const intptr_t param_base =
-      kParamEndSlotFromFp + num_non_copied_params;
-  for (; i < num_non_copied_params; ++i) {
-    if ((*idefs)[i]->IsConstant()) continue;  // Common constants
-    Location loc = env->LocationAt(i);
-    EmitTrySyncMove((param_base - i) * kWordSize, loc, &push_emitted);
-  }
-
-  // Process locals. Skip exception_var and stacktrace_var.
-  intptr_t local_base = kFirstLocalSlotFromFp + num_non_copied_params;
-  intptr_t ex_idx = local_base - catch_block->exception_var().index();
-  intptr_t st_idx = local_base - catch_block->stacktrace_var().index();
-  for (; i < flow_graph().variable_count(); ++i) {
-    if (i == ex_idx || i == st_idx) continue;
-    if ((*idefs)[i]->IsConstant()) continue;
-    Location loc = env->LocationAt(i);
-    EmitTrySyncMove((local_base - i) * kWordSize, loc, &push_emitted);
-    // Update safepoint bitmap to indicate that the target location
-    // now contains a pointer.
-    instr->locs()->stack_bitmap()->Set(i - num_non_copied_params, true);
-  }
-  if (push_emitted) {
-    __ Pop(T0);
-  }
-}
-
-
 void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
   if (is_optimizing()) return;
   Definition* defn = instr->AsDefinition();
diff --git a/runtime/vm/flow_graph_compiler_x64.cc b/runtime/vm/flow_graph_compiler_x64.cc
index 496ea0a..293ae4f 100644
--- a/runtime/vm/flow_graph_compiler_x64.cc
+++ b/runtime/vm/flow_graph_compiler_x64.cc
@@ -722,74 +722,6 @@
 }
 
 
-void FlowGraphCompiler::EmitTrySyncMove(intptr_t dest_offset,
-                                        Location loc,
-                                        bool* push_emitted) {
-  const Address dest(RBP, dest_offset);
-  if (loc.IsConstant()) {
-    if (!*push_emitted) {
-      __ pushq(RAX);
-      *push_emitted = true;
-    }
-    __ LoadObject(RAX, loc.constant(), PP);
-    __ movq(dest, RAX);
-  } else if (loc.IsRegister()) {
-    if (*push_emitted && loc.reg() == RAX) {
-      __ movq(RAX, Address(RSP, 0));
-      __ movq(dest, RAX);
-    } else {
-      __ movq(dest, loc.reg());
-    }
-  } else {
-    Address src = loc.ToStackSlotAddress();
-    if (!src.Equals(dest)) {
-      if (!*push_emitted) {
-        __ pushq(RAX);
-        *push_emitted = true;
-      }
-      __ movq(RAX, src);
-      __ movq(dest, RAX);
-    }
-  }
-}
-
-
-void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) {
-  ASSERT(is_optimizing());
-  Environment* env = instr->env();
-  CatchBlockEntryInstr* catch_block =
-      flow_graph().graph_entry()->GetCatchEntry(try_index);
-  const GrowableArray<Definition*>* idefs = catch_block->initial_definitions();
-  // Parameters.
-  intptr_t i = 0;
-  bool push_emitted = false;
-  const intptr_t num_non_copied_params = flow_graph().num_non_copied_params();
-  const intptr_t param_base = kParamEndSlotFromFp + num_non_copied_params;
-  for (; i < num_non_copied_params; ++i) {
-    if ((*idefs)[i]->IsConstant()) continue;  // Common constants
-    Location loc = env->LocationAt(i);
-    EmitTrySyncMove((param_base - i) * kWordSize, loc, &push_emitted);
-  }
-
-  // Process locals. Skip exception_var and stacktrace_var.
-  intptr_t local_base = kFirstLocalSlotFromFp + num_non_copied_params;
-  intptr_t ex_idx = local_base - catch_block->exception_var().index();
-  intptr_t st_idx = local_base - catch_block->stacktrace_var().index();
-  for (; i < flow_graph().variable_count(); ++i) {
-    if (i == ex_idx || i == st_idx) continue;
-    if ((*idefs)[i]->IsConstant()) continue;
-    Location loc = env->LocationAt(i);
-    EmitTrySyncMove((local_base - i) * kWordSize, loc, &push_emitted);
-    // Update safepoint bitmap to indicate that the target location
-    // now contains a pointer.
-    instr->locs()->stack_bitmap()->Set(i - num_non_copied_params, true);
-  }
-  if (push_emitted) {
-    __ popq(RAX);
-  }
-}
-
-
 void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
   if (is_optimizing()) return;
   Definition* defn = instr->AsDefinition();
diff --git a/runtime/vm/flow_graph_inliner.cc b/runtime/vm/flow_graph_inliner.cc
index f631d94..3c50a5b 100644
--- a/runtime/vm/flow_graph_inliner.cc
+++ b/runtime/vm/flow_graph_inliner.cc
@@ -1060,7 +1060,7 @@
 
 bool PolymorphicInliner::TryInlining(intptr_t receiver_cid,
                                      const Function& target) {
-  if (!target.IsOptimizable()) {
+  if (!target.IsInlineable()) {
     if (TryInlineRecognizedMethod(receiver_cid, target)) {
       owner_->inlined_ = true;
       return true;
diff --git a/runtime/vm/flow_graph_optimizer.cc b/runtime/vm/flow_graph_optimizer.cc
index c6443fe..d488c48 100644
--- a/runtime/vm/flow_graph_optimizer.cc
+++ b/runtime/vm/flow_graph_optimizer.cc
@@ -538,8 +538,20 @@
 void FlowGraphOptimizer::InsertConversion(Representation from,
                                           Representation to,
                                           Value* use,
-                                          Instruction* insert_before,
-                                          Instruction* deopt_target) {
+                                          bool is_environment_use) {
+  Instruction* insert_before;
+  Instruction* deopt_target;
+  PhiInstr* phi = use->instruction()->AsPhi();
+  if (phi != NULL) {
+    ASSERT(phi->is_alive());
+    // For phis conversions have to be inserted in the predecessor.
+    insert_before =
+        phi->block()->PredecessorAt(use->use_index())->last_instruction();
+    deopt_target = NULL;
+  } else {
+    deopt_target = insert_before = use->instruction();
+  }
+
   Definition* converted = NULL;
   if ((from == kTagged) && (to == kUnboxedMint)) {
     ASSERT((deopt_target != NULL) ||
@@ -632,9 +644,13 @@
     }
   }
   ASSERT(converted != NULL);
-  use->BindTo(converted);
   InsertBefore(insert_before, converted, use->instruction()->env(),
                Definition::kValue);
+  if (is_environment_use) {
+    use->BindToEnvironment(converted);
+  } else {
+    use->BindTo(converted);
+  }
 }
 
 
@@ -644,21 +660,17 @@
   if (from_rep == to_rep || to_rep == kNoRepresentation) {
     return;
   }
+  InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/ false);
+}
 
-  Instruction* insert_before;
-  Instruction* deopt_target;
-  PhiInstr* phi = use->instruction()->AsPhi();
-  if (phi != NULL) {
-    ASSERT(phi->is_alive());
-    // For phis conversions have to be inserted in the predecessor.
-    insert_before =
-        phi->block()->PredecessorAt(use->use_index())->last_instruction();
-    deopt_target = NULL;
-  } else {
-    deopt_target = insert_before = use->instruction();
+
+void FlowGraphOptimizer::ConvertEnvironmentUse(Value* use,
+                                               Representation from_rep) {
+  const Representation to_rep = kTagged;
+  if (from_rep == to_rep || to_rep == kNoRepresentation) {
+    return;
   }
-
-  InsertConversion(from_rep, to_rep, use, insert_before, deopt_target);
+  InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/ true);
 }
 
 
@@ -670,6 +682,18 @@
        it.Advance()) {
     ConvertUse(it.Current(), from_rep);
   }
+
+  for (Value::Iterator it(def->env_use_list());
+       !it.Done();
+       it.Advance()) {
+    Value* use = it.Current();
+    if (use->instruction()->MayThrow() &&
+        use->instruction()->GetBlock()->InsideTryBlock()) {
+      // Environment uses at calls inside try-blocks must be converted to
+      // tagged representation.
+      ConvertEnvironmentUse(it.Current(), from_rep);
+    }
+  }
 }
 
 
@@ -5072,8 +5096,10 @@
            use = use->next_use()) {
         Instruction* instr = use->instruction();
         if (instr->IsPushArgument() ||
-            (instr->IsStoreVMField() && (use->use_index() != 1)) ||
-            (instr->IsStoreInstanceField() && (use->use_index() != 0)) ||
+            (instr->IsStoreVMField()
+             && (use->use_index() != StoreVMFieldInstr::kObjectPos)) ||
+            (instr->IsStoreInstanceField()
+             && (use->use_index() != StoreInstanceFieldInstr::kInstancePos)) ||
             instr->IsStoreStaticField() ||
             instr->IsPhi() ||
             instr->IsAssertAssignable() ||
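An illustrative Dart shape for the environment-use conversion added above: inside the try, d may be held unboxed, but at any instruction that can throw, its environment slot must carry a tagged (boxed) value so the catch handler and deoptimization see a proper object:

    double sum(List<double> xs) {
      var d = 0.0;        // Plausibly unboxed in optimized code.
      try {
        for (var x in xs) {
          d += x;         // Any throwing instruction in here needs d's
        }                 // environment value in tagged form for the
      } catch (e) {       // handler.
        return -1.0;
      }
      return d;
    }

    main() => print(sum([1.0, 2.0, 3.5]));  // 6.5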
diff --git a/runtime/vm/flow_graph_optimizer.h b/runtime/vm/flow_graph_optimizer.h
index 8c3b3bd..a3d035c 100644
--- a/runtime/vm/flow_graph_optimizer.h
+++ b/runtime/vm/flow_graph_optimizer.h
@@ -176,12 +176,12 @@
   void InsertConversionsFor(Definition* def);
 
   void ConvertUse(Value* use, Representation from);
+  void ConvertEnvironmentUse(Value* use, Representation from);
 
   void InsertConversion(Representation from,
                         Representation to,
                         Value* use,
-                        Instruction* insert_before,
-                        Instruction* deopt_target);
+                        bool is_environment_use);
 
   bool InstanceCallNeedsClassCheck(InstanceCallInstr* call) const;
   bool MethodExtractorNeedsClassCheck(InstanceCallInstr* call) const;
diff --git a/runtime/vm/heap.cc b/runtime/vm/heap.cc
index 63d0c9d..b3d604a 100644
--- a/runtime/vm/heap.cc
+++ b/runtime/vm/heap.cc
@@ -155,6 +155,11 @@
 }
 
 
+RawObject* Heap::FindOldObject(FindObjectVisitor* visitor) const {
+  return old_space_->FindObject(visitor, HeapPage::kData);
+}
+
+
 void Heap::CollectGarbage(Space space, ApiCallbacks api_callbacks) {
   bool invoke_api_callbacks = (api_callbacks == kInvokeApiCallbacks);
   switch (space) {
diff --git a/runtime/vm/heap.h b/runtime/vm/heap.h
index 680c294..2c39e2b 100644
--- a/runtime/vm/heap.h
+++ b/runtime/vm/heap.h
@@ -132,6 +132,7 @@
   // traversal through the heap space continues.
   RawInstructions* FindObjectInCodeSpace(FindObjectVisitor* visitor);
   RawInstructions* FindObjectInStubCodeSpace(FindObjectVisitor* visitor);
+  RawObject* FindOldObject(FindObjectVisitor* visitor) const;
 
   void CollectGarbage(Space space);
   void CollectGarbage(Space space, ApiCallbacks api_callbacks);
diff --git a/runtime/vm/intermediate_language.cc b/runtime/vm/intermediate_language.cc
index 4a9d61b..1121281 100644
--- a/runtime/vm/intermediate_language.cc
+++ b/runtime/vm/intermediate_language.cc
@@ -1468,10 +1468,7 @@
   ASSERT(!compiler->is_optimizing());
   const ExternalLabel label("debug_step_check",
                             StubCode::DebugStepCheckEntryPoint());
-  compiler->GenerateCall(token_pos(),
-                         &label,
-                         PcDescriptors::kReturn,
-                         locs());
+  compiler->GenerateCall(token_pos(), &label, stub_kind_, locs());
 }
 
 
diff --git a/runtime/vm/intermediate_language.h b/runtime/vm/intermediate_language.h
index 1ed8ab8..2c7bcb4 100644
--- a/runtime/vm/intermediate_language.h
+++ b/runtime/vm/intermediate_language.h
@@ -482,6 +482,7 @@
 
   // Change the definition after use lists have been computed.
   inline void BindTo(Definition* definition);
+  inline void BindToEnvironment(Definition* definition);
 
   Value* Copy() { return new Value(definition_); }
 
@@ -1794,6 +1795,13 @@
 }
 
 
+inline void Value::BindToEnvironment(Definition* def) {
+  RemoveFromUseList();
+  set_definition(def);
+  def->AddEnvUse(this);
+}
+
+
 class PhiInstr : public Definition {
  public:
   PhiInstr(JoinEntryInstr* block, intptr_t num_inputs)
@@ -3419,8 +3427,10 @@
 
 class DebugStepCheckInstr : public TemplateInstruction<0> {
  public:
-  explicit DebugStepCheckInstr(intptr_t token_pos)
-      : token_pos_(token_pos) {
+  DebugStepCheckInstr(intptr_t token_pos,
+                      PcDescriptors::Kind stub_kind)
+      : token_pos_(token_pos),
+        stub_kind_(stub_kind) {
   }
 
   DECLARE_INSTRUCTION(DebugStepCheck)
@@ -3434,6 +3444,7 @@
 
  private:
   const intptr_t token_pos_;
+  const PcDescriptors::Kind stub_kind_;
 
   DISALLOW_COPY_AND_ASSIGN(DebugStepCheckInstr);
 };
@@ -4305,19 +4316,19 @@
       : offset_in_bytes_(offset_in_bytes), type_(type) {
     ASSERT(type.IsZoneHandle());  // May be null if field is not an instance.
     SetInputAt(kValuePos, value);
-    SetInputAt(kDestPos, dest);
+    SetInputAt(kObjectPos, dest);
   }
 
   enum {
     kValuePos = 0,
-    kDestPos = 1
+    kObjectPos = 1
   };
 
   DECLARE_INSTRUCTION(StoreVMField)
   virtual CompileType* ComputeInitialType() const;
 
   Value* value() const { return inputs_[kValuePos]; }
-  Value* dest() const { return inputs_[kDestPos]; }
+  Value* dest() const { return inputs_[kObjectPos]; }
   intptr_t offset_in_bytes() const { return offset_in_bytes_; }
   const AbstractType& type() const { return type_; }
 
diff --git a/runtime/vm/intermediate_language_arm.cc b/runtime/vm/intermediate_language_arm.cc
index 2a0ea82..db31d4a 100644
--- a/runtime/vm/intermediate_language_arm.cc
+++ b/runtime/vm/intermediate_language_arm.cc
@@ -1319,8 +1319,6 @@
 
   Label* fail = (deopt != NULL) ? deopt : &fail_label;
 
-  const bool ok_is_fall_through = (deopt != NULL);
-
   if (!compiler->is_optimizing() || (field_cid == kIllegalCid)) {
     if (!compiler->is_optimizing() && (field_reg == kNoRegister)) {
       // Currently we can't have different location summaries for optimized
@@ -1505,12 +1503,10 @@
         }
       }
     }
-    if (!ok_is_fall_through) {
-      __ b(&ok);
-    }
 
     if (deopt == NULL) {
       ASSERT(!compiler->is_optimizing());
+      __ b(&ok);
       __ Bind(fail);
 
       __ ldr(IP, FieldAddress(field_reg, Field::guarded_cid_offset()));
@@ -1525,7 +1521,6 @@
   } else {
     ASSERT(compiler->is_optimizing());
     ASSERT(deopt != NULL);
-    ASSERT(ok_is_fall_through);
     // Field guard class has been initialized and is known.
     if (field_reg != kNoRegister) {
       __ LoadObject(field_reg, Field::ZoneHandle(field().raw()));
diff --git a/runtime/vm/intermediate_language_ia32.cc b/runtime/vm/intermediate_language_ia32.cc
index 27fef148..fe0c840 100644
--- a/runtime/vm/intermediate_language_ia32.cc
+++ b/runtime/vm/intermediate_language_ia32.cc
@@ -1307,8 +1307,6 @@
 
   Label* fail = (deopt != NULL) ? deopt : &fail_label;
 
-  const bool ok_is_fall_through = (deopt != NULL);
-
   if (!compiler->is_optimizing() || (field_cid == kIllegalCid)) {
     if (!compiler->is_optimizing() && (field_reg == kNoRegister)) {
       // Currently we can't have different location summaries for optimized
@@ -1509,12 +1507,9 @@
       }
     }
 
-    if (!ok_is_fall_through) {
-      __ jmp(&ok);
-    }
-
     if (deopt == NULL) {
       ASSERT(!compiler->is_optimizing());
+      __ jmp(&ok);
       __ Bind(fail);
 
       __ cmpl(FieldAddress(field_reg, Field::guarded_cid_offset()),
@@ -1529,7 +1524,6 @@
   } else {
     ASSERT(compiler->is_optimizing());
     ASSERT(deopt != NULL);
-    ASSERT(ok_is_fall_through);
     // Field guard class has been initialized and is known.
     if (field_reg != kNoRegister) {
       __ LoadObject(field_reg, Field::ZoneHandle(field().raw()));
diff --git a/runtime/vm/intermediate_language_mips.cc b/runtime/vm/intermediate_language_mips.cc
index cfa3c4a..548fb5b 100644
--- a/runtime/vm/intermediate_language_mips.cc
+++ b/runtime/vm/intermediate_language_mips.cc
@@ -1393,8 +1393,6 @@
 
   Label* fail = (deopt != NULL) ? deopt : &fail_label;
 
-  const bool ok_is_fall_through = (deopt != NULL);
-
   if (!compiler->is_optimizing() || (field_cid == kIllegalCid)) {
     if (!compiler->is_optimizing() && (field_reg == kNoRegister)) {
       // Currently we can't have different location summaries for optimized
@@ -1576,12 +1574,10 @@
         }
       }
     }
-    if (!ok_is_fall_through) {
-      __ b(&ok);
-    }
 
     if (deopt == NULL) {
       ASSERT(!compiler->is_optimizing());
+      __ b(&ok);
       __ Bind(fail);
 
       __ lw(CMPRES1, FieldAddress(field_reg, Field::guarded_cid_offset()));
@@ -1596,7 +1592,6 @@
   } else {
     ASSERT(compiler->is_optimizing());
     ASSERT(deopt != NULL);
-    ASSERT(ok_is_fall_through);
     // Field guard class has been initialized and is known.
     if (field_reg != kNoRegister) {
       __ LoadObject(field_reg, Field::ZoneHandle(field().raw()));
diff --git a/runtime/vm/intermediate_language_x64.cc b/runtime/vm/intermediate_language_x64.cc
index 21c719d..d2030e6 100644
--- a/runtime/vm/intermediate_language_x64.cc
+++ b/runtime/vm/intermediate_language_x64.cc
@@ -1210,8 +1210,6 @@
 
   Label* fail = (deopt != NULL) ? deopt : &fail_label;
 
-  const bool ok_is_fall_through = (deopt != NULL);
-
   if (!compiler->is_optimizing() || (field_cid == kIllegalCid)) {
     if (!compiler->is_optimizing() && (field_reg == kNoRegister)) {
       // Currently we can't have different location summaries for optimized
@@ -1409,12 +1407,10 @@
         }
       }
     }
-    if (!ok_is_fall_through) {
-      __ jmp(&ok);
-    }
 
     if (deopt == NULL) {
       ASSERT(!compiler->is_optimizing());
+      __ jmp(&ok);
       __ Bind(fail);
 
       __ CompareImmediate(FieldAddress(field_reg, Field::guarded_cid_offset()),
@@ -1429,7 +1425,6 @@
   } else {
     ASSERT(compiler->is_optimizing());
     ASSERT(deopt != NULL);
-    ASSERT(ok_is_fall_through);
     // Field guard class has been initialized and is known.
     if (field_reg != kNoRegister) {
       __ LoadObject(field_reg, Field::ZoneHandle(field().raw()), PP);
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 37fa9c3..98e442d 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -4841,6 +4841,9 @@
 void Function::SetIsOptimizable(bool value) const {
   ASSERT(!is_native());
   set_is_optimizable(value);
+  if (!value) {
+    set_is_inlinable(false);
+  }
 }
 
 
diff --git a/runtime/vm/profiler.cc b/runtime/vm/profiler.cc
index 86b6846..ad701cb 100644
--- a/runtime/vm/profiler.cc
+++ b/runtime/vm/profiler.cc
@@ -32,7 +32,7 @@
     defined(TARGET_OS_MACOS) || defined(TARGET_OS_ANDROID)
   DEFINE_FLAG(bool, profile, false, "Enable Sampling Profiler");
 #else
-  DEFINE_FLAG(bool, profile, true, "Enable Sampling Profiler");
+  DEFINE_FLAG(bool, profile, false, "Enable Sampling Profiler");
 #endif
 DEFINE_FLAG(bool, trace_profiled_isolates, false, "Trace profiled isolates.");
 DEFINE_FLAG(charp, profile_dir, NULL,
diff --git a/sdk/lib/io/stdio.dart b/sdk/lib/io/stdio.dart
index 5626d84..ea0a382 100644
--- a/sdk/lib/io/stdio.dart
+++ b/sdk/lib/io/stdio.dart
@@ -164,8 +164,7 @@
  *
  * Use [hasTerminal] to test if there is a terminal associated to stdout.
  */
-class Stdout extends _StdSink {
-  // TODO(15721): Should implement IOSink (for documentation purpose).
+class Stdout extends _StdSink implements IOSink {
   Stdout._(IOSink sink) : super(sink);
 
   /**
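With Stdout now declaring implements IOSink, code written against IOSink accepts stdout without a cast; a minimal sketch:

    import 'dart:io';

    void writeGreeting(IOSink sink) {
      sink.writeln('hello');  // Works for a file sink, a socket...
    }

    main() {
      writeGreeting(stdout);  // ...and now, statically, stdout as well.
    }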
diff --git a/tests/language/disasssemble_test.dart b/tests/language/disasssemble_test.dart
new file mode 100644
index 0000000..8e53224
--- /dev/null
+++ b/tests/language/disasssemble_test.dart
@@ -0,0 +1,17 @@
+// Copyright (c) 2014, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+// Dart test program for the VM disassembler (run with
+// --disassemble; see VMOptions below).
+// VMOptions=--disassemble
+
+// Tests proper object recognition in disassembler.
+
+f(x) {
+ return "foo";
+}
+
+main() {
+  print(f(0));
+}
+
diff --git a/tests/lib/lib.status b/tests/lib/lib.status
index 7679ca1..c25c984 100644
--- a/tests/lib/lib.status
+++ b/tests/lib/lib.status
@@ -198,9 +198,6 @@
 [ $runtime == vm ]
 async/timer_not_available_test: Fail, OK
 mirrors/native_class_test: Fail, OK # This test is meant to run in a browser.
-typed_data/setRange_1_test: Fail # Issue 15413
-typed_data/setRange_2_test: Fail # Issue 15413
-typed_data/setRange_3_test: Fail # Issue 15413
 mirrors/syntax_error_test/01: Fail # Issue 15886
 
 [ $compiler == none  ]
@@ -221,9 +218,6 @@
 mirrors/immutable_collections_test: Fail # Issue 11857, Issue 14321
 mirrors/library_uri_io_test: Skip # Not intended for drt as it uses dart:io.
 mirrors/local_isolate_test: Skip # http://dartbug.com/12188
-typed_data/setRange_1_test: Fail # Issue 15413
-typed_data/setRange_2_test: Fail # Issue 15413
-typed_data/setRange_3_test: Fail # Issue 15413
 
 [ $compiler == none && ( $runtime == drt || $runtime == dartium ) ]
 async/timer_test: Fail, Pass # Issue 15487
diff --git a/tests/lib/typed_data/setRange_4_test.dart b/tests/lib/typed_data/setRange_4_test.dart
new file mode 100644
index 0000000..8f50657
--- /dev/null
+++ b/tests/lib/typed_data/setRange_4_test.dart
@@ -0,0 +1,22 @@
+// Copyright (c) 2014, the Dart project authors.  Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+import 'setRange_lib.dart';
+import 'package:expect/expect.dart';
+import 'dart:typed_data';
+
+clampingTest() {
+  var a1 = new Int8List(8);
+  var a2 = new Uint8ClampedList.view(a1.buffer);
+  initialize(a1);
+  Expect.equals('[1, 2, 3, 4, 5, 6, 7, 8]', '$a1');
+  Expect.equals('[1, 2, 3, 4, 5, 6, 7, 8]', '$a2');
+  a1[0] = -1;
+  a2.setRange(0, 2, a1);
+  Expect.equals('[0, 2, 3, 4, 5, 6, 7, 8]', '$a2');
+}
+
+main() {
+  clampingTest();
+}
diff --git a/tools/VERSION b/tools/VERSION
index 02e71ef..8f227b3 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -28,4 +28,4 @@
 MINOR 1
 PATCH 0
 PRERELEASE 5
-PRERELEASE_PATCH 6
+PRERELEASE_PATCH 7