Version 1.5.0-dev.1.0
svn merge -r 36409:36469 https://dart.googlecode.com/svn/branches/bleeding_edge trunk
git-svn-id: http://dart.googlecode.com/svn/trunk@36471 260f80e4-7a28-3924-810f-c04153c831b5
diff --git a/docs/language/dartLangSpec.tex b/docs/language/dartLangSpec.tex
index fa2e883..a87c3a1 100644
--- a/docs/language/dartLangSpec.tex
+++ b/docs/language/dartLangSpec.tex
@@ -5223,10 +5223,16 @@
A {\em script} is a library whose exported namespace (\ref{exports}) includes a top-level function \code{main}.
A script $S$ may be executed as follows:
-First, $S$ is compiled as a library as specified above. Then, the top-level function \code{main} that is in the exported namespace of $S$ is invoked. If \code{main} has no formal parameters, it is invoked with no arguments. Otherwise \code{main} is invoked with a single actual argument whose type implements \code{List$<$String$>$}. It is a run time error if $S$ does not declare or import a top-level function \code{main}. It is a static warning if \code{main} has more than one required parameter.
+First, $S$ is compiled as a library as specified above. Then, the top-level function \code{main} that is in the exported namespace of $S$ is invoked. If \code{main} has no positional parameters, it is invoked with no arguments. Otherwise if \code{main} has exactly one positional parameter, it is invoked with a single actual argument whose runtime type implements \code{List$<$String$>$}. Otherwise \code{main} is invoked with the following two actual arguments:
+\begin{enumerate}
+\item An object whose runtime type implements \code{List$<$String$>$}.
+\item The initial message of the current isolate $i$ as determined by the invocation of \code{Isolate.spawnUri} that spawned $i$.
+\end{enumerate}
+
+It is a run time error if $S$ does not declare or import a top-level function \code{main}. It is a static warning if \code{main} has more than two required parameters.
\commentary {
-If \code{main} requires more than one argument, a run time error will occur.
+Note that if \code{main} requires more than two arguments, a run time error will occur.
}
\rationale{
diff --git a/pkg/collection/CHANGELOG.md b/pkg/collection/CHANGELOG.md
new file mode 100644
index 0000000..e5a6381
--- /dev/null
+++ b/pkg/collection/CHANGELOG.md
@@ -0,0 +1,7 @@
+## 0.9.3
+
+* Add a `MapKeySet` class that exposes an unmodifiable `Set` view of a `Map`'s
+ keys.
+
+* Add a `MapValueSet` class that takes a function from values to keys and uses
+ it to expose a `Set` view of a `Map`'s values.
diff --git a/pkg/collection/lib/wrappers.dart b/pkg/collection/lib/wrappers.dart
index 0fcc8c2..c9e919d 100644
--- a/pkg/collection/lib/wrappers.dart
+++ b/pkg/collection/lib/wrappers.dart
@@ -19,19 +19,15 @@
part "src/unmodifiable_wrappers.dart";
/**
- * Creates an [Iterable] that delegates all operations to a base iterable.
+ * A base class for delegating iterables.
*
- * This class can be used hide non-`Iterable` methods of an iterable object,
- * or it can be extended to add extra functionality on top of an existing
- * iterable object.
+ * Subclasses can provide a [_base] that should be delegated to. Unlike
+ * [DelegatingIterable], this allows the base to be created on demand.
*/
-class DelegatingIterable<E> implements Iterable<E> {
- final Iterable<E> _base;
+abstract class _DelegatingIterableBase<E> implements Iterable<E> {
+ Iterable<E> get _base;
- /**
- * Create a wrapper that forwards operations to [base].
- */
- const DelegatingIterable(Iterable<E> base) : _base = base;
+ const _DelegatingIterableBase();
bool any(bool test(E element)) => _base.any(test);
@@ -93,6 +89,22 @@
String toString() => _base.toString();
}
+/**
+ * Creates an [Iterable] that delegates all operations to a base iterable.
+ *
+ * This class can be used to hide non-`Iterable` methods of an iterable object,
+ * or it can be extended to add extra functionality on top of an existing
+ * iterable object.
+ */
+class DelegatingIterable<E> extends _DelegatingIterableBase<E> {
+ final Iterable<E> _base;
+
+ /**
+ * Create a wrapper that forwards operations to [base].
+ */
+ const DelegatingIterable(Iterable<E> base) : _base = base;
+}
+
/**
* Creates a [List] that delegates all operations to a base list.
@@ -337,3 +349,219 @@
String toString() => _base.toString();
}
+
+/**
+ * An unmodifiable [Set] view of the keys of a [Map].
+ *
+ * The set delegates all operations to the underlying map.
+ *
+ * A `Map` can only contain each key once, so its keys can always
+ * be viewed as a `Set` without any loss, even if the [Map.keys]
+ * getter only shows an [Iterable] view of the keys.
+ *
+ * Note that [lookup] is not supported for this set.
+ */
+class MapKeySet<E> extends _DelegatingIterableBase<E>
+ with UnmodifiableSetMixin<E> {
+ final Map<E, dynamic> _baseMap;
+
+ MapKeySet(Map<E, dynamic> base) : _baseMap = base;
+
+ Iterable<E> get _base => _baseMap.keys;
+
+ bool contains(Object element) => _baseMap.containsKey(element);
+
+ bool get isEmpty => _baseMap.isEmpty;
+
+ bool get isNotEmpty => _baseMap.isNotEmpty;
+
+ int get length => _baseMap.length;
+
+ String toString() => "{${_base.join(', ')}}";
+
+ bool containsAll(Iterable<Object> other) => other.every(contains);
+
+ /**
+ * Returns a new set with the elements of [this] that are not in [other].
+ *
+ * That is, the returned set contains all the elements of this [Set] that are
+ * not elements of [other] according to `other.contains`.
+ *
+ * Note that the returned set will use the default equality operation, which
+ * may be different than the equality operation [this] uses.
+ */
+ Set<E> difference(Set<E> other) =>
+ where((element) => !other.contains(element)).toSet();
+
+ /**
+ * Returns a new set which is the intersection between [this] and [other].
+ *
+ * That is, the returned set contains all the elements of this [Set] that are
+ * also elements of [other] according to `other.contains`.
+ *
+ * Note that the returned set will use the default equality operation, which
+ * may be different than the equality operation [this] uses.
+ */
+ Set<E> intersection(Set<Object> other) => where(other.contains).toSet();
+
+ /**
+ * Throws an [UnsupportedError] since there's no corresponding method for
+ * [Map]s.
+ */
+ E lookup(E element) => throw new UnsupportedError(
+ "MapKeySet doesn't support lookup().");
+
+ /**
+ * Returns a new set which contains all the elements of [this] and [other].
+ *
+ * That is, the returned set contains all the elements of this [Set] and all
+ * the elements of [other].
+ *
+ * Note that the returned set will use the default equality operation, which
+ * may be different than the equality operation [this] uses.
+ */
+ Set<E> union(Set<E> other) => toSet()..addAll(other);
+}
+
+/**
+ * Creates a modifiable [Set] view of the values of a [Map].
+ *
+ * The `Set` view assumes that the keys of the `Map` can be uniquely determined
+ * from the values. The `keyForValue` function passed to the constructor finds
+ * the key for a single value. The `keyForValue` function should be consistent
+ * with equality. If `value1 == value2` then `keyForValue(value1)` and
+ * `keyForValue(value2)` should be considered equal keys by the underlying map,
+ * and vice versa.
+ *
+ * Modifying the set will modify the underlying map based on the key returned by
+ * `keyForValue`.
+ *
+ * If the `Map` contents are not compatible with the `keyForValue` function, the
+ * set will not work consistently, and may give meaningless responses or do
+ * inconsistent updates.
+ *
+ * This set can, for example, be used on a map from database record IDs to the
+ * records. It exposes the records as a set, and allows for writing both
+ * `recordSet.add(databaseRecord)` and `recordMap[id]`.
+ *
+ * Effectively, the map will act as a kind of index for the set.
+ */
+class MapValueSet<K, V> extends _DelegatingIterableBase<V> implements Set<V> {
+ final Map<K, V> _baseMap;
+ final Function _keyForValue;
+
+ /**
+ * Creates a new [MapValueSet] based on [base].
+ *
+ * [keyForValue] returns the key in the map that should be associated with the
+ * given value. The set's notion of equality is identical to the equality of
+ * the return values of [keyForValue].
+ */
+ MapValueSet(Map<K, V> base, K keyForValue(V value))
+ : _baseMap = base,
+ _keyForValue = keyForValue;
+
+ Iterable<V> get _base => _baseMap.values;
+
+ bool contains(Object element) {
+ if (element != null && element is! V) return false;
+ return _baseMap.containsKey(_keyForValue(element));
+ }
+
+ bool get isEmpty => _baseMap.isEmpty;
+
+ bool get isNotEmpty => _baseMap.isNotEmpty;
+
+ int get length => _baseMap.length;
+
+ String toString() => toSet().toString();
+
+ bool add(V value) {
+ K key = _keyForValue(value);
+ bool result = false;
+ _baseMap.putIfAbsent(key, () {
+ result = true;
+ return value;
+ });
+ return result;
+ }
+
+ void addAll(Iterable<V> elements) => elements.forEach(add);
+
+ void clear() => _baseMap.clear();
+
+ bool containsAll(Iterable<Object> other) => other.every(contains);
+
+ /**
+ * Returns a new set with the elements of [this] that are not in [other].
+ *
+ * That is, the returned set contains all the elements of this [Set] that are
+ * not elements of [other] according to `other.contains`.
+ *
+ * Note that the returned set will use the default equality operation, which
+ * may be different than the equality operation [this] uses.
+ */
+ Set<V> difference(Set<V> other) =>
+ where((element) => !other.contains(element)).toSet();
+
+ /**
+ * Returns a new set which is the intersection between [this] and [other].
+ *
+ * That is, the returned set contains all the elements of this [Set] that are
+ * also elements of [other] according to `other.contains`.
+ *
+ * Note that the returned set will use the default equality operation, which
+ * may be different than the equality operation [this] uses.
+ */
+ Set<V> intersection(Set<Object> other) => where(other.contains).toSet();
+
+ V lookup(V element) => _baseMap[_keyForValue(element)];
+
+ bool remove(Object value) {
+ if (value != null && value is! V) return false;
+ var key = _keyForValue(value);
+ if (!_baseMap.containsKey(key)) return false;
+ _baseMap.remove(key);
+ return true;
+ }
+
+ void removeAll(Iterable<Object> elements) => elements.forEach(remove);
+
+ void removeWhere(bool test(V element)) {
+ var toRemove = [];
+ _baseMap.forEach((key, value) {
+ if (test(value)) toRemove.add(key);
+ });
+ toRemove.forEach(_baseMap.remove);
+ }
+
+ void retainAll(Iterable<Object> elements) {
+ var valuesToRetain = new Set<V>.identity();
+ for (var element in elements) {
+ if (element != null && element is! V) continue;
+ var key = _keyForValue(element);
+ if (!_baseMap.containsKey(key)) continue;
+ valuesToRetain.add(_baseMap[key]);
+ }
+
+ var keysToRemove = [];
+ _baseMap.forEach((k, v) {
+ if (!valuesToRetain.contains(v)) keysToRemove.add(k);
+ });
+ keysToRemove.forEach(_baseMap.remove);
+ }
+
+ void retainWhere(bool test(V element)) =>
+ removeWhere((element) => !test(element));
+
+ /**
+ * Returns a new set which contains all the elements of [this] and [other].
+ *
+ * That is, the returned set contains all the elements of this [Set] and all
+ * the elements of [other].
+ *
+ * Note that the returned set will use the default equality operation, which
+ * may be different than the equality operation [this] uses.
+ */
+ Set<V> union(Set<V> other) => toSet()..addAll(other);
+}
diff --git a/pkg/collection/pubspec.yaml b/pkg/collection/pubspec.yaml
index 58482ce..45cf9b7 100644
--- a/pkg/collection/pubspec.yaml
+++ b/pkg/collection/pubspec.yaml
@@ -1,5 +1,5 @@
name: collection
-version: 0.9.2
+version: 0.9.3-dev
author: Dart Team <misc@dartlang.org>
description: Collections and utilities functions and classes related to collections.
homepage: http://www.dartlang.org
diff --git a/pkg/collection/test/wrapper_test.dart b/pkg/collection/test/wrapper_test.dart
index 86a0ddd..5858aaf 100644
--- a/pkg/collection/test/wrapper_test.dart
+++ b/pkg/collection/test/wrapper_test.dart
@@ -265,6 +265,178 @@
expect.toString().equals.toString();
}
+ // Runs tests of Set behavior.
+ //
+ // [setUpSet] should return a set with two elements: "foo" and "bar".
+ void testTwoElementSet(Set<String> setUpSet()) {
+ group("with two elements", () {
+ var set;
+ setUp(() => set = setUpSet());
+
+ test(".any", () {
+ expect(set.any((element) => element == "foo"), isTrue);
+ expect(set.any((element) => element == "baz"), isFalse);
+ });
+
+ test(".elementAt", () {
+ expect(set.elementAt(0), equals("foo"));
+ expect(set.elementAt(1), equals("bar"));
+ expect(() => set.elementAt(2), throwsRangeError);
+ });
+
+ test(".every", () {
+ expect(set.every((element) => element == "foo"), isFalse);
+ expect(set.every((element) => element is String), isTrue);
+ });
+
+ test(".expand", () {
+ expect(set.expand((element) {
+ return [element.substring(0, 1), element.substring(1)];
+ }), equals(["f", "oo", "b", "ar"]));
+ });
+
+ test(".first", () {
+ expect(set.first, equals("foo"));
+ });
+
+ test(".firstWhere", () {
+ expect(set.firstWhere((element) => element is String), equals("foo"));
+ expect(set.firstWhere((element) => element.startsWith("b")),
+ equals("bar"));
+ expect(() => set.firstWhere((element) => element is int),
+ throwsStateError);
+ expect(set.firstWhere((element) => element is int, orElse: () => "baz"),
+ equals("baz"));
+ });
+
+ test(".fold", () {
+ expect(set.fold("start", (previous, element) => previous + element),
+ equals("startfoobar"));
+ });
+
+ test(".forEach", () {
+ var values = [];
+ set.forEach(values.add);
+ expect(values, equals(["foo", "bar"]));
+ });
+
+ test(".iterator", () {
+ var values = [];
+ for (var element in set) {
+ values.add(element);
+ }
+ expect(values, equals(["foo", "bar"]));
+ });
+
+ test(".join", () {
+ expect(set.join(", "), equals("foo, bar"));
+ });
+
+ test(".last", () {
+ expect(set.last, equals("bar"));
+ });
+
+ test(".lastWhere", () {
+ expect(set.lastWhere((element) => element is String), equals("bar"));
+ expect(set.lastWhere((element) => element.startsWith("f")),
+ equals("foo"));
+ expect(() => set.lastWhere((element) => element is int),
+ throwsStateError);
+ expect(set.lastWhere((element) => element is int, orElse: () => "baz"),
+ equals("baz"));
+ });
+
+ test(".map", () {
+ expect(set.map((element) => element.substring(1)),
+ equals(["oo", "ar"]));
+ });
+
+ test(".reduce", () {
+ expect(set.reduce((previous, element) => previous + element),
+ equals("foobar"));
+ });
+
+ test(".singleWhere", () {
+ expect(() => set.singleWhere((element) => element == "baz"),
+ throwsStateError);
+ expect(set.singleWhere((element) => element == "foo"),
+ "foo");
+ expect(() => set.singleWhere((element) => element is String),
+ throwsStateError);
+ });
+
+ test(".skip", () {
+ expect(set.skip(0), equals(["foo", "bar"]));
+ expect(set.skip(1), equals(["bar"]));
+ expect(set.skip(2), equals([]));
+ });
+
+ test(".skipWhile", () {
+ expect(set.skipWhile((element) => element.startsWith("f")),
+ equals(["bar"]));
+ expect(set.skipWhile((element) => element.startsWith("z")),
+ equals(["foo", "bar"]));
+ expect(set.skipWhile((element) => element is String),
+ equals([]));
+ });
+
+ test(".take", () {
+ expect(set.take(0), equals([]));
+ expect(set.take(1), equals(["foo"]));
+ expect(set.take(2), equals(["foo", "bar"]));
+ });
+
+ test(".takeWhile", () {
+ expect(set.takeWhile((element) => element.startsWith("f")),
+ equals(["foo"]));
+ expect(set.takeWhile((element) => element.startsWith("z")),
+ equals([]));
+ expect(set.takeWhile((element) => element is String),
+ equals(["foo", "bar"]));
+ });
+
+ test(".toList", () {
+ expect(set.toList(), equals(["foo", "bar"]));
+ expect(() => set.toList(growable: false).add("baz"),
+ throwsUnsupportedError);
+ expect(set.toList()..add("baz"), equals(["foo", "bar", "baz"]));
+ });
+
+ test(".toSet", () {
+ expect(set.toSet(), equals(new Set.from(["foo", "bar"])));
+ });
+
+ test(".where", () {
+ expect(set.where((element) => element.startsWith("f")),
+ equals(["foo"]));
+ expect(set.where((element) => element.startsWith("z")), equals([]));
+ expect(set.where((element) => element is String),
+ equals(["foo", "bar"]));
+ });
+
+ test(".containsAll", () {
+ expect(set.containsAll(["foo", "bar"]), isTrue);
+ expect(set.containsAll(["foo"]), isTrue);
+ expect(set.containsAll(["foo", "bar", "qux"]), isFalse);
+ });
+
+ test(".difference", () {
+ expect(set.difference(new Set.from(["foo", "baz"])),
+ equals(new Set.from(["bar"])));
+ });
+
+ test(".intersection", () {
+ expect(set.intersection(new Set.from(["foo", "baz"])),
+ equals(new Set.from(["foo"])));
+ });
+
+ test(".union", () {
+ expect(set.union(new Set.from(["foo", "baz"])),
+ equals(new Set.from(["foo", "bar", "baz"])));
+ });
+ });
+ }
+
test("Iterable", () {
testIterable(new IterableExpector());
});
@@ -284,4 +456,209 @@
test("Map", () {
testMap(new MapExpector());
});
+
+ group("MapKeySet", () {
+ var map;
+ var set;
+
+ setUp(() {
+ map = new Map<String, int>();
+ set = new MapKeySet<String>(map);
+ });
+
+ testTwoElementSet(() {
+ map["foo"] = 1;
+ map["bar"] = 2;
+ return set;
+ });
+
+ test(".single", () {
+ expect(() => set.single, throwsStateError);
+ map["foo"] = 1;
+ expect(set.single, equals("foo"));
+ map["bar"] = 1;
+ expect(() => set.single, throwsStateError);
+ });
+
+ test(".toString", () {
+ expect(set.toString(), equals("{}"));
+ map["foo"] = 1;
+ map["bar"] = 2;
+ expect(set.toString(), equals("{foo, bar}"));
+ });
+
+ test(".contains", () {
+ expect(set.contains("foo"), isFalse);
+ map["foo"] = 1;
+ expect(set.contains("foo"), isTrue);
+ });
+
+ test(".isEmpty", () {
+ expect(set.isEmpty, isTrue);
+ map["foo"] = 1;
+ expect(set.isEmpty, isFalse);
+ });
+
+ test(".isNotEmpty", () {
+ expect(set.isNotEmpty, isFalse);
+ map["foo"] = 1;
+ expect(set.isNotEmpty, isTrue);
+ });
+
+ test(".length", () {
+ expect(set, hasLength(0));
+ map["foo"] = 1;
+ expect(set, hasLength(1));
+ map["bar"] = 2;
+ expect(set, hasLength(2));
+ });
+
+ test("is unmodifiable", () {
+ expect(() => set.add("baz"), throwsUnsupportedError);
+ expect(() => set.addAll(["baz", "bang"]), throwsUnsupportedError);
+ expect(() => set.remove("foo"), throwsUnsupportedError);
+ expect(() => set.removeAll(["baz", "bang"]), throwsUnsupportedError);
+ expect(() => set.retainAll(["foo"]), throwsUnsupportedError);
+ expect(() => set.removeWhere((_) => true), throwsUnsupportedError);
+ expect(() => set.retainWhere((_) => true), throwsUnsupportedError);
+ expect(() => set.clear(), throwsUnsupportedError);
+ });
+ });
+
+ group("MapValueSet", () {
+ var map;
+ var set;
+
+ setUp(() {
+ map = new Map<String, String>();
+ set = new MapValueSet<String, String>(map,
+ (string) => string.substring(0, 1));
+ });
+
+ testTwoElementSet(() {
+ map["f"] = "foo";
+ map["b"] = "bar";
+ return set;
+ });
+
+ test(".single", () {
+ expect(() => set.single, throwsStateError);
+ map["f"] = "foo";
+ expect(set.single, equals("foo"));
+ map["b"] = "bar";
+ expect(() => set.single, throwsStateError);
+ });
+
+ test(".toString", () {
+ expect(set.toString(), equals("{}"));
+ map["f"] = "foo";
+ map["b"] = "bar";
+ expect(set.toString(), equals("{foo, bar}"));
+ });
+
+ test(".contains", () {
+ expect(set.contains("foo"), isFalse);
+ map["f"] = "foo";
+ expect(set.contains("foo"), isTrue);
+ expect(set.contains("fblthp"), isTrue);
+ });
+
+ test(".isEmpty", () {
+ expect(set.isEmpty, isTrue);
+ map["f"] = "foo";
+ expect(set.isEmpty, isFalse);
+ });
+
+ test(".isNotEmpty", () {
+ expect(set.isNotEmpty, isFalse);
+ map["f"] = "foo";
+ expect(set.isNotEmpty, isTrue);
+ });
+
+ test(".length", () {
+ expect(set, hasLength(0));
+ map["f"] = "foo";
+ expect(set, hasLength(1));
+ map["b"] = "bar";
+ expect(set, hasLength(2));
+ });
+
+ test(".lookup", () {
+ map["f"] = "foo";
+ expect(set.lookup("fblthp"), equals("foo"));
+ expect(set.lookup("bar"), isNull);
+ });
+
+ test(".add", () {
+ set.add("foo");
+ set.add("bar");
+ expect(map, equals({"f": "foo", "b": "bar"}));
+ });
+
+ test(".addAll", () {
+ set.addAll(["foo", "bar"]);
+ expect(map, equals({"f": "foo", "b": "bar"}));
+ });
+
+ test(".clear", () {
+ map["f"] = "foo";
+ map["b"] = "bar";
+ set.clear();
+ expect(map, isEmpty);
+ });
+
+ test(".remove", () {
+ map["f"] = "foo";
+ map["b"] = "bar";
+ set.remove("fblthp");
+ expect(map, equals({"b": "bar"}));
+ });
+
+ test(".removeAll", () {
+ map["f"] = "foo";
+ map["b"] = "bar";
+ map["q"] = "qux";
+ set.removeAll(["fblthp", "qux"]);
+ expect(map, equals({"b": "bar"}));
+ });
+
+ test(".removeWhere", () {
+ map["f"] = "foo";
+ map["b"] = "bar";
+ map["q"] = "qoo";
+ set.removeWhere((element) => element.endsWith("o"));
+ expect(map, equals({"b": "bar"}));
+ });
+
+ test(".retainAll", () {
+ map["f"] = "foo";
+ map["b"] = "bar";
+ map["q"] = "qux";
+ set.retainAll(["fblthp", "qux"]);
+ expect(map, equals({"f": "foo", "q": "qux"}));
+ });
+
+ test(".retainAll respects an unusual notion of equality", () {
+ map = new HashMap<String, String>(
+ equals: (value1, value2) =>
+ value1.toLowerCase() == value2.toLowerCase(),
+ hashCode: (value) => value.toLowerCase().hashCode);
+ set = new MapValueSet<String, String>(map,
+ (string) => string.substring(0, 1));
+
+ map["f"] = "foo";
+ map["B"] = "bar";
+ map["Q"] = "qux";
+ set.retainAll(["fblthp", "qux"]);
+ expect(map, equals({"f": "foo", "Q": "qux"}));
+ });
+
+ test(".retainWhere", () {
+ map["f"] = "foo";
+ map["b"] = "bar";
+ map["q"] = "qoo";
+ set.retainWhere((element) => element.endsWith("o"));
+ expect(map, equals({"f": "foo", "q": "qoo"}));
+ });
+ });
}
diff --git a/pkg/pkg.status b/pkg/pkg.status
index 7b1559c..3ae6d11 100644
--- a/pkg/pkg.status
+++ b/pkg/pkg.status
@@ -18,6 +18,27 @@
scheduled_test/test/scheduled_process_test: Pass, Slow # Issue 9231
polymer/test/build/script_compactor_test: Pass, Slow
+[ $compiler == none && ($runtime == drt || $runtime == dartium) ]
+third_party/angular_tests/browser_test/core_dom/shadow_root_options: Fail # Issue 18931 (Disabled for Chrome 35 roll)
+polymer/example/component/news/test/news_index_test: Pass, RuntimeError # Issue 18931
+polymer/test/attr_deserialize_test: Pass, RuntimeError # Issue 18931
+polymer/test/attr_mustache_test: Pass, RuntimeError # Issue 18931
+polymer/test/bind_test: Pass, RuntimeError # Issue 18931
+polymer/test/custom_event_test: Pass, RuntimeError # Issue 18931
+polymer/test/entered_view_test: Pass, RuntimeError # Issue 18931
+polymer/test/event_handlers_test: Pass, RuntimeError # Issue 18931
+polymer/test/event_path_test: Pass, RuntimeError # Issue 18931
+polymer/test/events_test: Pass, RuntimeError # Issue 18931
+polymer/test/instance_attrs_test: Pass, RuntimeError # Issue 18931
+polymer/test/js_interop_test: Pass, RuntimeError # Issue 18931
+polymer/test/nested_binding_test: Pass, RuntimeError # Issue 18931
+polymer/test/noscript_test: Pass, RuntimeError # Issue 18931
+polymer/test/prop_attr_bind_reflection_test: Pass, RuntimeError # Issue 18931
+polymer/test/prop_attr_reflection_test: Pass, RuntimeError # Issue 18931
+polymer/test/publish_attributes_test: Pass, RuntimeError # Issue 18931
+polymer/test/take_attributes_test: Pass, RuntimeError # Issue 18931
+polymer/test/template_distribute_dynamic_test: Pass, RuntimeError # Issue 18931
+
[ $runtime == vm && $mode == debug]
analysis_server/test/analysis_server_test: Pass, Timeout
analysis_server/test/domain_context_test: Pass, Timeout
diff --git a/pkg/pkgbuild.status b/pkg/pkgbuild.status
index d473ea5..836631d 100644
--- a/pkg/pkgbuild.status
+++ b/pkg/pkgbuild.status
@@ -16,7 +16,6 @@
[ $use_public_packages ]
samples/third_party/angular_todo: Pass, Slow
-pkg/third_party/html5lib: PubGetError # csslib needs to be published
[ $use_public_packages && $builder_tag == russian ]
samples/third_party/todomvc: Fail # Issue 18104
diff --git a/runtime/bin/io_sources.gypi b/runtime/bin/io_sources.gypi
index e5bdbe3..d79413b 100644
--- a/runtime/bin/io_sources.gypi
+++ b/runtime/bin/io_sources.gypi
@@ -14,6 +14,7 @@
'io_service_patch.dart',
'platform_patch.dart',
'process_patch.dart',
+ 'service_object_patch.dart',
'socket_patch.dart',
'stdio_patch.dart',
'secure_socket_patch.dart',
diff --git a/runtime/bin/main.cc b/runtime/bin/main.cc
index 4ae0c26..c58fd92 100644
--- a/runtime/bin/main.cc
+++ b/runtime/bin/main.cc
@@ -391,7 +391,7 @@
if (strncmp(argv[i], kPackageRoot, strlen(kPackageRoot)) == 0) {
if (!ProcessPackageRootOption(argv[i] + strlen(kPackageRoot))) {
i++;
- if (!ProcessPackageRootOption(argv[i])) {
+ if ((argv[i] == NULL) || !ProcessPackageRootOption(argv[i])) {
Log::PrintErr("Invalid option specification : '%s'\n", argv[i - 1]);
i++;
break;
@@ -845,13 +845,6 @@
}
-static const char* ServiceRequestError(const char* message) {
- TextBuffer buffer(128);
- buffer.Printf("{\"type\":\"Error\",\"text\":\"%s\"}", message);
- return buffer.Steal();
-}
-
-
static const char* ServiceRequestError(Dart_Handle error) {
TextBuffer buffer(128);
buffer.Printf("{\"type\":\"Error\",\"text\":\"Internal error %s\"}",
@@ -876,27 +869,35 @@
intptr_t num_options,
void* user_data) {
DartScope scope;
- const char* kSockets = "sockets";
- if (num_arguments == 2 &&
- strncmp(arguments[1], kSockets, strlen(kSockets)) == 0) {
- Dart_Handle dart_io_str = Dart_NewStringFromCString("dart:io");
- if (Dart_IsError(dart_io_str)) return ServiceRequestError(dart_io_str);
- Dart_Handle io_lib = Dart_LookupLibrary(dart_io_str);
- if (Dart_IsError(io_lib)) return ServiceRequestError(io_lib);
- Dart_Handle handler_function_name =
- Dart_NewStringFromCString("_socketsStats");
- if (Dart_IsError(handler_function_name)) {
- return ServiceRequestError(handler_function_name);
- }
- Dart_Handle result = Dart_Invoke(io_lib, handler_function_name, 0, NULL);
- if (Dart_IsError(result)) return ServiceRequestError(result);
- const char *json;
- result = Dart_StringToCString(result, &json);
- if (Dart_IsError(result)) return ServiceRequestError(result);
- return strdup(json);
- } else {
- return ServiceRequestError("Unrecognized path");
+ ASSERT(num_arguments > 0);
+ ASSERT(strncmp(arguments[0], "io", 2) == 0);
+ // TODO(ajohnsen): Store the library/function in isolate data or user_data.
+ Dart_Handle dart_io_str = Dart_NewStringFromCString("dart:io");
+ if (Dart_IsError(dart_io_str)) return ServiceRequestError(dart_io_str);
+ Dart_Handle io_lib = Dart_LookupLibrary(dart_io_str);
+ if (Dart_IsError(io_lib)) return ServiceRequestError(io_lib);
+ Dart_Handle handler_function_name =
+ Dart_NewStringFromCString("_serviceObjectHandler");
+ if (Dart_IsError(handler_function_name)) {
+ return ServiceRequestError(handler_function_name);
}
+ Dart_Handle paths = Dart_NewList(num_arguments - 1);
+ for (int i = 0; i < num_arguments - 1; i++) {
+ Dart_ListSetAt(paths, i, Dart_NewStringFromCString(arguments[i + 1]));
+ }
+ Dart_Handle keys = Dart_NewList(num_options);
+ Dart_Handle values = Dart_NewList(num_options);
+ for (int i = 0; i < num_options; i++) {
+ Dart_ListSetAt(keys, i, Dart_NewStringFromCString(option_keys[i]));
+ Dart_ListSetAt(values, i, Dart_NewStringFromCString(option_values[i]));
+ }
+ Dart_Handle args[] = {paths, keys, values};
+ Dart_Handle result = Dart_Invoke(io_lib, handler_function_name, 3, args);
+ if (Dart_IsError(result)) return ServiceRequestError(result);
+ const char *json;
+ result = Dart_StringToCString(result, &json);
+ if (Dart_IsError(result)) return ServiceRequestError(result);
+ return strdup(json);
}
diff --git a/runtime/bin/service_object_patch.dart b/runtime/bin/service_object_patch.dart
new file mode 100644
index 0000000..f23dd71
--- /dev/null
+++ b/runtime/bin/service_object_patch.dart
@@ -0,0 +1,52 @@
+// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+
+final Map _servicePathMap = {
+ 'http' : {
+ 'servers' : _httpServersServiceObject
+ }
+};
+
+String _serviceObjectHandler(List<String> paths,
+ List<String> keys,
+ List<String> values) {
+ assert(keys.length == values.length);
+ badPath() {
+ throw "Invalid path '${paths.join("/")}'";
+ }
+ if (paths.isEmpty) {
+ badPath();
+ }
+ int i = 0;
+ var current = _servicePathMap;
+ do {
+ current = current[paths[i]];
+ i++;
+ } while (i < paths.length && current is Map);
+ if (current is! Function) {
+ badPath();
+ }
+ var query = new Map();
+ for (int i = 0; i < keys.length; i++) {
+ query[keys[i]] = values[i];
+ }
+ return JSON.encode(current(paths.sublist(i)));
+}
+
+Map _httpServersServiceObject(args) {
+ if (args.length == 1) {
+ var server = _HttpServer._servers[int.parse(args.first)];
+ if (server == null) {
+ return {};
+ }
+ return server._toJSON(false);
+ }
+ return {
+ 'id': 'io/http/servers',
+ 'type': 'HttpServerList',
+ 'members': _HttpServer._servers.values
+ .map((server) => server._toJSON(true)).toList(),
+ };
+}
diff --git a/runtime/include/dart_api.h b/runtime/include/dart_api.h
index 77da50c..7aded50 100755
--- a/runtime/include/dart_api.h
+++ b/runtime/include/dart_api.h
@@ -2155,6 +2155,81 @@
*/
DART_EXPORT void* Dart_GetNativeIsolateData(Dart_NativeArguments args);
+typedef enum {
+ Dart_NativeArgument_kBool = 0,
+ Dart_NativeArgument_kInt32,
+ Dart_NativeArgument_kUint32,
+ Dart_NativeArgument_kInt64,
+ Dart_NativeArgument_kUint64,
+ Dart_NativeArgument_kDouble,
+ Dart_NativeArgument_kString,
+ Dart_NativeArgument_kInstance,
+ Dart_NativeArgument_kNativeFields,
+} Dart_NativeArgument_Type;
+
+typedef struct _Dart_NativeArgument_Descriptor {
+ uint8_t type;
+ uint8_t index;
+} Dart_NativeArgument_Descriptor;
+
+typedef union _Dart_NativeArgument_Value {
+ bool as_bool;
+ int32_t as_int32;
+ uint32_t as_uint32;
+ int64_t as_int64;
+ uint64_t as_uint64;
+ double as_double;
+ struct {
+ Dart_Handle dart_str;
+ void* peer;
+ } as_string;
+ struct {
+ intptr_t num_fields;
+ intptr_t* values;
+ } as_native_fields;
+ Dart_Handle as_instance;
+} Dart_NativeArgument_Value;
+
+enum {
+ kNativeArgNumberPos = 0,
+ kNativeArgNumberSize = 8,
+ kNativeArgTypePos = kNativeArgNumberPos + kNativeArgNumberSize,
+ kNativeArgTypeSize = 8,
+};
+
+#define BITMASK(size) ((1 << size) - 1)
+#define DART_NATIVE_ARG_DESCRIPTOR(type, position) \
+ (((type & BITMASK(kNativeArgTypeSize)) << kNativeArgTypePos) | \
+ (position & BITMASK(kNativeArgNumberSize)))
+
+/**
+ * Gets the native arguments based on the types passed in and populates
+ * the passed arguments buffer with appropriate native values.
+ *
+ * \param args the Native arguments block passed into the native call.
+ * \param num_arguments length of argument descriptor array and argument
+ * values array passed in.
+ * \param arg_descriptors an array that describes the arguments that
+ * need to be retrieved. For each argument to be retrieved the descriptor
+ * contains the argument number (0, 1 etc.) and the argument type
+ * described using Dart_NativeArgument_Type, e.g:
+ * DART_NATIVE_ARG_DESCRIPTOR(Dart_NativeArgument_kBool, 1) indicates
+ * that the first argument is to be retrieved and it should be a boolean.
+ * \param arg_values array into which the native arguments need to be
+ * extracted into, the array is allocated by the caller (it could be
+ * stack allocated to avoid the malloc/free performance overhead).
+ *
+ * \return Success if all the arguments could be extracted correctly,
+ * returns an error handle if there were any errors while extracting the
+ * arguments (mismatched number of arguments, incorrect types, etc.).
+ */
+DART_EXPORT Dart_Handle Dart_GetNativeArguments(
+ Dart_NativeArguments args,
+ int num_arguments,
+ const Dart_NativeArgument_Descriptor* arg_descriptors,
+ Dart_NativeArgument_Value* arg_values);
+
+
/**
* Gets the native argument at some index.
*/
diff --git a/runtime/lib/mirrors.cc b/runtime/lib/mirrors.cc
index a72d6fc..cfcebe1 100644
--- a/runtime/lib/mirrors.cc
+++ b/runtime/lib/mirrors.cc
@@ -101,14 +101,17 @@
// Only generative constructors can have initializing formals.
if (!func.IsConstructor()) return;
- const Class& cls = Class::Handle(func.Owner());
- const Error& error = Error::Handle(cls.EnsureIsFinalized(Isolate::Current()));
+ Isolate* isolate = Isolate::Current();
+ const Class& cls = Class::Handle(isolate, func.Owner());
+ const Error& error = Error::Handle(
+ isolate, cls.EnsureIsFinalized(Isolate::Current()));
if (!error.IsNull()) {
ThrowInvokeError(error);
UNREACHABLE();
}
if (!func.HasCode()) {
- const Error& error = Error::Handle(Compiler::CompileFunction(func));
+ const Error& error = Error::Handle(
+ isolate, Compiler::CompileFunction(isolate, func));
if (!error.IsNull()) {
ThrowInvokeError(error);
UNREACHABLE();
diff --git a/runtime/vm/assembler_arm.cc b/runtime/vm/assembler_arm.cc
index d712f98..a7c442d 100644
--- a/runtime/vm/assembler_arm.cc
+++ b/runtime/vm/assembler_arm.cc
@@ -273,12 +273,24 @@
}
+void Assembler::adcs(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), ADC, 1, rn, rd, so);
+}
+
+
void Assembler::sbc(Register rd, Register rn, ShifterOperand so,
Condition cond) {
EmitType01(cond, so.type(), SBC, 0, rn, rd, so);
}
+void Assembler::sbcs(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), SBC, 1, rn, rd, so);
+}
+
+
void Assembler::rsc(Register rd, Register rn, ShifterOperand so,
Condition cond) {
EmitType01(cond, so.type(), RSC, 0, rn, rd, so);
@@ -2161,6 +2173,11 @@
}
+void Assembler::SignFill(Register rd, Register rm) {
+ Asr(rd, rm, 31);
+}
+
+
void Assembler::Vreciprocalqs(QRegister qd, QRegister qm) {
ASSERT(qm != QTMP);
ASSERT(qd != QTMP);
diff --git a/runtime/vm/assembler_arm.h b/runtime/vm/assembler_arm.h
index 607754b..19033fa 100644
--- a/runtime/vm/assembler_arm.h
+++ b/runtime/vm/assembler_arm.h
@@ -351,8 +351,12 @@
void adc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+ void adcs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
void sbc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+ void sbcs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
void rsc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
void tst(Register rn, ShifterOperand so, Condition cond = AL);
@@ -694,6 +698,9 @@
void Ror(Register rd, Register rm, Register rs, Condition cond = AL);
void Rrx(Register rd, Register rm, Condition cond = AL);
+ // Fill rd with the sign of rm.
+ void SignFill(Register rd, Register rm);
+
void Vreciprocalqs(QRegister qd, QRegister qm);
void VreciprocalSqrtqs(QRegister qd, QRegister qm);
// If qm must be preserved, then provide a (non-QTMP) temporary.
diff --git a/runtime/vm/assembler_arm64.cc b/runtime/vm/assembler_arm64.cc
index 090d6a7..7db683d 100644
--- a/runtime/vm/assembler_arm64.cc
+++ b/runtime/vm/assembler_arm64.cc
@@ -1227,11 +1227,8 @@
void Assembler::UpdateAllocationStats(intptr_t cid,
- Register temp_reg,
Register pp,
Heap::Space space) {
- ASSERT(temp_reg != kNoRegister);
- ASSERT(temp_reg != TMP);
ASSERT(cid > 0);
Isolate* isolate = Isolate::Current();
ClassTable* class_table = isolate->class_table();
@@ -1242,34 +1239,30 @@
const uword count_field_offset = (space == Heap::kNew) ?
ClassHeapStats::allocated_since_gc_new_space_offset() :
ClassHeapStats::allocated_since_gc_old_space_offset();
- LoadImmediate(temp_reg, class_heap_stats_table_address + class_offset, pp);
- const Address& count_address = Address(temp_reg, count_field_offset);
+ LoadImmediate(TMP2, class_heap_stats_table_address + class_offset, pp);
+ const Address& count_address = Address(TMP2, count_field_offset);
ldr(TMP, count_address);
AddImmediate(TMP, TMP, 1, pp);
str(TMP, count_address);
} else {
- ASSERT(temp_reg != kNoRegister);
const uword class_offset = cid * sizeof(ClassHeapStats); // NOLINT
const uword count_field_offset = (space == Heap::kNew) ?
ClassHeapStats::allocated_since_gc_new_space_offset() :
ClassHeapStats::allocated_since_gc_old_space_offset();
- LoadImmediate(temp_reg, class_table->ClassStatsTableAddress(), pp);
- ldr(temp_reg, Address(temp_reg));
- AddImmediate(temp_reg, temp_reg, class_offset, pp);
- ldr(TMP, Address(temp_reg, count_field_offset));
+ LoadImmediate(TMP2, class_table->ClassStatsTableAddress(), pp);
+ ldr(TMP, Address(TMP2));
+ AddImmediate(TMP2, TMP, class_offset, pp);
+ ldr(TMP, Address(TMP2, count_field_offset));
AddImmediate(TMP, TMP, 1, pp);
- str(TMP, Address(temp_reg, count_field_offset));
+ str(TMP, Address(TMP2, count_field_offset));
}
}
void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
Register size_reg,
- Register temp_reg,
Register pp,
Heap::Space space) {
- ASSERT(temp_reg != kNoRegister);
- ASSERT(temp_reg != TMP);
ASSERT(cid > 0);
Isolate* isolate = Isolate::Current();
ClassTable* class_table = isolate->class_table();
@@ -1283,9 +1276,9 @@
const uword size_field_offset = (space == Heap::kNew) ?
ClassHeapStats::allocated_size_since_gc_new_space_offset() :
ClassHeapStats::allocated_size_since_gc_old_space_offset();
- LoadImmediate(temp_reg, class_heap_stats_table_address + class_offset, pp);
- const Address& count_address = Address(temp_reg, count_field_offset);
- const Address& size_address = Address(temp_reg, size_field_offset);
+ LoadImmediate(TMP2, class_heap_stats_table_address + class_offset, pp);
+ const Address& count_address = Address(TMP2, count_field_offset);
+ const Address& size_address = Address(TMP2, size_field_offset);
ldr(TMP, count_address);
AddImmediate(TMP, TMP, 1, pp);
str(TMP, count_address);
@@ -1293,7 +1286,6 @@
add(TMP, TMP, Operand(size_reg));
str(TMP, size_address);
} else {
- ASSERT(temp_reg != kNoRegister);
const uword class_offset = cid * sizeof(ClassHeapStats); // NOLINT
const uword count_field_offset = (space == Heap::kNew) ?
ClassHeapStats::allocated_since_gc_new_space_offset() :
@@ -1301,15 +1293,15 @@
const uword size_field_offset = (space == Heap::kNew) ?
ClassHeapStats::allocated_size_since_gc_new_space_offset() :
ClassHeapStats::allocated_size_since_gc_old_space_offset();
- LoadImmediate(temp_reg, class_table->ClassStatsTableAddress(), pp);
- ldr(temp_reg, Address(temp_reg));
- AddImmediate(temp_reg, temp_reg, class_offset, pp);
- ldr(TMP, Address(temp_reg, count_field_offset));
+ LoadImmediate(TMP2, class_table->ClassStatsTableAddress(), pp);
+ ldr(TMP, Address(TMP2));
+ AddImmediate(TMP2, TMP, class_offset, pp);
+ ldr(TMP, Address(TMP2, count_field_offset));
AddImmediate(TMP, TMP, 1, pp);
- str(TMP, Address(temp_reg, count_field_offset));
- ldr(TMP, Address(temp_reg, size_field_offset));
+ str(TMP, Address(TMP2, count_field_offset));
+ ldr(TMP, Address(TMP2, size_field_offset));
add(TMP, TMP, Operand(size_reg));
- str(TMP, Address(temp_reg, size_field_offset));
+ str(TMP, Address(TMP2, size_field_offset));
}
}
@@ -1317,7 +1309,6 @@
void Assembler::TryAllocate(const Class& cls,
Label* failure,
Register instance_reg,
- Register temp_reg,
Register pp) {
ASSERT(failure != NULL);
if (FLAG_inline_alloc) {
@@ -1342,7 +1333,7 @@
ASSERT(instance_size >= kHeapObjectTag);
AddImmediate(
instance_reg, instance_reg, -instance_size + kHeapObjectTag, pp);
- UpdateAllocationStats(cls.id(), temp_reg, pp);
+ UpdateAllocationStats(cls.id(), pp);
uword tags = 0;
tags = RawObject::SizeTag::update(instance_size, tags);
diff --git a/runtime/vm/assembler_arm64.h b/runtime/vm/assembler_arm64.h
index 10cfef3..5bf20fe 100644
--- a/runtime/vm/assembler_arm64.h
+++ b/runtime/vm/assembler_arm64.h
@@ -733,6 +733,44 @@
EmitFPTwoSourceOp(FSUBD, vd, vn, vm);
}
+ // SIMD operations.
+ void vadds(VRegister vd, VRegister vn, VRegister vm) {
+ EmitSIMDThreeSameOp(VADDS, vd, vn, vm);
+ }
+ void vaddd(VRegister vd, VRegister vn, VRegister vm) {
+ EmitSIMDThreeSameOp(VADDD, vd, vn, vm);
+ }
+ void vsubs(VRegister vd, VRegister vn, VRegister vm) {
+ EmitSIMDThreeSameOp(VSUBS, vd, vn, vm);
+ }
+ void vsubd(VRegister vd, VRegister vn, VRegister vm) {
+ EmitSIMDThreeSameOp(VSUBD, vd, vn, vm);
+ }
+ void vmuls(VRegister vd, VRegister vn, VRegister vm) {
+ EmitSIMDThreeSameOp(VMULS, vd, vn, vm);
+ }
+ void vmuld(VRegister vd, VRegister vn, VRegister vm) {
+ EmitSIMDThreeSameOp(VMULD, vd, vn, vm);
+ }
+ void vdivs(VRegister vd, VRegister vn, VRegister vm) {
+ EmitSIMDThreeSameOp(VDIVS, vd, vn, vm);
+ }
+ void vdivd(VRegister vd, VRegister vn, VRegister vm) {
+ EmitSIMDThreeSameOp(VDIVD, vd, vn, vm);
+ }
+ void vdups(VRegister vd, VRegister vn, int32_t idx) {
+ EmitSIMDCopyOp(VDUP, vd, vn, kSWord, 0, idx);
+ }
+ void vdupd(VRegister vd, VRegister vn, int32_t idx) {
+ EmitSIMDCopyOp(VDUP, vd, vn, kDWord, 0, idx);
+ }
+ void vinss(VRegister vd, int32_t didx, VRegister vn, int32_t sidx) {
+ EmitSIMDCopyOp(VINS, vd, vn, kSWord, sidx, didx);
+ }
+ void vinsd(VRegister vd, int32_t didx, VRegister vn, int32_t sidx) {
+ EmitSIMDCopyOp(VINS, vd, vn, kDWord, sidx, didx);
+ }
+
// Aliases.
void mov(Register rd, Register rn) {
if ((rd == SP) || (rn == SP)) {
@@ -970,13 +1008,11 @@
void LeaveStubFrame();
void UpdateAllocationStats(intptr_t cid,
- Register temp_reg,
Register pp,
Heap::Space space = Heap::kNew);
void UpdateAllocationStatsWithSize(intptr_t cid,
Register size_reg,
- Register temp_reg,
Register pp,
Heap::Space space = Heap::kNew);
@@ -987,7 +1023,6 @@
void TryAllocate(const Class& cls,
Label* failure,
Register instance_reg,
- Register temp_reg,
Register pp);
private:
@@ -1405,6 +1440,30 @@
Emit(encoding);
}
+ void EmitSIMDThreeSameOp(SIMDThreeSameOp op,
+ VRegister vd, VRegister vn, VRegister vm) {
+ const int32_t encoding =
+ op |
+ (static_cast<int32_t>(vd) << kVdShift) |
+ (static_cast<int32_t>(vn) << kVnShift) |
+ (static_cast<int32_t>(vm) << kVmShift);
+ Emit(encoding);
+ }
+
+ void EmitSIMDCopyOp(SIMDCopyOp op, VRegister vd, VRegister vn, OperandSize sz,
+ int32_t idx4, int32_t idx5) {
+ const int32_t shift = Log2OperandSizeBytes(sz);
+ const int32_t imm5 = ((idx5 << (shift + 1)) | (1 << shift)) & 0x1f;
+ const int32_t imm4 = (idx4 << shift) & 0xf;
+ const int32_t encoding =
+ op |
+ (imm5 << kImm5Shift) |
+ (imm4 << kImm4Shift) |
+ (static_cast<int32_t>(vd) << kVdShift) |
+ (static_cast<int32_t>(vn) << kVnShift);
+ Emit(encoding);
+ }
+
void StoreIntoObjectFilter(Register object, Register value, Label* no_update);
// Shorter filtering sequence that assumes that value is not a smi.
diff --git a/runtime/vm/assembler_arm64_test.cc b/runtime/vm/assembler_arm64_test.cc
index 6dca831..7fed7c3 100644
--- a/runtime/vm/assembler_arm64_test.cc
+++ b/runtime/vm/assembler_arm64_test.cc
@@ -1846,6 +1846,407 @@
}
+ASSEMBLER_TEST_GENERATE(Vadds, assembler) {
+ __ LoadDImmediate(V0, 0.0, kNoPP);
+ __ LoadDImmediate(V1, 1.0, kNoPP);
+ __ LoadDImmediate(V2, 2.0, kNoPP);
+ __ LoadDImmediate(V3, 3.0, kNoPP);
+
+ __ fcvtsd(V0, V0);
+ __ fcvtsd(V1, V1);
+ __ fcvtsd(V2, V2);
+ __ fcvtsd(V3, V3);
+
+ const int sword_bytes = 1 << Log2OperandSizeBytes(kSWord);
+ const int qword_bytes = 1 << Log2OperandSizeBytes(kQWord);
+ __ fstrs(V0, Address(SP, -1 * sword_bytes, Address::PreIndex));
+ __ fstrs(V1, Address(SP, -1 * sword_bytes, Address::PreIndex));
+ __ fstrs(V2, Address(SP, -1 * sword_bytes, Address::PreIndex));
+ __ fstrs(V3, Address(SP, -1 * sword_bytes, Address::PreIndex));
+
+ __ fldrq(V4, Address(SP, 1 * qword_bytes, Address::PostIndex));
+ __ vadds(V5, V4, V4);
+ __ fstrq(V5, Address(SP, -1 * qword_bytes, Address::PreIndex));
+
+ __ fldrs(V0, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V1, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V2, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V3, Address(SP, 1 * sword_bytes, Address::PostIndex));
+
+ __ fcvtds(V0, V0);
+ __ fcvtds(V1, V1);
+ __ fcvtds(V2, V2);
+ __ fcvtds(V3, V3);
+
+ __ faddd(V0, V0, V1);
+ __ faddd(V0, V0, V2);
+ __ faddd(V0, V0, V3);
+ __ ret();
+}
+
+
+ASSEMBLER_TEST_RUN(Vadds, test) {
+ typedef int (*SimpleCode)();
+ EXPECT_EQ(12.0, EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()));
+}
+
+
+ASSEMBLER_TEST_GENERATE(Vsubs, assembler) {
+ __ LoadDImmediate(V0, 0.0, kNoPP);
+ __ LoadDImmediate(V1, 1.0, kNoPP);
+ __ LoadDImmediate(V2, 2.0, kNoPP);
+ __ LoadDImmediate(V3, 3.0, kNoPP);
+ __ LoadDImmediate(V5, 0.0, kNoPP);
+
+ __ fcvtsd(V0, V0);
+ __ fcvtsd(V1, V1);
+ __ fcvtsd(V2, V2);
+ __ fcvtsd(V3, V3);
+
+ const int sword_bytes = 1 << Log2OperandSizeBytes(kSWord);
+ const int qword_bytes = 1 << Log2OperandSizeBytes(kQWord);
+ __ fstrs(V0, Address(SP, -1 * sword_bytes, Address::PreIndex));
+ __ fstrs(V1, Address(SP, -1 * sword_bytes, Address::PreIndex));
+ __ fstrs(V2, Address(SP, -1 * sword_bytes, Address::PreIndex));
+ __ fstrs(V3, Address(SP, -1 * sword_bytes, Address::PreIndex));
+
+ __ fldrq(V4, Address(SP, 1 * qword_bytes, Address::PostIndex));
+ __ vsubs(V5, V5, V4);
+ __ fstrq(V5, Address(SP, -1 * qword_bytes, Address::PreIndex));
+
+ __ fldrs(V0, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V1, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V2, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V3, Address(SP, 1 * sword_bytes, Address::PostIndex));
+
+ __ fcvtds(V0, V0);
+ __ fcvtds(V1, V1);
+ __ fcvtds(V2, V2);
+ __ fcvtds(V3, V3);
+
+ __ faddd(V0, V0, V1);
+ __ faddd(V0, V0, V2);
+ __ faddd(V0, V0, V3);
+ __ ret();
+}
+
+
+ASSEMBLER_TEST_RUN(Vsubs, test) {
+ typedef int (*SimpleCode)();
+ EXPECT_EQ(-6.0, EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()));
+}
+
+
+ASSEMBLER_TEST_GENERATE(Vmuls, assembler) {
+ __ LoadDImmediate(V0, 0.0, kNoPP);
+ __ LoadDImmediate(V1, 1.0, kNoPP);
+ __ LoadDImmediate(V2, 2.0, kNoPP);
+ __ LoadDImmediate(V3, 3.0, kNoPP);
+
+ __ fcvtsd(V0, V0);
+ __ fcvtsd(V1, V1);
+ __ fcvtsd(V2, V2);
+ __ fcvtsd(V3, V3);
+
+ const int sword_bytes = 1 << Log2OperandSizeBytes(kSWord);
+ const int qword_bytes = 1 << Log2OperandSizeBytes(kQWord);
+ __ fstrs(V0, Address(SP, -1 * sword_bytes, Address::PreIndex));
+ __ fstrs(V1, Address(SP, -1 * sword_bytes, Address::PreIndex));
+ __ fstrs(V2, Address(SP, -1 * sword_bytes, Address::PreIndex));
+ __ fstrs(V3, Address(SP, -1 * sword_bytes, Address::PreIndex));
+
+ __ fldrq(V4, Address(SP, 1 * qword_bytes, Address::PostIndex));
+ __ vmuls(V5, V4, V4);
+ __ fstrq(V5, Address(SP, -1 * qword_bytes, Address::PreIndex));
+
+ __ fldrs(V0, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V1, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V2, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V3, Address(SP, 1 * sword_bytes, Address::PostIndex));
+
+ __ fcvtds(V0, V0);
+ __ fcvtds(V1, V1);
+ __ fcvtds(V2, V2);
+ __ fcvtds(V3, V3);
+
+ __ faddd(V0, V0, V1);
+ __ faddd(V0, V0, V2);
+ __ faddd(V0, V0, V3);
+ __ ret();
+}
+
+
+ASSEMBLER_TEST_RUN(Vmuls, test) {
+ typedef int (*SimpleCode)();
+ EXPECT_EQ(14.0, EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()));
+}
+
+
+ASSEMBLER_TEST_GENERATE(Vdivs, assembler) {
+ __ LoadDImmediate(V0, 0.0, kNoPP);
+ __ LoadDImmediate(V1, 1.0, kNoPP);
+ __ LoadDImmediate(V2, 2.0, kNoPP);
+ __ LoadDImmediate(V3, 3.0, kNoPP);
+
+ __ fcvtsd(V0, V0);
+ __ fcvtsd(V1, V1);
+ __ fcvtsd(V2, V2);
+ __ fcvtsd(V3, V3);
+
+ const int sword_bytes = 1 << Log2OperandSizeBytes(kSWord);
+ const int qword_bytes = 1 << Log2OperandSizeBytes(kQWord);
+ __ fstrs(V0, Address(SP, -1 * sword_bytes, Address::PreIndex));
+ __ fstrs(V1, Address(SP, -1 * sword_bytes, Address::PreIndex));
+ __ fstrs(V2, Address(SP, -1 * sword_bytes, Address::PreIndex));
+ __ fstrs(V3, Address(SP, -1 * sword_bytes, Address::PreIndex));
+
+ __ fldrq(V4, Address(SP, 1 * qword_bytes, Address::PostIndex));
+ __ vdivs(V5, V4, V4);
+ __ fstrq(V5, Address(SP, -1 * qword_bytes, Address::PreIndex));
+
+ __ fldrs(V3, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V2, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V1, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V0, Address(SP, 1 * sword_bytes, Address::PostIndex));
+
+ __ fcvtds(V0, V0);
+ __ fcvtds(V1, V1);
+ __ fcvtds(V2, V2);
+ __ fcvtds(V3, V3);
+
+ __ faddd(V0, V1, V1);
+ __ faddd(V0, V0, V2);
+ __ faddd(V0, V0, V3);
+ __ ret();
+}
+
+
+ASSEMBLER_TEST_RUN(Vdivs, test) {
+ typedef int (*SimpleCode)();
+ EXPECT_EQ(4.0, EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()));
+}
+
+
+
+ASSEMBLER_TEST_GENERATE(Vaddd, assembler) {
+ __ LoadDImmediate(V0, 2.0, kNoPP);
+ __ LoadDImmediate(V1, 3.0, kNoPP);
+
+ const int dword_bytes = 1 << Log2OperandSizeBytes(kDWord);
+ const int qword_bytes = 1 << Log2OperandSizeBytes(kQWord);
+ __ fstrd(V0, Address(SP, -1 * dword_bytes, Address::PreIndex));
+ __ fstrd(V1, Address(SP, -1 * dword_bytes, Address::PreIndex));
+
+ __ fldrq(V4, Address(SP, 1 * qword_bytes, Address::PostIndex));
+ __ vaddd(V5, V4, V4);
+ __ fstrq(V5, Address(SP, -1 * qword_bytes, Address::PreIndex));
+
+ __ fldrd(V1, Address(SP, 1 * dword_bytes, Address::PostIndex));
+ __ fldrd(V0, Address(SP, 1 * dword_bytes, Address::PostIndex));
+
+ __ faddd(V0, V0, V1);
+ __ ret();
+}
+
+
+ASSEMBLER_TEST_RUN(Vaddd, test) {
+ typedef int (*SimpleCode)();
+ EXPECT_EQ(10.0, EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()));
+}
+
+
+ASSEMBLER_TEST_GENERATE(Vsubd, assembler) {
+ __ LoadDImmediate(V0, 2.0, kNoPP);
+ __ LoadDImmediate(V1, 3.0, kNoPP);
+ __ LoadDImmediate(V5, 0.0, kNoPP);
+
+ const int dword_bytes = 1 << Log2OperandSizeBytes(kDWord);
+ const int qword_bytes = 1 << Log2OperandSizeBytes(kQWord);
+ __ fstrd(V0, Address(SP, -1 * dword_bytes, Address::PreIndex));
+ __ fstrd(V1, Address(SP, -1 * dword_bytes, Address::PreIndex));
+
+ __ fldrq(V4, Address(SP, 1 * qword_bytes, Address::PostIndex));
+ __ vsubd(V5, V5, V4);
+ __ fstrq(V5, Address(SP, -1 * qword_bytes, Address::PreIndex));
+
+ __ fldrd(V1, Address(SP, 1 * dword_bytes, Address::PostIndex));
+ __ fldrd(V0, Address(SP, 1 * dword_bytes, Address::PostIndex));
+
+ __ faddd(V0, V0, V1);
+ __ ret();
+}
+
+
+ASSEMBLER_TEST_RUN(Vsubd, test) {
+ typedef int (*SimpleCode)();
+ EXPECT_EQ(-5.0, EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()));
+}
+
+
+ASSEMBLER_TEST_GENERATE(Vmuld, assembler) {
+ __ LoadDImmediate(V0, 2.0, kNoPP);
+ __ LoadDImmediate(V1, 3.0, kNoPP);
+
+ const int dword_bytes = 1 << Log2OperandSizeBytes(kDWord);
+ const int qword_bytes = 1 << Log2OperandSizeBytes(kQWord);
+ __ fstrd(V0, Address(SP, -1 * dword_bytes, Address::PreIndex));
+ __ fstrd(V1, Address(SP, -1 * dword_bytes, Address::PreIndex));
+
+ __ fldrq(V4, Address(SP, 1 * qword_bytes, Address::PostIndex));
+ __ vmuld(V5, V4, V4);
+ __ fstrq(V5, Address(SP, -1 * qword_bytes, Address::PreIndex));
+
+ __ fldrd(V1, Address(SP, 1 * dword_bytes, Address::PostIndex));
+ __ fldrd(V0, Address(SP, 1 * dword_bytes, Address::PostIndex));
+
+ __ faddd(V0, V0, V1);
+ __ ret();
+}
+
+
+ASSEMBLER_TEST_RUN(Vmuld, test) {
+ typedef int (*SimpleCode)();
+ EXPECT_EQ(13.0, EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()));
+}
+
+
+ASSEMBLER_TEST_GENERATE(Vdivd, assembler) {
+ __ LoadDImmediate(V0, 2.0, kNoPP);
+ __ LoadDImmediate(V1, 3.0, kNoPP);
+
+ const int dword_bytes = 1 << Log2OperandSizeBytes(kDWord);
+ const int qword_bytes = 1 << Log2OperandSizeBytes(kQWord);
+ __ fstrd(V0, Address(SP, -1 * dword_bytes, Address::PreIndex));
+ __ fstrd(V1, Address(SP, -1 * dword_bytes, Address::PreIndex));
+
+ __ fldrq(V4, Address(SP, 1 * qword_bytes, Address::PostIndex));
+ __ vdivd(V5, V4, V4);
+ __ fstrq(V5, Address(SP, -1 * qword_bytes, Address::PreIndex));
+
+ __ fldrd(V1, Address(SP, 1 * dword_bytes, Address::PostIndex));
+ __ fldrd(V0, Address(SP, 1 * dword_bytes, Address::PostIndex));
+
+ __ faddd(V0, V0, V1);
+ __ ret();
+}
+
+
+ASSEMBLER_TEST_RUN(Vdivd, test) {
+ typedef int (*SimpleCode)();
+ EXPECT_EQ(2.0, EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()));
+}
+
+
+ASSEMBLER_TEST_GENERATE(Vdupd, assembler) {
+ __ LoadDImmediate(V0, 21.0, kNoPP);
+ __ vdupd(V1, V0, 0);
+
+ const int dword_bytes = 1 << Log2OperandSizeBytes(kDWord);
+ const int qword_bytes = 1 << Log2OperandSizeBytes(kQWord);
+ __ fstrq(V1, Address(SP, -1 * qword_bytes, Address::PreIndex));
+
+ __ fldrd(V2, Address(SP, 1 * dword_bytes, Address::PostIndex));
+ __ fldrd(V3, Address(SP, 1 * dword_bytes, Address::PostIndex));
+
+ __ faddd(V0, V2, V3);
+ __ ret();
+}
+
+
+ASSEMBLER_TEST_RUN(Vdupd, test) {
+ typedef int (*SimpleCode)();
+ EXPECT_EQ(42.0, EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()));
+}
+
+
+ASSEMBLER_TEST_GENERATE(Vdups, assembler) {
+ __ LoadDImmediate(V0, 21.0, kNoPP);
+ __ fcvtsd(V0, V0);
+ __ vdups(V1, V0, 0);
+
+
+ const int sword_bytes = 1 << Log2OperandSizeBytes(kSWord);
+ const int qword_bytes = 1 << Log2OperandSizeBytes(kQWord);
+ __ fstrq(V1, Address(SP, -1 * qword_bytes, Address::PreIndex));
+
+ __ fldrs(V3, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V2, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V1, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V0, Address(SP, 1 * sword_bytes, Address::PostIndex));
+
+ __ fcvtds(V0, V0);
+ __ fcvtds(V1, V1);
+ __ fcvtds(V2, V2);
+ __ fcvtds(V3, V3);
+
+ __ faddd(V0, V1, V1);
+ __ faddd(V0, V0, V2);
+ __ faddd(V0, V0, V3);
+ __ ret();
+}
+
+
+ASSEMBLER_TEST_RUN(Vdups, test) {
+ typedef int (*SimpleCode)();
+ EXPECT_EQ(84.0, EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()));
+}
+
+
+ASSEMBLER_TEST_GENERATE(Vinsd, assembler) {
+ __ LoadDImmediate(V5, 42.0, kNoPP);
+ __ vinsd(V1, 1, V5, 0); // V1[1] <- V0[0].
+
+ const int dword_bytes = 1 << Log2OperandSizeBytes(kDWord);
+ const int qword_bytes = 1 << Log2OperandSizeBytes(kQWord);
+ __ fstrq(V1, Address(SP, -1 * qword_bytes, Address::PreIndex));
+
+ __ fldrd(V2, Address(SP, 1 * dword_bytes, Address::PostIndex));
+ __ fldrd(V3, Address(SP, 1 * dword_bytes, Address::PostIndex));
+
+ __ fmovdd(V0, V3);
+ __ ret();
+}
+
+
+ASSEMBLER_TEST_RUN(Vinsd, test) {
+ typedef int (*SimpleCode)();
+ EXPECT_EQ(42.0, EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()));
+}
+
+
+ASSEMBLER_TEST_GENERATE(Vinss, assembler) {
+ __ LoadDImmediate(V0, 21.0, kNoPP);
+ __ fcvtsd(V0, V0);
+ __ vinss(V1, 3, V0, 0);
+ __ vinss(V1, 1, V0, 0);
+
+ const int sword_bytes = 1 << Log2OperandSizeBytes(kSWord);
+ const int qword_bytes = 1 << Log2OperandSizeBytes(kQWord);
+ __ fstrq(V1, Address(SP, -1 * qword_bytes, Address::PreIndex));
+
+ __ fldrs(V3, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V2, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V1, Address(SP, 1 * sword_bytes, Address::PostIndex));
+ __ fldrs(V0, Address(SP, 1 * sword_bytes, Address::PostIndex));
+
+ __ fcvtds(V0, V0);
+ __ fcvtds(V1, V1);
+ __ fcvtds(V2, V2);
+ __ fcvtds(V3, V3);
+
+ __ faddd(V0, V0, V1);
+ __ faddd(V0, V0, V2);
+ __ faddd(V0, V0, V3);
+ __ ret();
+}
+
+
+ASSEMBLER_TEST_RUN(Vinss, test) {
+ typedef int (*SimpleCode)();
+ EXPECT_EQ(42.0, EXECUTE_TEST_CODE_DOUBLE(SimpleCode, test->entry()));
+}
+
+
// Called from assembler_test.cc.
// LR: return address.
// R0: context.
diff --git a/runtime/vm/assembler_arm_test.cc b/runtime/vm/assembler_arm_test.cc
index 6973e27..8029184 100644
--- a/runtime/vm/assembler_arm_test.cc
+++ b/runtime/vm/assembler_arm_test.cc
@@ -683,6 +683,40 @@
}
+ASSEMBLER_TEST_GENERATE(AddCarry, assembler) {
+ __ LoadImmediate(R2, 0xFFFFFFFF);
+ __ mov(R1, ShifterOperand(1));
+ __ mov(R0, ShifterOperand(0));
+ __ adds(R2, R2, ShifterOperand(R1));
+ __ adcs(R0, R0, ShifterOperand(R0));
+ __ bx(LR);
+}
+
+
+ASSEMBLER_TEST_RUN(AddCarry, test) {
+ EXPECT(test != NULL);
+ typedef int (*AddCarry)();
+ EXPECT_EQ(1, EXECUTE_TEST_CODE_INT32(AddCarry, test->entry()));
+}
+
+
+ASSEMBLER_TEST_GENERATE(SubCarry, assembler) {
+ __ LoadImmediate(R2, 0x0);
+ __ mov(R1, ShifterOperand(1));
+ __ mov(R0, ShifterOperand(0));
+ __ subs(R2, R2, ShifterOperand(R1));
+ __ sbcs(R0, R0, ShifterOperand(R0));
+ __ bx(LR);
+}
+
+
+ASSEMBLER_TEST_RUN(SubCarry, test) {
+ EXPECT(test != NULL);
+ typedef int (*SubCarry)();
+ EXPECT_EQ(-1, EXECUTE_TEST_CODE_INT32(SubCarry, test->entry()));
+}
+
+
ASSEMBLER_TEST_GENERATE(AndOrr, assembler) {
__ mov(R1, ShifterOperand(40));
__ mov(R2, ShifterOperand(0));
diff --git a/runtime/vm/bitmap.cc b/runtime/vm/bitmap.cc
index c0f19ab..8a2ebde 100644
--- a/runtime/vm/bitmap.cc
+++ b/runtime/vm/bitmap.cc
@@ -72,6 +72,17 @@
}
+void BitmapBuilder::Print() const {
+ for (intptr_t i = 0; i < Length(); i++) {
+ if (Get(i)) {
+ OS::Print("1");
+ } else {
+ OS::Print("0");
+ }
+ }
+}
+
+
bool BitmapBuilder::GetBit(intptr_t bit_offset) const {
if (!InRange(bit_offset)) {
return false;
diff --git a/runtime/vm/bitmap.h b/runtime/vm/bitmap.h
index fc2c685..6d3376a3 100644
--- a/runtime/vm/bitmap.h
+++ b/runtime/vm/bitmap.h
@@ -47,6 +47,8 @@
// Sets min..max (inclusive) to value.
void SetRange(intptr_t min, intptr_t max, bool value);
+ void Print() const;
+
private:
static const intptr_t kInitialSizeInBytes = 16;
static const intptr_t kIncrementSizeInBytes = 16;
diff --git a/runtime/vm/code_generator.cc b/runtime/vm/code_generator.cc
index 6ea4fb8..faac1ea 100644
--- a/runtime/vm/code_generator.cc
+++ b/runtime/vm/code_generator.cc
@@ -615,7 +615,7 @@
caller_code.GetStaticCallTargetFunctionAt(caller_frame->pc()));
if (!target_function.HasCode()) {
const Error& error =
- Error::Handle(Compiler::CompileFunction(target_function));
+ Error::Handle(Compiler::CompileFunction(isolate, target_function));
if (!error.IsNull()) {
Exceptions::PropagateError(error);
}
@@ -895,7 +895,8 @@
ASSERT(ic_data.NumberOfChecks() > 0);
const Function& target = Function::Handle(ic_data.GetTargetAt(0));
if (!target.HasCode()) {
- const Error& error = Error::Handle(Compiler::CompileFunction(target));
+ const Error& error = Error::Handle(Compiler::CompileFunction(isolate,
+ target));
if (!error.IsNull()) {
Exceptions::PropagateError(error);
}
@@ -1066,6 +1067,67 @@
UNREACHABLE();
}
+ // The following code is used to stress test deoptimization and
+ // debugger stack tracing.
+ bool do_deopt = false;
+ bool do_stacktrace = false;
+ if ((FLAG_deoptimize_every > 0) || (FLAG_stacktrace_every > 0)) {
+ // TODO(turnidge): To make --deoptimize_every and
+ // --stacktrace-every faster we could move this increment/test to
+ // the generated code.
+ int32_t count = isolate->IncrementAndGetStackOverflowCount();
+ if (FLAG_deoptimize_every > 0 &&
+ (count % FLAG_deoptimize_every) == 0) {
+ do_deopt = true;
+ }
+ if (FLAG_stacktrace_every > 0 &&
+ (count % FLAG_stacktrace_every) == 0) {
+ do_stacktrace = true;
+ }
+ }
+ if ((FLAG_deoptimize_filter != NULL) || (FLAG_stacktrace_filter != NULL)) {
+ DartFrameIterator iterator;
+ StackFrame* frame = iterator.NextFrame();
+ ASSERT(frame != NULL);
+ const Code& code = Code::Handle(frame->LookupDartCode());
+ ASSERT(!code.IsNull());
+ const Function& function = Function::Handle(code.function());
+ ASSERT(!function.IsNull());
+ const char* function_name = function.ToFullyQualifiedCString();
+ ASSERT(function_name != NULL);
+ if (code.is_optimized() &&
+ FLAG_deoptimize_filter != NULL &&
+ strstr(function_name, FLAG_deoptimize_filter) != NULL) {
+ OS::PrintErr("*** Forcing deoptimization (%s)\n",
+ function.ToFullyQualifiedCString());
+ do_deopt = true;
+ }
+ if (FLAG_stacktrace_filter != NULL &&
+ strstr(function_name, FLAG_stacktrace_filter) != NULL) {
+ OS::PrintErr("*** Computing stacktrace (%s)\n",
+ function.ToFullyQualifiedCString());
+ do_stacktrace = true;
+ }
+ }
+ if (do_deopt) {
+ // TODO(turnidge): Consider using DeoptimizeAt instead.
+ DeoptimizeAll();
+ }
+ if (do_stacktrace) {
+ String& var_name = String::Handle();
+ Instance& var_value = Instance::Handle();
+ DebuggerStackTrace* stack = isolate->debugger()->StackTrace();
+ intptr_t num_frames = stack->Length();
+ for (intptr_t i = 0; i < num_frames; i++) {
+ ActivationFrame* frame = stack->FrameAt(i);
+ const int num_vars = frame->NumLocalVariables();
+ intptr_t unused;
+ for (intptr_t v = 0; v < num_vars; v++) {
+ frame->VariableAt(v, &var_name, &unused, &unused, &var_value);
+ }
+ }
+ }
+
uword interrupt_bits = isolate->GetAndClearInterrupts();
if ((interrupt_bits & Isolate::kStoreBufferInterrupt) != 0) {
if (FLAG_verbose_gc) {
@@ -1125,8 +1187,8 @@
// Since the code is referenced from the frame and the ZoneHandle,
// it cannot have been removed from the function.
ASSERT(!original_code.IsNull());
- const Error& error =
- Error::Handle(Compiler::CompileOptimizedFunction(function, osr_id));
+ const Error& error = Error::Handle(Compiler::CompileOptimizedFunction(
+ isolate, function, osr_id));
if (!error.IsNull()) {
Exceptions::PropagateError(error);
}
@@ -1144,69 +1206,6 @@
frame->set_pc(optimized_entry);
}
}
-
- // The following code is used to stress test deoptimization and
- // debugger stack tracing.
- bool do_deopt = false;
- bool do_stacktrace = false;
- if (FLAG_deoptimize_every > 0 ||
- FLAG_stacktrace_every > 0) {
- // TODO(turnidge): To make --deoptimize_every and
- // --stacktrace-every faster we could move this increment/test to
- // the generated code.
- int32_t count = isolate->IncrementAndGetStackOverflowCount();
- if (FLAG_deoptimize_every > 0 &&
- (count % FLAG_deoptimize_every) == 0) {
- do_deopt = true;
- }
- if (FLAG_stacktrace_every > 0 &&
- (count % FLAG_stacktrace_every) == 0) {
- do_stacktrace = true;
- }
- }
- if (FLAG_deoptimize_filter != NULL ||
- FLAG_stacktrace_filter != NULL) {
- DartFrameIterator iterator;
- StackFrame* frame = iterator.NextFrame();
- ASSERT(frame != NULL);
- const Code& code = Code::Handle(frame->LookupDartCode());
- ASSERT(!code.IsNull());
- const Function& function = Function::Handle(code.function());
- ASSERT(!function.IsNull());
- const char* function_name = function.ToFullyQualifiedCString();
- ASSERT(function_name != NULL);
- if (code.is_optimized() &&
- FLAG_deoptimize_filter != NULL &&
- strstr(function_name, FLAG_deoptimize_filter) != NULL) {
- OS::PrintErr("*** Forcing deoptimization (%s)\n",
- function.ToFullyQualifiedCString());
- do_deopt = true;
- }
- if (FLAG_stacktrace_filter != NULL &&
- strstr(function_name, FLAG_stacktrace_filter) != NULL) {
- OS::PrintErr("*** Computing stacktrace (%s)\n",
- function.ToFullyQualifiedCString());
- do_stacktrace = true;
- }
- }
- if (do_deopt) {
- // TODO(turnidge): Consider using DeoptimizeAt instead.
- DeoptimizeAll();
- }
- if (do_stacktrace) {
- String& var_name = String::Handle();
- Instance& var_value = Instance::Handle();
- DebuggerStackTrace* stack = isolate->debugger()->StackTrace();
- intptr_t num_frames = stack->Length();
- for (intptr_t i = 0; i < num_frames; i++) {
- ActivationFrame* frame = stack->FrameAt(i);
- const int num_vars = frame->NumLocalVariables();
- intptr_t unused;
- for (intptr_t v = 0; v < num_vars; v++) {
- frame->VariableAt(v, &var_name, &unused, &unused, &var_value);
- }
- }
- }
}
@@ -1231,7 +1230,8 @@
// The requesting function can be already optimized (reoptimization).
// Returns the Code object where to continue execution.
DEFINE_RUNTIME_ENTRY(OptimizeInvokedFunction, 1) {
- const Function& function = Function::CheckedHandle(arguments.ArgAt(0));
+ const Function& function = Function::CheckedHandle(isolate,
+ arguments.ArgAt(0));
ASSERT(!function.IsNull());
ASSERT(function.HasCode());
@@ -1239,15 +1239,15 @@
// Reset usage counter for reoptimization before calling optimizer to
// prevent recursive triggering of function optimization.
function.set_usage_counter(0);
- const Error& error =
- Error::Handle(Compiler::CompileOptimizedFunction(function));
+ const Error& error = Error::Handle(
+ isolate, Compiler::CompileOptimizedFunction(isolate, function));
if (!error.IsNull()) {
Exceptions::PropagateError(error);
}
- const Code& optimized_code = Code::Handle(function.CurrentCode());
+ const Code& optimized_code = Code::Handle(isolate, function.CurrentCode());
ASSERT(!optimized_code.IsNull());
}
- arguments.SetReturn(Code::Handle(function.CurrentCode()));
+ arguments.SetReturn(Code::Handle(isolate, function.CurrentCode()));
}
@@ -1266,19 +1266,16 @@
UNREACHABLE();
}
ASSERT(frame->IsDartFrame());
- const Code& caller_code = Code::Handle(frame->LookupDartCode());
+ const Code& caller_code = Code::Handle(isolate, frame->LookupDartCode());
ASSERT(caller_code.is_optimized());
const Function& target_function = Function::Handle(
- caller_code.GetStaticCallTargetFunctionAt(frame->pc()));
+ isolate, caller_code.GetStaticCallTargetFunctionAt(frame->pc()));
const Code& target_code = Code::Handle(
- caller_code.GetStaticCallTargetCodeAt(frame->pc()));
+ isolate, caller_code.GetStaticCallTargetCodeAt(frame->pc()));
ASSERT(!target_code.IsNull());
if (!target_function.HasCode()) {
- // If target code was unoptimized than the code must have been kept
- // connected to the function.
- ASSERT(target_code.is_optimized());
- const Error& error =
- Error::Handle(Compiler::CompileFunction(target_function));
+ const Error& error = Error::Handle(
+ isolate, Compiler::CompileFunction(isolate, target_function));
if (!error.IsNull()) {
Exceptions::PropagateError(error);
}
@@ -1286,8 +1283,10 @@
ASSERT(target_function.HasCode());
ASSERT(target_function.raw() == target_code.function());
- const Code& current_target_code = Code::Handle(target_function.CurrentCode());
- const Instructions& instrs = Instructions::Handle(caller_code.instructions());
+ const Code& current_target_code = Code::Handle(
+ isolate, target_function.CurrentCode());
+ const Instructions& instrs = Instructions::Handle(
+ isolate, caller_code.instructions());
{
WritableInstructionsScope writable(instrs.EntryPoint(), instrs.size());
CodePatcher::PatchStaticCallAt(frame->pc(), caller_code,
diff --git a/runtime/vm/compiler.cc b/runtime/vm/compiler.cc
index 2039190..31fa6f2 100644
--- a/runtime/vm/compiler.cc
+++ b/runtime/vm/compiler.cc
@@ -67,7 +67,8 @@
DEFINE_RUNTIME_ENTRY(CompileFunction, 1) {
const Function& function = Function::CheckedHandle(arguments.ArgAt(0));
ASSERT(!function.HasCode());
- const Error& error = Error::Handle(Compiler::CompileFunction(function));
+ const Error& error = Error::Handle(Compiler::CompileFunction(isolate,
+ function));
if (!error.IsNull()) {
Exceptions::PropagateError(error);
}
@@ -159,11 +160,12 @@
// We remember all the classes that are being compiled in these lists. This
// also allows us to reset the marked_for_parsing state in case we see an
// error.
- Class& parse_class = Class::Handle();
+ VMTagScope tagScope(isolate, VMTag::kCompileTopLevelTagId);
+ Class& parse_class = Class::Handle(isolate);
const GrowableObjectArray& parse_list =
- GrowableObjectArray::Handle(GrowableObjectArray::New(4));
+ GrowableObjectArray::Handle(isolate, GrowableObjectArray::New(4));
const GrowableObjectArray& patch_list =
- GrowableObjectArray::Handle(GrowableObjectArray::New(4));
+ GrowableObjectArray::Handle(isolate, GrowableObjectArray::New(4));
// Parse the class and all the interfaces it implements and super classes.
StackZone zone(isolate);
@@ -228,7 +230,7 @@
}
}
- Error& error = Error::Handle();
+ Error& error = Error::Handle(isolate);
error = isolate->object_store()->sticky_error();
isolate->object_store()->clear_sticky_error();
return error.raw();
@@ -264,7 +266,6 @@
TimerScope timer(FLAG_compiler_stats, &CompilerStats::codegen_timer);
bool is_compiled = false;
Isolate* isolate = Isolate::Current();
- VMTagScope tagScope(isolate, VMTag::kCompileTagId);
HANDLESCOPE(isolate);
isolate->set_cha_used(false);
@@ -857,17 +858,22 @@
}
-RawError* Compiler::CompileFunction(const Function& function) {
+RawError* Compiler::CompileFunction(Isolate* isolate,
+ const Function& function) {
+ VMTagScope tagScope(isolate, VMTag::kCompileUnoptimizedTagId);
return CompileFunctionHelper(function, false, Isolate::kNoDeoptId);
}
-RawError* Compiler::CompileOptimizedFunction(const Function& function,
+RawError* Compiler::CompileOptimizedFunction(Isolate* isolate,
+ const Function& function,
intptr_t osr_id) {
+ VMTagScope tagScope(isolate, VMTag::kCompileOptimizedTagId);
return CompileFunctionHelper(function, true, osr_id);
}
+// This is only used from unit tests.
RawError* Compiler::CompileParsedFunction(
ParsedFunction* parsed_function) {
Isolate* isolate = Isolate::Current();
@@ -892,9 +898,10 @@
RawError* Compiler::CompileAllFunctions(const Class& cls) {
- Error& error = Error::Handle();
- Array& functions = Array::Handle(cls.functions());
- Function& func = Function::Handle();
+ Isolate* isolate = Isolate::Current();
+ Error& error = Error::Handle(isolate);
+ Array& functions = Array::Handle(isolate, cls.functions());
+ Function& func = Function::Handle(isolate);
// Class dynamic lives in the vm isolate. Its array fields cannot be set to
// an empty array.
if (functions.IsNull()) {
@@ -908,7 +915,7 @@
if (!func.HasCode() &&
!func.is_abstract() &&
!func.IsRedirectingFactory()) {
- error = CompileFunction(func);
+ error = CompileFunction(isolate, func);
if (!error.IsNull()) {
return error.raw();
}
@@ -919,12 +926,12 @@
// more closures can be added to the end of the array. Compile all the
// closures until we have reached the end of the "worklist".
GrowableObjectArray& closures =
- GrowableObjectArray::Handle(cls.closures());
+ GrowableObjectArray::Handle(isolate, cls.closures());
if (!closures.IsNull()) {
for (int i = 0; i < closures.Length(); i++) {
func ^= closures.At(i);
if (!func.HasCode()) {
- error = CompileFunction(func);
+ error = CompileFunction(isolate, func);
if (!error.IsNull()) {
return error.raw();
}
diff --git a/runtime/vm/compiler.h b/runtime/vm/compiler.h
index ab49c80..5185b63 100644
--- a/runtime/vm/compiler.h
+++ b/runtime/vm/compiler.h
@@ -39,12 +39,13 @@
// Generates code for given function and sets its code field.
//
// Returns Error::null() if there is no compilation error.
- static RawError* CompileFunction(const Function& function);
+ static RawError* CompileFunction(Isolate* isolate, const Function& function);
// Generates optimized code for function.
//
// Returns Error::null() if there is no compilation error.
static RawError* CompileOptimizedFunction(
+ Isolate* isolate,
const Function& function,
intptr_t osr_id = Isolate::kNoDeoptId);
diff --git a/runtime/vm/constants_arm64.h b/runtime/vm/constants_arm64.h
index 59de4ad..d5550e4 100644
--- a/runtime/vm/constants_arm64.h
+++ b/runtime/vm/constants_arm64.h
@@ -453,6 +453,28 @@
BICS = LogicalShiftFixed | B30 | B29 | B21,
};
+// C.3.6.5
+enum SIMDCopyOp {
+ SIMDCopyMask = 0x9fe08400,
+ SIMDCopyFixed = DPSimd1Fixed | B10,
+ VDUP = SIMDCopyFixed | B30,
+ VINS = SIMDCopyFixed | B30 | B29,
+};
+
+// C.3.6.16
+enum SIMDThreeSameOp {
+ SIMDThreeSameMask = 0x9f200400,
+ SIMDThreeSameFixed = DPSimd1Fixed | B21 | B10,
+ VADDS = SIMDThreeSameFixed | B30 | B15 | B14 | B12,
+ VADDD = SIMDThreeSameFixed | B30 | B22 | B15 | B14 | B12,
+ VSUBS = SIMDThreeSameFixed | B30 | B23 | B15 | B14 | B12,
+ VSUBD = SIMDThreeSameFixed | B30 | B23 | B22 | B15 | B14 | B12,
+ VMULS = SIMDThreeSameFixed | B30 | B29 | B15 | B14 | B12 | B11,
+ VMULD = SIMDThreeSameFixed | B30 | B29 | B22 | B15 | B14 | B12 | B11,
+ VDIVS = SIMDThreeSameFixed | B30 | B29 | B15 | B14 | B13 | B12 | B11,
+ VDIVD = SIMDThreeSameFixed | B30 | B29 | B22 | B15 | B14 | B13 | B12 | B11,
+};
+
// C.3.6.22
enum FPCompareOp {
FPCompareMask = 0xffa0fc07,
@@ -528,11 +550,13 @@
_V(MiscDP2Source) \
_V(MiscDP3Source) \
_V(LogicalShift) \
+_V(SIMDCopy) \
+_V(SIMDThreeSame) \
+_V(FPCompare) \
_V(FPOneSource) \
_V(FPTwoSource) \
_V(FPImm) \
_V(FPIntCvt) \
-_V(FPCompare) \
enum Shift {
@@ -602,6 +626,10 @@
// Immediates.
kImm3Shift = 10,
kImm3Bits = 3,
+ kImm4Shift = 11,
+ kImm4Bits = 4,
+ kImm5Shift = 16,
+ kImm5Bits = 5,
kImm6Shift = 10,
kImm6Bits = 6,
kImm8Shift = 13,
diff --git a/runtime/vm/coverage.cc b/runtime/vm/coverage.cc
index c800703..0ea8bda 100644
--- a/runtime/vm/coverage.cc
+++ b/runtime/vm/coverage.cc
@@ -20,6 +20,7 @@
void CodeCoverage::CompileAndAdd(const Function& function,
const JSONArray& hits_arr) {
+ Isolate* isolate = Isolate::Current();
if (!function.HasCode()) {
// If the function should not be compiled or if the compilation failed,
// then just skip this method.
@@ -35,7 +36,8 @@
OS::Print("### Coverage skipped compiling: %s\n", function.ToCString());
return;
}
- const Error& err = Error::Handle(Compiler::CompileFunction(function));
+ const Error& err = Error::Handle(
+ isolate, Compiler::CompileFunction(isolate, function));
if (!err.IsNull()) {
OS::Print("### Coverage failed compiling:\n%s\n", err.ToErrorCString());
return;
@@ -43,7 +45,6 @@
}
ASSERT(function.HasCode());
- Isolate* isolate = Isolate::Current();
// Print the hit counts for all IC datas.
const Script& script = Script::Handle(function.script());
const Code& code = Code::Handle(function.unoptimized_code());
diff --git a/runtime/vm/dart_api_impl.cc b/runtime/vm/dart_api_impl.cc
index 96da554..6835887 100644
--- a/runtime/vm/dart_api_impl.cc
+++ b/runtime/vm/dart_api_impl.cc
@@ -81,6 +81,142 @@
}
+static bool GetNativeStringArgument(NativeArguments* arguments,
+ int arg_index,
+ Dart_Handle* str,
+ void** peer) {
+ ASSERT(peer != NULL);
+ if (Api::StringGetPeerHelper(arguments, arg_index, peer)) {
+ *str = NULL;
+ return true;
+ }
+ Isolate* isolate = arguments->isolate();
+ ASSERT(isolate == Isolate::Current());
+ *peer = NULL;
+ REUSABLE_OBJECT_HANDLESCOPE(isolate);
+ Object& obj = isolate->ObjectHandle();
+ obj = arguments->NativeArgAt(arg_index);
+ if (RawObject::IsStringClassId(obj.GetClassId())) {
+ ASSERT(isolate->api_state() &&
+ isolate->api_state()->top_scope() != NULL);
+ *str = Api::NewHandle(isolate, obj.raw());
+ return true;
+ }
+ if (obj.IsNull()) {
+ *str = Api::Null();
+ return true;
+ }
+ return false;
+}
+
+
+static bool GetNativeIntegerArgument(NativeArguments* arguments,
+ int arg_index,
+ int64_t* value) {
+ ASSERT(value != NULL);
+ if (Api::GetNativeIntegerArgument(arguments, arg_index, value)) {
+ return true;
+ }
+ Isolate* isolate = arguments->isolate();
+ ASSERT(isolate == Isolate::Current());
+ REUSABLE_OBJECT_HANDLESCOPE(isolate);
+ Object& obj = isolate->ObjectHandle();
+ obj = arguments->NativeArgAt(arg_index);
+ intptr_t cid = obj.GetClassId();
+ if (cid == kBigintCid) {
+ const Bigint& bigint = Bigint::Cast(obj);
+ if (BigintOperations::FitsIntoInt64(bigint)) {
+ *value = BigintOperations::ToInt64(bigint);
+ return true;
+ }
+ }
+ return false;
+}
+
+
+static bool GetNativeUnsignedIntegerArgument(NativeArguments* arguments,
+ int arg_index,
+ uint64_t* value) {
+ ASSERT(value != NULL);
+ int64_t arg_value = 0;
+ if (Api::GetNativeIntegerArgument(arguments, arg_index, &arg_value)) {
+ *value = static_cast<uint64_t>(arg_value);
+ return true;
+ }
+ Isolate* isolate = arguments->isolate();
+ ASSERT(isolate == Isolate::Current());
+ REUSABLE_OBJECT_HANDLESCOPE(isolate);
+ Object& obj = isolate->ObjectHandle();
+ obj = arguments->NativeArgAt(arg_index);
+ intptr_t cid = obj.GetClassId();
+ if (cid == kBigintCid) {
+ const Bigint& bigint = Bigint::Cast(obj);
+ if (BigintOperations::FitsIntoUint64(bigint)) {
+ *value = BigintOperations::ToUint64(bigint);
+ return true;
+ }
+ }
+ return false;
+}
+
+
+static bool GetNativeDoubleArgument(NativeArguments* arguments,
+ int arg_index,
+ double* value) {
+ ASSERT(value != NULL);
+ if (Api::GetNativeDoubleArgument(arguments, arg_index, value)) {
+ return true;
+ }
+ Isolate* isolate = arguments->isolate();
+ ASSERT(isolate == Isolate::Current());
+ REUSABLE_OBJECT_HANDLESCOPE(isolate);
+ Object& obj = isolate->ObjectHandle();
+ obj = arguments->NativeArgAt(arg_index);
+ intptr_t cid = obj.GetClassId();
+ if (cid == kBigintCid) {
+ *value = Bigint::Cast(obj).AsDoubleValue();
+ return true;
+ }
+ return false;
+}
+
+
+static Dart_Handle GetNativeFieldsOfArgument(NativeArguments* arguments,
+ int arg_index,
+ int num_fields,
+ intptr_t* field_values,
+ const char* current_func) {
+ ASSERT(field_values != NULL);
+ if (Api::GetNativeFieldsOfArgument(arguments,
+ arg_index,
+ num_fields,
+ field_values)) {
+ return Api::Success();
+ }
+ Isolate* isolate = arguments->isolate();
+ ASSERT(isolate == Isolate::Current());
+ REUSABLE_OBJECT_HANDLESCOPE(isolate);
+ Object& obj = isolate->ObjectHandle();
+ obj = arguments->NativeArgAt(arg_index);
+ if (obj.IsNull()) {
+ memset(field_values, 0, (num_fields * sizeof(field_values[0])));
+ return Api::Success();
+ }
+ // We did not succeed in extracting the native fields report the
+ // appropriate error.
+ if (!obj.IsInstance()) {
+ return Api::NewError("%s expects argument at index '%d' to be of"
+ " type Instance.", current_func, arg_index);
+ }
+ const Instance& instance = Instance::Cast(obj);
+ int field_count = instance.NumNativeFields();
+ ASSERT(num_fields != field_count);
+ return Api::NewError(
+ "%s: expected %d 'num_fields' but was passed in %d.",
+ current_func, field_count, num_fields);
+}
+
+
Heap::Space SpaceForExternal(Isolate* isolate, intptr_t size) {
Heap* heap = isolate->heap();
// If 'size' would be a significant fraction of new space, then use old.
@@ -356,6 +492,74 @@
}
+bool Api::GetNativeIntegerArgument(NativeArguments* arguments,
+ int arg_index,
+ int64_t* value) {
+ NoGCScope no_gc_scope;
+ RawObject* raw_obj = arguments->NativeArgAt(arg_index);
+ if (raw_obj->IsHeapObject()) {
+ intptr_t cid = raw_obj->GetClassId();
+ if (cid == kMintCid) {
+ *value = reinterpret_cast<RawMint*>(raw_obj)->ptr()->value_;
+ return true;
+ }
+ return false;
+ }
+ *value = Smi::Value(reinterpret_cast<RawSmi*>(raw_obj));
+ return true;
+}
+
+
+bool Api::GetNativeDoubleArgument(NativeArguments* arguments,
+ int arg_index,
+ double* value) {
+ NoGCScope no_gc_scope;
+ RawObject* raw_obj = arguments->NativeArgAt(arg_index);
+ if (raw_obj->IsHeapObject()) {
+ intptr_t cid = raw_obj->GetClassId();
+ if (cid == kDoubleCid) {
+ *value = reinterpret_cast<RawDouble*>(raw_obj)->ptr()->value_;
+ return true;
+ }
+ if (cid == kMintCid) {
+ *value = static_cast<double>(
+ reinterpret_cast<RawMint*>(raw_obj)->ptr()->value_);
+ return true;
+ }
+ return false;
+ }
+ *value = static_cast<double>(Smi::Value(reinterpret_cast<RawSmi*>(raw_obj)));
+ return true;
+}
+
+
+bool Api::GetNativeFieldsOfArgument(NativeArguments* arguments,
+ int arg_index,
+ int num_fields,
+ intptr_t* field_values) {
+ NoGCScope no_gc_scope;
+ RawObject* raw_obj = arguments->NativeArgAt(arg_index);
+ if (raw_obj->IsHeapObject()) {
+ intptr_t cid = raw_obj->GetClassId();
+ if (cid > kNumPredefinedCids) {
+ RawTypedData* native_fields = *reinterpret_cast<RawTypedData**>(
+ RawObject::ToAddr(raw_obj) + sizeof(RawObject));
+ if (native_fields == TypedData::null()) {
+ memset(field_values, 0, (num_fields * sizeof(field_values[0])));
+ } else if (num_fields == Smi::Value(native_fields->ptr()->length_)) {
+ intptr_t* native_values =
+ bit_cast<intptr_t*, uint8_t*>(native_fields->ptr()->data_);
+ memmove(field_values,
+ native_values,
+ (num_fields * sizeof(field_values[0])));
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+
void Api::SetWeakHandleReturnValue(NativeArguments* args,
Dart_WeakPersistentHandle retval) {
args->SetReturnUnsafe(FinalizablePersistentHandle::Cast(retval)->raw());
@@ -4014,6 +4218,140 @@
}
+DART_EXPORT Dart_Handle Dart_GetNativeArguments(
+ Dart_NativeArguments args,
+ int num_arguments,
+ const Dart_NativeArgument_Descriptor* argument_descriptors,
+ Dart_NativeArgument_Value* arg_values) {
+ NativeArguments* arguments = reinterpret_cast<NativeArguments*>(args);
+ ASSERT(arguments->isolate() == Isolate::Current());
+ if (arg_values == NULL) {
+ RETURN_NULL_ERROR(arg_values);
+ }
+ for (int i = 0; i < num_arguments; i++) {
+ Dart_NativeArgument_Descriptor desc = argument_descriptors[i];
+ Dart_NativeArgument_Type arg_type = static_cast<Dart_NativeArgument_Type>(
+ desc.type);
+ int arg_index = desc.index;
+ ASSERT(arg_index >= 0 && arg_index < arguments->NativeArgCount());
+ Dart_NativeArgument_Value* native_value = &(arg_values[i]);
+ switch (arg_type) {
+ case Dart_NativeArgument_kBool:
+ if (!Api::GetNativeBooleanArgument(arguments,
+ arg_index,
+ &(native_value->as_bool))) {
+ return Api::NewError("%s: expects argument at index %d to be of"
+ " type Boolean.", CURRENT_FUNC, i);
+ }
+ break;
+
+ case Dart_NativeArgument_kInt32: {
+ int64_t value = 0;
+ if (!GetNativeIntegerArgument(arguments,
+ arg_index,
+ &value)) {
+ return Api::NewError("%s: expects argument at index %d to be of"
+ " type Integer.", CURRENT_FUNC, i);
+ }
+ if (value < INT_MIN || value > INT_MAX) {
+ return Api::NewError("%s: argument value at index %d is out of range",
+ CURRENT_FUNC, i);
+ }
+ native_value->as_int32 = static_cast<int32_t>(value);
+ break;
+ }
+
+ case Dart_NativeArgument_kUint32: {
+ int64_t value = 0;
+ if (!GetNativeIntegerArgument(arguments,
+ arg_index,
+ &value)) {
+ return Api::NewError("%s: expects argument at index %d to be of"
+ " type Integer.", CURRENT_FUNC, i);
+ }
+ if (value < 0 || value > UINT_MAX) {
+ return Api::NewError("%s: argument value at index %d is out of range",
+ CURRENT_FUNC, i);
+ }
+ native_value->as_uint32 = static_cast<uint32_t>(value);
+ break;
+ }
+
+ case Dart_NativeArgument_kInt64: {
+ int64_t value = 0;
+ if (!GetNativeIntegerArgument(arguments,
+ arg_index,
+ &value)) {
+ return Api::NewError("%s: expects argument at index %d to be of"
+ " type Integer.", CURRENT_FUNC, i);
+ }
+ native_value->as_int64 = value;
+ break;
+ }
+
+ case Dart_NativeArgument_kUint64: {
+ uint64_t value = 0;
+ if (!GetNativeUnsignedIntegerArgument(arguments,
+ arg_index,
+ &value)) {
+ return Api::NewError("%s: expects argument at index %d to be of"
+ " type Integer.", CURRENT_FUNC, i);
+ }
+ native_value->as_uint64 = value;
+ break;
+ }
+
+ case Dart_NativeArgument_kDouble:
+ if (!GetNativeDoubleArgument(arguments,
+ arg_index,
+ &(native_value->as_double))) {
+ return Api::NewError("%s: expects argument at index %d to be of"
+ " type Double.", CURRENT_FUNC, i);
+ }
+ break;
+
+ case Dart_NativeArgument_kString:
+ if (!GetNativeStringArgument(arguments,
+ arg_index,
+ &(native_value->as_string.dart_str),
+ &(native_value->as_string.peer))) {
+ return Api::NewError("%s: expects argument at index %d to be of"
+ " type String.", CURRENT_FUNC, i);
+ }
+ break;
+
+ case Dart_NativeArgument_kNativeFields: {
+ Dart_Handle result = GetNativeFieldsOfArgument(
+ arguments,
+ arg_index,
+ native_value->as_native_fields.num_fields,
+ native_value->as_native_fields.values,
+ CURRENT_FUNC);
+ if (result != Api::Success()) {
+ return result;
+ }
+ break;
+ }
+
+ case Dart_NativeArgument_kInstance: {
+ Isolate* isolate = arguments->isolate();
+ ASSERT(isolate == Isolate::Current());
+ ASSERT(isolate->api_state() &&
+ isolate->api_state()->top_scope() != NULL);
+ native_value->as_instance =
+ Api::NewHandle(isolate, arguments->NativeArgAt(arg_index));
+ break;
+ }
+
+ default:
+ return Api::NewError("%s: invalid argument type %d.",
+ CURRENT_FUNC, arg_type);
+ }
+ }
+ return Api::Success();
+}
+
+
DART_EXPORT Dart_Handle Dart_GetNativeArgument(Dart_NativeArguments args,
int index) {
TRACE_API_CALL(CURRENT_FUNC);
@@ -4048,30 +4386,11 @@
if (field_values == NULL) {
RETURN_NULL_ERROR(field_values);
}
- Isolate* isolate = arguments->isolate();
- ASSERT(isolate == Isolate::Current());
- REUSABLE_OBJECT_HANDLESCOPE(isolate);
- Object& obj = isolate->ObjectHandle();
- obj = arguments->NativeArgAt(arg_index);
- if (obj.IsNull()) {
- for (intptr_t i = 0; i < num_fields; i++) {
- field_values[i] = 0;
- }
- return Api::Success();
- }
- if (!obj.IsInstance()) {
- return Api::NewError("%s expects argument at index '%d' to be of"
- " type Instance.", CURRENT_FUNC, arg_index);
- }
- const Instance& instance = Instance::Cast(obj);
- uint16_t field_count = instance.NumNativeFields();
- if (num_fields != field_count) {
- return Api::NewError(
- "%s: invalid 'field_values' array specified for returning field values",
- CURRENT_FUNC);
- }
- instance.GetNativeFields(num_fields, field_values);
- return Api::Success();
+ return GetNativeFieldsOfArgument(arguments,
+ arg_index,
+ num_fields,
+ field_values,
+ CURRENT_FUNC);
}
@@ -4094,23 +4413,12 @@
int arg_index,
void** peer) {
NativeArguments* arguments = reinterpret_cast<NativeArguments*>(args);
- Isolate* isolate = arguments->isolate();
- ASSERT(isolate == Isolate::Current());
- if (Api::StringGetPeerHelper(arguments, arg_index, peer)) {
- return Api::Success();
+ Dart_Handle result = Api::Null();
+ if (!GetNativeStringArgument(arguments, arg_index, &result, peer)) {
+ return Api::NewError("%s expects argument at %d to be of"
+ " type String.", CURRENT_FUNC, arg_index);
}
- *peer = NULL;
- REUSABLE_OBJECT_HANDLESCOPE(isolate);
- Object& obj = isolate->ObjectHandle();
- obj = arguments->NativeArgAt(arg_index);
- if (RawObject::IsStringClassId(obj.GetClassId())) {
- return Api::NewHandle(isolate, obj.raw());
- }
- if (obj.IsNull()) {
- return Api::Null();
- }
- return Api::NewError("%s expects argument to be of"
- " type String.", CURRENT_FUNC);
+ return result;
}
@@ -4124,33 +4432,11 @@
"%s: argument 'index' out of range. Expected 0..%d but saw %d.",
CURRENT_FUNC, arguments->NativeArgCount() - 1, index);
}
- Isolate* isolate = arguments->isolate();
- ASSERT(isolate == Isolate::Current());
- REUSABLE_OBJECT_HANDLESCOPE(isolate);
- Object& obj = isolate->ObjectHandle();
- obj = arguments->NativeArgAt(index);
- intptr_t cid = obj.GetClassId();
- if (cid == kSmiCid) {
- *value = Smi::Cast(obj).Value();
- return Api::Success();
+ if (!GetNativeIntegerArgument(arguments, index, value)) {
+ return Api::NewError("%s: expects argument at %d to be of"
+ " type Integer.", CURRENT_FUNC, index);
}
- if (cid == kMintCid) {
- *value = Mint::Cast(obj).value();
- return Api::Success();
- }
- if (cid == kBigintCid) {
- const Bigint& bigint = Bigint::Cast(obj);
- if (BigintOperations::FitsIntoInt64(bigint)) {
- *value = BigintOperations::ToInt64(bigint);
- return Api::Success();
- }
- return Api::NewError(
- "%s: argument %d is a big integer that does not fit in 'value'.",
- CURRENT_FUNC, index);
- }
- return Api::NewError(
- "%s: argument %d is not an Integer argument.",
- CURRENT_FUNC, index);
+ return Api::Success();
}
@@ -4164,11 +4450,11 @@
"%s: argument 'index' out of range. Expected 0..%d but saw %d.",
CURRENT_FUNC, arguments->NativeArgCount() - 1, index);
}
- if (Api::GetNativeBooleanArgument(arguments, index, value)) {
- return Api::Success();
+ if (!Api::GetNativeBooleanArgument(arguments, index, value)) {
+ return Api::NewError("%s: expects argument at %d to be of type Boolean.",
+ CURRENT_FUNC, index);
}
- return Api::NewError("%s: argument %d is not a Boolean argument.",
- CURRENT_FUNC, index);
+ return Api::Success();
}
@@ -4182,31 +4468,11 @@
"%s: argument 'index' out of range. Expected 0..%d but saw %d.",
CURRENT_FUNC, arguments->NativeArgCount() - 1, index);
}
- Isolate* isolate = arguments->isolate();
- ASSERT(isolate == Isolate::Current());
- REUSABLE_OBJECT_HANDLESCOPE(isolate);
- Object& obj = isolate->ObjectHandle();
- obj = arguments->NativeArgAt(index);
- intptr_t cid = obj.GetClassId();
- if (cid == kDoubleCid) {
- *value = Double::Cast(obj).value();
- return Api::Success();
+ if (!GetNativeDoubleArgument(arguments, index, value)) {
+ return Api::NewError("%s: expects argument at %d to be of"
+ " type Double.", CURRENT_FUNC, index);
}
- if (cid == kSmiCid) {
- *value = Smi::Cast(obj).AsDoubleValue();
- return Api::Success();
- }
- if (cid == kMintCid) {
- *value = Mint::Cast(obj).AsDoubleValue();
- return Api::Success();
- }
- if (cid == kBigintCid) {
- *value = Bigint::Cast(obj).AsDoubleValue();
- return Api::Success();
- }
- return Api::NewError(
- "%s: argument %d is not a Double argument.",
- CURRENT_FUNC, index);
+ return Api::Success();
}
diff --git a/runtime/vm/dart_api_impl.h b/runtime/vm/dart_api_impl.h
index c26afac..7595911 100644
--- a/runtime/vm/dart_api_impl.h
+++ b/runtime/vm/dart_api_impl.h
@@ -224,6 +224,22 @@
int arg_index,
bool* value);
+ // Helper function to get the integer value of a Integer native argument.
+ static bool GetNativeIntegerArgument(NativeArguments* args,
+ int arg_index,
+ int64_t* value);
+
+ // Helper function to get the double value of a Double native argument.
+ static bool GetNativeDoubleArgument(NativeArguments* args,
+ int arg_index,
+ double* value);
+
+ // Helper function to get the native fields of an Instance native argument.
+ static bool GetNativeFieldsOfArgument(NativeArguments* args,
+ int arg_index,
+ int num_fields,
+ intptr_t* field_values);
+
// Helper function to set the return value of native functions.
static void SetReturnValue(NativeArguments* args, Dart_Handle retval) {
args->SetReturnUnsafe(UnwrapHandle(retval));
diff --git a/runtime/vm/dart_api_impl_test.cc b/runtime/vm/dart_api_impl_test.cc
index f0325b6..3470c06 100644
--- a/runtime/vm/dart_api_impl_test.cc
+++ b/runtime/vm/dart_api_impl_test.cc
@@ -5208,7 +5208,204 @@
}
-void NativeArgumentCounter(Dart_NativeArguments args) {
+static intptr_t kNativeArgumentNativeField1Value = 30;
+static intptr_t kNativeArgumentNativeField2Value = 40;
+static intptr_t native_arg_str_peer = 100;
+static void NativeArgumentCreate(Dart_NativeArguments args) {
+ Dart_Handle lib = Dart_LookupLibrary(NewString(TestCase::url()));
+ Dart_Handle type = Dart_GetType(lib, NewString("MyObject"), 0, NULL);
+ EXPECT_VALID(type);
+
+ // Allocate without a constructor.
+ Dart_Handle obj = Dart_Allocate(type);
+ EXPECT_VALID(obj);
+
+ // Setup native fields.
+ Dart_SetNativeInstanceField(obj, 0, kNativeArgumentNativeField1Value);
+ Dart_SetNativeInstanceField(obj, 1, kNativeArgumentNativeField2Value);
+ kNativeArgumentNativeField1Value *= 2;
+ kNativeArgumentNativeField2Value *= 2;
+ Dart_SetReturnValue(args, obj);
+}
+
+
+static void NativeArgumentAccess(Dart_NativeArguments args) {
+ const int kNumNativeFields = 2;
+
+ // Test different argument types with a valid descriptor set.
+ {
+ const char* cstr = NULL;
+ intptr_t native_fields1[kNumNativeFields];
+ intptr_t native_fields2[kNumNativeFields];
+ const Dart_NativeArgument_Descriptor arg_descriptors[9] = {
+ { Dart_NativeArgument_kNativeFields, 0 },
+ { Dart_NativeArgument_kInt32, 1 },
+ { Dart_NativeArgument_kUint64, 2 },
+ { Dart_NativeArgument_kBool, 3 },
+ { Dart_NativeArgument_kDouble, 4 },
+ { Dart_NativeArgument_kString, 5 },
+ { Dart_NativeArgument_kString, 6 },
+ { Dart_NativeArgument_kNativeFields, 7 },
+ { Dart_NativeArgument_kInstance, 7 },
+ };
+ Dart_NativeArgument_Value arg_values[9];
+ arg_values[0].as_native_fields.num_fields = kNumNativeFields;
+ arg_values[0].as_native_fields.values = native_fields1;
+ arg_values[7].as_native_fields.num_fields = kNumNativeFields;
+ arg_values[7].as_native_fields.values = native_fields2;
+ Dart_Handle result = Dart_GetNativeArguments(args,
+ 9,
+ arg_descriptors,
+ arg_values);
+ EXPECT_VALID(result);
+
+ EXPECT(arg_values[0].as_native_fields.values[0] == 30);
+ EXPECT(arg_values[0].as_native_fields.values[1] == 40);
+
+ EXPECT(arg_values[1].as_int32 == 77);
+
+ EXPECT(arg_values[2].as_uint64 == 0xffffffffffffffff);
+
+ EXPECT(arg_values[3].as_bool == true);
+
+ EXPECT(arg_values[4].as_double == 3.14);
+
+ EXPECT_VALID(arg_values[5].as_string.dart_str);
+ EXPECT(Dart_IsString(arg_values[5].as_string.dart_str));
+ EXPECT_VALID(Dart_StringToCString(arg_values[5].as_string.dart_str, &cstr));
+ EXPECT_STREQ("abcdefg", cstr);
+ EXPECT(arg_values[5].as_string.peer == NULL);
+
+ EXPECT(arg_values[6].as_string.dart_str == NULL);
+ EXPECT(arg_values[6].as_string.peer ==
+ reinterpret_cast<void*>(&native_arg_str_peer));
+
+ EXPECT(arg_values[7].as_native_fields.values[0] == 60);
+ EXPECT(arg_values[7].as_native_fields.values[1] == 80);
+
+ EXPECT_VALID(arg_values[8].as_instance);
+ EXPECT(Dart_IsInstance(arg_values[8].as_instance));
+ int field_count = 0;
+ EXPECT_VALID(Dart_GetNativeInstanceFieldCount(
+ arg_values[8].as_instance, &field_count));
+ EXPECT(field_count == 2);
+ }
+
+ // Test with an invalid descriptor set (invalid type).
+ {
+ const Dart_NativeArgument_Descriptor arg_descriptors[8] = {
+ { Dart_NativeArgument_kInt32, 1 },
+ { Dart_NativeArgument_kUint64, 2 },
+ { Dart_NativeArgument_kString, 3 },
+ { Dart_NativeArgument_kDouble, 4 },
+ { Dart_NativeArgument_kString, 5 },
+ { Dart_NativeArgument_kString, 6 },
+ { Dart_NativeArgument_kNativeFields, 0 },
+ { Dart_NativeArgument_kNativeFields, 7 },
+ };
+ Dart_NativeArgument_Value arg_values[8];
+ Dart_Handle result = Dart_GetNativeArguments(args,
+ 8,
+ arg_descriptors,
+ arg_values);
+ EXPECT(Dart_IsError(result));
+ }
+
+ // Test with an invalid range error.
+ {
+ const Dart_NativeArgument_Descriptor arg_descriptors[8] = {
+ { Dart_NativeArgument_kInt32, 2 },
+ { Dart_NativeArgument_kUint64, 2 },
+ { Dart_NativeArgument_kBool, 3 },
+ { Dart_NativeArgument_kDouble, 4 },
+ { Dart_NativeArgument_kString, 5 },
+ { Dart_NativeArgument_kString, 6 },
+ { Dart_NativeArgument_kNativeFields, 0 },
+ { Dart_NativeArgument_kNativeFields, 7 },
+ };
+ Dart_NativeArgument_Value arg_values[8];
+ Dart_Handle result = Dart_GetNativeArguments(args,
+ 8,
+ arg_descriptors,
+ arg_values);
+ EXPECT(Dart_IsError(result));
+ }
+
+ Dart_SetIntegerReturnValue(args, 0);
+}
+
+
+static Dart_NativeFunction native_args_lookup(Dart_Handle name,
+ int argument_count,
+ bool* auto_scope_setup) {
+ const Object& obj = Object::Handle(Api::UnwrapHandle(name));
+ if (!obj.IsString()) {
+ return NULL;
+ }
+ ASSERT(auto_scope_setup != NULL);
+ *auto_scope_setup = true;
+ const char* function_name = obj.ToCString();
+ ASSERT(function_name != NULL);
+ if (!strcmp(function_name, "NativeArgument_Create")) {
+ return reinterpret_cast<Dart_NativeFunction>(&NativeArgumentCreate);
+ } else if (!strcmp(function_name, "NativeArgument_Access")) {
+ return reinterpret_cast<Dart_NativeFunction>(&NativeArgumentAccess);
+ }
+ return NULL;
+}
+
+
+TEST_CASE(GetNativeArguments) {
+ const char* kScriptChars =
+ "import 'dart:nativewrappers';"
+ "class MyObject extends NativeFieldWrapperClass2 {"
+ " static MyObject createObject() native 'NativeArgument_Create';"
+ " int accessFields(int arg1,"
+ " int arg2,"
+ " bool arg3,"
+ " double arg4,"
+ " String arg5,"
+ " String arg6,"
+ " MyObject arg7) native 'NativeArgument_Access';"
+ "}"
+ "int testMain(String extstr) {"
+ " String str = 'abcdefg';"
+ " MyObject obj1 = MyObject.createObject();"
+ " MyObject obj2 = MyObject.createObject();"
+ " return obj1.accessFields(77,"
+ " 0xffffffffffffffff,"
+ " true,"
+ " 3.14,"
+ " str,"
+ " extstr,"
+ " obj2);"
+ "}";
+
+ Dart_Handle lib = TestCase::LoadTestScript(
+ kScriptChars,
+ reinterpret_cast<Dart_NativeEntryResolver>(native_args_lookup));
+
+ intptr_t size;
+ Dart_Handle ascii_str = NewString("string");
+ EXPECT_VALID(ascii_str);
+ EXPECT_VALID(Dart_StringStorageSize(ascii_str, &size));
+ uint8_t ext_ascii_str[10];
+ Dart_Handle extstr = Dart_MakeExternalString(
+ ascii_str,
+ ext_ascii_str,
+ size,
+ reinterpret_cast<void*>(&native_arg_str_peer),
+ NULL);
+
+ Dart_Handle args[1];
+ args[0] = extstr;
+ Dart_Handle result = Dart_Invoke(lib, NewString("testMain"), 1, args);
+ EXPECT_VALID(result);
+ EXPECT(Dart_IsInteger(result));
+}
+
+
+static void NativeArgumentCounter(Dart_NativeArguments args) {
Dart_EnterScope();
int count = Dart_GetNativeArgumentCount(args);
Dart_SetReturnValue(args, Dart_NewInteger(count));
diff --git a/runtime/vm/dart_entry.cc b/runtime/vm/dart_entry.cc
index 1fde05a..b4a790a 100644
--- a/runtime/vm/dart_entry.cc
+++ b/runtime/vm/dart_entry.cc
@@ -45,8 +45,10 @@
// Get the entrypoint corresponding to the function specified, this
// will result in a compilation of the function if it is not already
// compiled.
+ Isolate* isolate = Isolate::Current();
if (!function.HasCode()) {
- const Error& error = Error::Handle(Compiler::CompileFunction(function));
+ const Error& error = Error::Handle(
+ isolate, Compiler::CompileFunction(isolate, function));
if (!error.IsNull()) {
return error.raw();
}
@@ -54,7 +56,7 @@
// Now Call the invoke stub which will invoke the dart function.
invokestub entrypoint = reinterpret_cast<invokestub>(
StubCode::InvokeDartCodeEntryPoint());
- const Code& code = Code::Handle(function.CurrentCode());
+ const Code& code = Code::Handle(isolate, function.CurrentCode());
ASSERT(!code.IsNull());
ASSERT(Isolate::Current()->no_callback_scope_depth() == 0);
#if defined(USING_SIMULATOR)
diff --git a/runtime/vm/debugger.cc b/runtime/vm/debugger.cc
index f8fd318..49d2d70 100644
--- a/runtime/vm/debugger.cc
+++ b/runtime/vm/debugger.cc
@@ -1178,8 +1178,9 @@
// Can't instrument native functions.
return;
}
+ Isolate* isolate = Isolate::Current();
if (!target_function.HasCode()) {
- Compiler::CompileFunction(target_function);
+ Compiler::CompileFunction(isolate, target_function);
// If there were any errors, ignore them silently and return without
// adding breakpoints to target.
if (!target_function.HasCode()) {
@@ -1188,11 +1189,11 @@
}
// Hang on to the code object before deoptimizing, in case deoptimization
// might cause the GC to run.
- Code& code = Code::Handle(target_function.unoptimized_code());
+ Code& code = Code::Handle(isolate, target_function.unoptimized_code());
ASSERT(!code.IsNull());
DeoptimizeWorld();
ASSERT(!target_function.HasOptimizedCode());
- PcDescriptors& desc = PcDescriptors::Handle(code.pc_descriptors());
+ PcDescriptors& desc = PcDescriptors::Handle(isolate, code.pc_descriptors());
for (intptr_t i = 0; i < desc.Length(); i++) {
if (IsSafePoint(desc, i)) {
CodeBreakpoint* bpt = GetCodeBreakpoint(desc.PC(i));
@@ -1222,22 +1223,12 @@
ActivationFrame* Debugger::CollectDartFrame(Isolate* isolate,
uword pc,
StackFrame* frame,
- const Code& code_param,
+ const Code& code,
const Array& deopt_frame,
intptr_t deopt_frame_offset,
ActivationFrame* callee_activation,
const Context& entry_ctx) {
- // TODO(turnidge): Remove the workaround below once...
- // https://code.google.com/p/dart/issues/detail?id=18384
- // ...is fixed.
- Code& code = Code::Handle(isolate);
- code = code_param.raw();
- if (!code.ContainsInstructionAt(pc)) {
- code = Code::LookupCode(pc);
- ASSERT(!code.IsNull());
- ASSERT(code.ContainsInstructionAt(pc));
- }
-
+ ASSERT(code.ContainsInstructionAt(pc));
// We provide either a callee activation or an entry context. Not both.
ASSERT(((callee_activation != NULL) && entry_ctx.IsNull()) ||
((callee_activation == NULL) && !entry_ctx.IsNull()));
@@ -1251,7 +1242,7 @@
// closure function, because it may not be on the stack yet.
bool is_closure_call = false;
const PcDescriptors& pc_desc =
- PcDescriptors::Handle(code.pc_descriptors());
+ PcDescriptors::Handle(isolate, code.pc_descriptors());
for (int i = 0; i < pc_desc.Length(); i++) {
if (pc_desc.PC(i) == pc &&
@@ -1317,7 +1308,8 @@
deopt_context->FillDestFrame();
deopt_context->MaterializeDeferredObjects();
- const Array& dest_frame = Array::Handle(deopt_context->DestFrameAsArray());
+ const Array& dest_frame = Array::Handle(isolate,
+ deopt_context->DestFrameAsArray());
isolate->set_deopt_context(NULL);
delete deopt_context;
@@ -1331,7 +1323,7 @@
DebuggerStackTrace* stack_trace = new DebuggerStackTrace(8);
StackFrameIterator iterator(false);
ActivationFrame* current_activation = NULL;
- Context& entry_ctx = Context::Handle(isolate->top_context());
+ Context& entry_ctx = Context::Handle(isolate, isolate->top_context());
Code& code = Code::Handle(isolate);
Code& inlined_code = Code::Handle(isolate);
Array& deopt_frame = Array::Handle(isolate);
@@ -1362,7 +1354,7 @@
inlined_code = it.code();
if (FLAG_trace_debugger_stacktrace) {
const Function& function =
- Function::Handle(inlined_code.function());
+ Function::Handle(isolate, inlined_code.function());
ASSERT(!function.IsNull());
OS::PrintErr("CollectStackTrace: visiting inlined function: %s\n",
function.ToFullyQualifiedCString());
diff --git a/runtime/vm/deopt_instructions.cc b/runtime/vm/deopt_instructions.cc
index c461d93..0c9a7ec 100644
--- a/runtime/vm/deopt_instructions.cc
+++ b/runtime/vm/deopt_instructions.cc
@@ -189,6 +189,7 @@
case DeoptInstr::kStackSlot:
case DeoptInstr::kDoubleStackSlot:
case DeoptInstr::kInt64StackSlot:
+ case DeoptInstr::kInt64StackSlotPair:
case DeoptInstr::kFloat32x4StackSlot:
case DeoptInstr::kInt32x4StackSlot:
case DeoptInstr::kFloat64x2StackSlot:
@@ -199,7 +200,8 @@
case DeoptInstr::kRegister:
case DeoptInstr::kFpuRegister:
- case DeoptInstr::kInt64FpuRegister:
+ case DeoptInstr::kInt64RegisterPair:
+ case DeoptInstr::kInt64StackSlotRegister:
case DeoptInstr::kFloat32x4FpuRegister:
case DeoptInstr::kInt32x4FpuRegister:
case DeoptInstr::kFloat64x2FpuRegister:
@@ -451,7 +453,7 @@
virtual const char* ToCString() const {
return Isolate::Current()->current_zone()->PrintToString(
- "ms%" Pd "", stack_slot_index_);
+ "int64 stack slot:%" Pd "", stack_slot_index_);
}
void Execute(DeoptContext* deopt_context, intptr_t* dest_addr) {
@@ -719,21 +721,28 @@
};
-class DeoptInt64FpuRegisterInstr: public DeoptInstr {
+class DeoptInt64RegisterPairInstr: public DeoptInstr {
public:
- explicit DeoptInt64FpuRegisterInstr(intptr_t reg_as_int)
- : reg_(static_cast<FpuRegister>(reg_as_int)) {}
+ DeoptInt64RegisterPairInstr(intptr_t lo_reg_as_int, intptr_t hi_reg_as_int)
+ : lo_reg_(static_cast<Register>(lo_reg_as_int)),
+ hi_reg_(static_cast<Register>(hi_reg_as_int)) {}
- virtual intptr_t source_index() const { return static_cast<intptr_t>(reg_); }
- virtual DeoptInstr::Kind kind() const { return kInt64FpuRegister; }
+ virtual intptr_t source_index() const {
+ return EncodeRegisters(static_cast<intptr_t>(lo_reg_),
+ static_cast<intptr_t>(hi_reg_));
+ }
+ virtual DeoptInstr::Kind kind() const { return kInt64RegisterPair; }
virtual const char* ToCString() const {
return Isolate::Current()->current_zone()->PrintToString(
- "%s(m)", Assembler::FpuRegisterName(reg_));
+ "int64 register pair: %s,%s", Assembler::RegisterName(hi_reg_),
+ Assembler::RegisterName(lo_reg_));
}
void Execute(DeoptContext* deopt_context, intptr_t* dest_addr) {
- int64_t value = deopt_context->FpuRegisterValueAsInt64(reg_);
+ uint32_t lo_value = deopt_context->RegisterValue(lo_reg_);
+ int32_t hi_value = deopt_context->RegisterValue(hi_reg_);
+ int64_t value = Utils::LowHighTo64Bits(lo_value, hi_value);
*reinterpret_cast<RawSmi**>(dest_addr) = Smi::New(0);
if (Smi::IsValid64(value)) {
*dest_addr = reinterpret_cast<intptr_t>(
@@ -744,10 +753,178 @@
}
}
- private:
- const FpuRegister reg_;
+ static const intptr_t kFieldWidth = kBitsPerWord / 2;
+ class LoRegister : public BitField<intptr_t, 0, kFieldWidth> { };
+ class HiRegister : public BitField<intptr_t, kFieldWidth, kFieldWidth> { };
+ static intptr_t EncodeRegisters(intptr_t lo_reg_as_int,
+ intptr_t hi_reg_as_int) {
+ return LoRegister::encode(lo_reg_as_int) |
+ HiRegister::encode(hi_reg_as_int);
+ }
- DISALLOW_COPY_AND_ASSIGN(DeoptInt64FpuRegisterInstr);
+ static intptr_t DecodeLoRegister(intptr_t v) {
+ return LoRegister::decode(v);
+ }
+
+ static intptr_t DecodeHiRegister(intptr_t v) {
+ return HiRegister::decode(v);
+ }
+
+ private:
+ const Register lo_reg_;
+ const Register hi_reg_;
+
+ DISALLOW_COPY_AND_ASSIGN(DeoptInt64RegisterPairInstr);
+};
+
+
+class DeoptInt64StackSlotPairInstr: public DeoptInstr {
+ public:
+ DeoptInt64StackSlotPairInstr(intptr_t lo_slot, intptr_t hi_slot)
+ : lo_slot_(static_cast<Register>(lo_slot)),
+ hi_slot_(static_cast<Register>(hi_slot)) {}
+
+ virtual intptr_t source_index() const {
+ return EncodeSlots(static_cast<intptr_t>(lo_slot_),
+ static_cast<intptr_t>(hi_slot_));
+ }
+ virtual DeoptInstr::Kind kind() const { return kInt64StackSlotPair; }
+
+ virtual const char* ToCString() const {
+ return Isolate::Current()->current_zone()->PrintToString(
+ "int64 stack slots: %" Pd", %" Pd "", lo_slot_, hi_slot_);
+ }
+
+ void Execute(DeoptContext* deopt_context, intptr_t* dest_addr) {
+ intptr_t lo_source_index =
+ deopt_context->source_frame_size() - lo_slot_ - 1;
+ int32_t* lo_source_addr = reinterpret_cast<int32_t*>(
+ deopt_context->GetSourceFrameAddressAt(lo_source_index));
+ intptr_t hi_source_index =
+ deopt_context->source_frame_size() - hi_slot_ - 1;
+ int32_t* hi_source_addr = reinterpret_cast<int32_t*>(
+ deopt_context->GetSourceFrameAddressAt(hi_source_index));
+ int64_t value = Utils::LowHighTo64Bits(*lo_source_addr, *hi_source_addr);
+ *reinterpret_cast<RawSmi**>(dest_addr) = Smi::New(0);
+ if (Smi::IsValid64(value)) {
+ *dest_addr = reinterpret_cast<intptr_t>(
+ Smi::New(static_cast<intptr_t>(value)));
+ } else {
+ deopt_context->DeferMintMaterialization(
+ value, reinterpret_cast<RawMint**>(dest_addr));
+ }
+ }
+
+ static const intptr_t kFieldWidth = kBitsPerWord / 2;
+ class LoSlot : public BitField<intptr_t, 0, kFieldWidth> { };
+ class HiSlot : public BitField<intptr_t, kFieldWidth, kFieldWidth> { };
+ static intptr_t EncodeSlots(intptr_t lo_slot,
+ intptr_t hi_slot) {
+ return LoSlot::encode(lo_slot) |
+ HiSlot::encode(hi_slot);
+ }
+
+ static intptr_t DecodeLoSlot(intptr_t v) {
+ return LoSlot::decode(v);
+ }
+
+ static intptr_t DecodeHiSlot(intptr_t v) {
+ return HiSlot::decode(v);
+ }
+
+ private:
+ const intptr_t lo_slot_;
+ const intptr_t hi_slot_;
+
+ DISALLOW_COPY_AND_ASSIGN(DeoptInt64StackSlotPairInstr);
+};
+
+
+class DeoptInt64StackSlotRegisterInstr : public DeoptInstr {
+ public:
+ DeoptInt64StackSlotRegisterInstr(intptr_t source_index,
+ intptr_t reg_as_int,
+ bool flip)
+ : slot_(source_index),
+ reg_(static_cast<Register>(reg_as_int)),
+ flip_(flip) {
+ // when flip_ is false, stack slot is low bits and reg is high bits.
+ // when flip_ is true, stack slot is high bits and reg is low bits.
+ }
+
+ virtual intptr_t source_index() const {
+ return Encode(static_cast<intptr_t>(slot_),
+ static_cast<intptr_t>(reg_),
+ flip_ ? 1 : 0);
+ }
+ virtual DeoptInstr::Kind kind() const { return kInt64StackSlotRegister; }
+
+ virtual const char* ToCString() const {
+ if (flip_) {
+ return Isolate::Current()->current_zone()->PrintToString(
+ "int64 reg: %s, stack slot: %" Pd "", Assembler::RegisterName(reg_),
+ slot_);
+ } else {
+ return Isolate::Current()->current_zone()->PrintToString(
+ "int64 stack slot: %" Pd", reg: %s", slot_,
+ Assembler::RegisterName(reg_));
+ }
+ }
+
+ void Execute(DeoptContext* deopt_context, intptr_t* dest_addr) {
+ intptr_t slot_source_index =
+ deopt_context->source_frame_size() - slot_ - 1;
+ int32_t* slot_source_addr = reinterpret_cast<int32_t*>(
+ deopt_context->GetSourceFrameAddressAt(slot_source_index));
+ int32_t slot_value = *slot_source_addr;
+ int32_t reg_value = deopt_context->RegisterValue(reg_);
+ int64_t value;
+ if (flip_) {
+ value = Utils::LowHighTo64Bits(reg_value, slot_value);
+ } else {
+ value = Utils::LowHighTo64Bits(slot_value, reg_value);
+ }
+ *reinterpret_cast<RawSmi**>(dest_addr) = Smi::New(0);
+ if (Smi::IsValid64(value)) {
+ *dest_addr = reinterpret_cast<intptr_t>(
+ Smi::New(static_cast<intptr_t>(value)));
+ } else {
+ deopt_context->DeferMintMaterialization(
+ value, reinterpret_cast<RawMint**>(dest_addr));
+ }
+ }
+
+ static const intptr_t kFieldWidth = kBitsPerWord / 2;
+ class Slot : public BitField<intptr_t, 0, kFieldWidth> { };
+ class Reg : public BitField<intptr_t, kFieldWidth, kFieldWidth - 1> { };
+ // 1 bit for the flip.
+ class Flip : public BitField<intptr_t, kFieldWidth * 2 - 1, 1> { };
+
+ static intptr_t Encode(intptr_t slot,
+ intptr_t reg_as_int,
+ bool flip) {
+ return Slot::encode(slot) |
+ Reg::encode(reg_as_int) |
+ Flip::encode(flip ? 1 : 0);
+ }
+
+ static intptr_t DecodeSlot(intptr_t v) {
+ return Slot::decode(v);
+ }
+
+ static intptr_t DecodeReg(intptr_t v) {
+ return Reg::decode(v);
+ }
+
+ static bool DecodeFlip(intptr_t v) {
+ return Flip::decode(v);
+ }
+
+ private:
+ const intptr_t slot_;
+ const Register reg_;
+ const bool flip_;
+ DISALLOW_COPY_AND_ASSIGN(DeoptInt64StackSlotRegisterInstr);
};
@@ -1139,7 +1316,28 @@
case kConstant: return new DeoptConstantInstr(source_index);
case kRegister: return new DeoptRegisterInstr(source_index);
case kFpuRegister: return new DeoptFpuRegisterInstr(source_index);
- case kInt64FpuRegister: return new DeoptInt64FpuRegisterInstr(source_index);
+ case kInt64RegisterPair: {
+ intptr_t lo_reg_as_int =
+ DeoptInt64RegisterPairInstr::LoRegister::decode(source_index);
+ intptr_t hi_reg_as_int =
+ DeoptInt64RegisterPairInstr::HiRegister::decode(source_index);
+ return new DeoptInt64RegisterPairInstr(lo_reg_as_int, hi_reg_as_int);
+ }
+ case kInt64StackSlotPair: {
+ intptr_t lo_slot =
+ DeoptInt64StackSlotPairInstr::LoSlot::decode(source_index);
+ intptr_t hi_slot =
+ DeoptInt64StackSlotPairInstr::HiSlot::decode(source_index);
+ return new DeoptInt64StackSlotPairInstr(lo_slot, hi_slot);
+ }
+ case kInt64StackSlotRegister: {
+ intptr_t slot =
+ DeoptInt64StackSlotRegisterInstr::Slot::decode(source_index);
+ intptr_t reg_as_int =
+ DeoptInt64StackSlotRegisterInstr::Reg::decode(source_index);
+ bool flip = DeoptInt64StackSlotRegisterInstr::Flip::decode(source_index);
+ return new DeoptInt64StackSlotRegisterInstr(slot, reg_as_int, flip);
+ }
case kFloat32x4FpuRegister:
return new DeoptFloat32x4FpuRegisterInstr(source_index);
case kFloat64x2FpuRegister:
@@ -1271,8 +1469,6 @@
} else if (source_loc.IsFpuRegister()) {
if (value->definition()->representation() == kUnboxedDouble) {
deopt_instr = new DeoptFpuRegisterInstr(source_loc.fpu_reg());
- } else if (value->definition()->representation() == kUnboxedMint) {
- deopt_instr = new DeoptInt64FpuRegisterInstr(source_loc.fpu_reg());
} else if (value->definition()->representation() == kUnboxedFloat32x4) {
deopt_instr = new DeoptFloat32x4FpuRegisterInstr(source_loc.fpu_reg());
} else if (value->definition()->representation() == kUnboxedInt32x4) {
@@ -1303,6 +1499,34 @@
ASSERT(value->definition()->representation() == kUnboxedFloat64x2);
deopt_instr = new DeoptFloat64x2StackSlotInstr(source_index);
}
+ } else if (source_loc.IsPairLocation()) {
+ ASSERT(value->definition()->representation() == kUnboxedMint);
+ // There are four cases to consider here:
+ // (R = Register, S = Stack slot).
+ // 1) R, R.
+ // 2) S, S.
+ // 3) R, S.
+ // 4) S, R.
+ PairLocation* pair = source_loc.AsPairLocation();
+ if (pair->At(0).IsRegister() && pair->At(1).IsRegister()) {
+ deopt_instr = new DeoptInt64RegisterPairInstr(pair->At(0).reg(),
+ pair->At(1).reg());
+ } else if (pair->At(0).IsStackSlot() && pair->At(1).IsStackSlot()) {
+ deopt_instr = new DeoptInt64StackSlotPairInstr(
+ CalculateStackIndex(pair->At(0)),
+ CalculateStackIndex(pair->At(1)));
+ } else if (pair->At(0).IsRegister() && pair->At(1).IsStackSlot()) {
+ deopt_instr = new DeoptInt64StackSlotRegisterInstr(
+ CalculateStackIndex(pair->At(1)),
+ pair->At(0).reg(),
+ true);
+ } else {
+ ASSERT(pair->At(0).IsStackSlot() && pair->At(1).IsRegister());
+ deopt_instr = new DeoptInt64StackSlotRegisterInstr(
+ CalculateStackIndex(pair->At(0)),
+ pair->At(1).reg(),
+ false);
+ }
} else if (source_loc.IsInvalid() &&
value->definition()->IsMaterializeObject()) {
const intptr_t index = FindMaterialization(
diff --git a/runtime/vm/deopt_instructions.h b/runtime/vm/deopt_instructions.h
index 551d043..23a5e97 100644
--- a/runtime/vm/deopt_instructions.h
+++ b/runtime/vm/deopt_instructions.h
@@ -58,21 +58,29 @@
intptr_t RegisterValue(Register reg) const {
ASSERT(cpu_registers_ != NULL);
+ ASSERT(reg >= 0);
+ ASSERT(reg < kNumberOfCpuRegisters);
return cpu_registers_[reg];
}
double FpuRegisterValue(FpuRegister reg) const {
ASSERT(fpu_registers_ != NULL);
+ ASSERT(reg >= 0);
+ ASSERT(reg < kNumberOfFpuRegisters);
return *reinterpret_cast<double*>(&fpu_registers_[reg]);
}
int64_t FpuRegisterValueAsInt64(FpuRegister reg) const {
ASSERT(fpu_registers_ != NULL);
+ ASSERT(reg >= 0);
+ ASSERT(reg < kNumberOfFpuRegisters);
return *reinterpret_cast<int64_t*>(&fpu_registers_[reg]);
}
simd128_value_t FpuRegisterValueAsSimd128(FpuRegister reg) const {
ASSERT(fpu_registers_ != NULL);
+ ASSERT(reg >= 0);
+ ASSERT(reg < kNumberOfFpuRegisters);
const float* address = reinterpret_cast<float*>(&fpu_registers_[reg]);
return simd128_value_t().readFrom(address);
}
@@ -216,16 +224,18 @@
kConstant,
kRegister,
kFpuRegister,
- kInt64FpuRegister,
+ kInt64RegisterPair,
kFloat32x4FpuRegister,
kFloat64x2FpuRegister,
kInt32x4FpuRegister,
kStackSlot,
kDoubleStackSlot,
kInt64StackSlot,
+ kInt64StackSlotPair,
kFloat32x4StackSlot,
kFloat64x2StackSlot,
kInt32x4StackSlot,
+ kInt64StackSlotRegister,
kPcMarker,
kPp,
kCallerFp,
diff --git a/runtime/vm/disassembler_arm64.cc b/runtime/vm/disassembler_arm64.cc
index c79e2d0..797d4d5 100644
--- a/runtime/vm/disassembler_arm64.cc
+++ b/runtime/vm/disassembler_arm64.cc
@@ -353,9 +353,29 @@
}
}
case 'c': {
- ASSERT(STRING_STARTS_WITH(format, "cond"));
- PrintCondition(instr);
- return 4;
+ if (format[1] == 's') {
+ ASSERT(STRING_STARTS_WITH(format, "csz"));
+ const int32_t imm5 = instr->Bits(16, 5);
+ char const* typ = "??";
+ if (imm5 & 0x1) {
+ typ = "b";
+ } else if (imm5 & 0x2) {
+ typ = "h";
+ } else if (imm5 & 0x4) {
+ typ = "s";
+ } else if (imm5 & 0x8) {
+ typ = "d";
+ }
+ buffer_pos_ += OS::SNPrint(current_position_in_buffer(),
+ remaining_size_in_buffer(),
+ "%s",
+ typ);
+ return 3;
+ } else {
+ ASSERT(STRING_STARTS_WITH(format, "cond"));
+ PrintCondition(instr);
+ return 4;
+ }
}
case 'd': {
if (format[4] == '2') {
@@ -422,7 +442,35 @@
return 2;
}
case 'i': { // 'imm12, 'imm16, 'immd
- if (format[3] == '1') {
+ if (format[1] == 'd') {
+ // Element index for a SIMD copy instruction.
+ ASSERT(STRING_STARTS_WITH(format, "idx"));
+ const int32_t imm4 = instr->Bits(11, 4);
+ const int32_t imm5 = instr->Bits(16, 5);
+ int32_t shift = 0;
+ int32_t imm = -1;
+ if (format[3] == '4') {
+ imm = imm4;
+ } else if (format[3] == '5') {
+ imm = imm5;
+ shift = 1;
+ }
+ int32_t idx = -1;
+ if (imm5 & 0x1) {
+ idx = imm >> shift;
+ } else if (imm5 & 0x2) {
+ idx = imm >> (shift + 1);
+ } else if (imm5 & 0x4) {
+ idx = imm >> (shift + 2);
+ } else if (imm5 & 0x8) {
+ idx = imm >> (shift + 3);
+ }
+ buffer_pos_ += OS::SNPrint(current_position_in_buffer(),
+ remaining_size_in_buffer(),
+ "[%d]",
+ idx);
+ return 4;
+ } else if (format[3] == '1') {
uint64_t imm;
int ret = 5;
if (format[4] == '2') {
@@ -490,7 +538,22 @@
return FormatRegister(instr, format);
}
case 'v': {
- return FormatVRegister(instr, format);
+ if (format[1] == 's') {
+ ASSERT(STRING_STARTS_WITH(format, "vsz"));
+ char const* sz_str;
+ if (instr->Bit(22) == 0) {
+ sz_str = "f32";
+ } else {
+ sz_str = "f64";
+ }
+ buffer_pos_ += OS::SNPrint(current_position_in_buffer(),
+ remaining_size_in_buffer(),
+ "%s",
+ sz_str);
+ return 3;
+ } else {
+ return FormatVRegister(instr, format);
+ }
}
case 's': { // 's: S flag.
if (format[1] == 'h') {
@@ -960,8 +1023,55 @@
}
+void ARM64Decoder::DecodeSIMDCopy(Instr* instr) {
+ const int32_t Q = instr->Bit(30);
+ const int32_t op = instr->Bit(29);
+ const int32_t imm4 = instr->Bits(11, 4);
+
+ if ((Q == 1) && (op == 0) && (imm4 == 0)) {
+ Format(instr, "vdup'csz 'vd, 'vn'idx5");
+ } else if ((Q == 1) && (op == 1)) {
+ Format(instr, "vins'csz 'vd'idx5, 'vn'idx4");
+ } else {
+ Unknown(instr);
+ }
+}
+
+
+void ARM64Decoder::DecodeSIMDThreeSame(Instr* instr) {
+ const int32_t Q = instr->Bit(30);
+ const int32_t U = instr->Bit(29);
+ const int32_t opcode = instr->Bits(11, 5);
+
+ if (Q == 0) {
+ Unknown(instr);
+ return;
+ }
+
+ if ((U == 0) && (opcode == 0x1a)) {
+ if (instr->Bit(23) == 0) {
+ Format(instr, "vadd'vsz 'vd, 'vn, 'vm");
+ } else {
+ Format(instr, "vsub'vsz 'vd, 'vn, 'vm");
+ }
+ } else if ((U == 1) && (opcode == 0x1b)) {
+ Format(instr, "vmul'vsz 'vd, 'vn, 'vm");
+ } else if ((U == 1) && (opcode == 0x1f)) {
+ Format(instr, "vdiv'vsz 'vd, 'vn, 'vm");
+ } else {
+ Unknown(instr);
+ }
+}
+
+
void ARM64Decoder::DecodeDPSimd1(Instr* instr) {
- Unknown(instr);
+ if (instr->IsSIMDCopyOp()) {
+ DecodeSIMDCopy(instr);
+ } else if (instr->IsSIMDThreeSameOp()) {
+ DecodeSIMDThreeSame(instr);
+ } else {
+ Unknown(instr);
+ }
}
diff --git a/runtime/vm/flow_graph.cc b/runtime/vm/flow_graph.cc
index a175511..e7f25a8e 100644
--- a/runtime/vm/flow_graph.cc
+++ b/runtime/vm/flow_graph.cc
@@ -104,7 +104,7 @@
void FlowGraph::InsertBefore(Instruction* next,
Instruction* instr,
Environment* env,
- Definition::UseKind use_kind) {
+ UseKind use_kind) {
InsertAfter(next->previous(), instr, env, use_kind);
}
@@ -112,8 +112,8 @@
void FlowGraph::InsertAfter(Instruction* prev,
Instruction* instr,
Environment* env,
- Definition::UseKind use_kind) {
- if (use_kind == Definition::kValue) {
+ UseKind use_kind) {
+ if (use_kind == kValue) {
ASSERT(instr->IsDefinition());
AllocateSSAIndexes(instr->AsDefinition());
}
@@ -126,8 +126,8 @@
Instruction* FlowGraph::AppendTo(Instruction* prev,
Instruction* instr,
Environment* env,
- Definition::UseKind use_kind) {
- if (use_kind == Definition::kValue) {
+ UseKind use_kind) {
+ if (use_kind == kValue) {
ASSERT(instr->IsDefinition());
AllocateSSAIndexes(instr->AsDefinition());
}
@@ -892,7 +892,7 @@
} else if (load != NULL) {
// The graph construction ensures we do not have an unused LoadLocal
// computation.
- ASSERT(definition->is_used());
+ ASSERT(definition->HasTemp());
intptr_t index = load->local().BitIndexIn(num_non_copied_params_);
result = (*env)[index];
@@ -918,13 +918,13 @@
if (drop->value() != NULL) {
result = drop->value()->definition();
}
- ASSERT((drop->value() != NULL) || !drop->is_used());
+ ASSERT((drop->value() != NULL) || !drop->HasTemp());
} else {
- ASSERT(definition->is_used());
+ ASSERT(definition->HasTemp());
result = GetConstant(constant->value());
}
// Update expression stack or remove from graph.
- if (definition->is_used()) {
+ if (definition->HasTemp()) {
ASSERT(result != NULL);
env->Add(result);
// We remove load/store/constant instructions when we find their
@@ -934,7 +934,7 @@
}
} else {
// Not a load, store, or constant.
- if (definition->is_used()) {
+ if (definition->HasTemp()) {
// Assign fresh SSA temporary and update expression stack.
AllocateSSAIndexes(definition);
env->Add(definition);
diff --git a/runtime/vm/flow_graph.h b/runtime/vm/flow_graph.h
index fc5214e..26116b8 100644
--- a/runtime/vm/flow_graph.h
+++ b/runtime/vm/flow_graph.h
@@ -133,18 +133,20 @@
ConstantInstr* GetConstant(const Object& object);
void AddToInitialDefinitions(Definition* defn);
+ enum UseKind { kEffect, kValue };
+
void InsertBefore(Instruction* next,
Instruction* instr,
Environment* env,
- Definition::UseKind use_kind);
+ UseKind use_kind);
void InsertAfter(Instruction* prev,
Instruction* instr,
Environment* env,
- Definition::UseKind use_kind);
+ UseKind use_kind);
Instruction* AppendTo(Instruction* prev,
Instruction* instr,
Environment* env,
- Definition::UseKind use_kind);
+ UseKind use_kind);
// Operations on the flow graph.
void ComputeSSA(intptr_t next_virtual_register_number,
diff --git a/runtime/vm/flow_graph_allocator.cc b/runtime/vm/flow_graph_allocator.cc
index 4a3d66d..bfc098f 100644
--- a/runtime/vm/flow_graph_allocator.cc
+++ b/runtime/vm/flow_graph_allocator.cc
@@ -398,11 +398,24 @@
return;
}
- OS::Print(" live range v%" Pd " [%" Pd ", %" Pd ") in ",
- vreg(), Start(), End());
+ OS::Print(" live range v%" Pd " [%" Pd ", %" Pd ") in ", vreg(),
+ Start(),
+ End());
assigned_location().Print();
+ if (spill_slot_.HasStackIndex()) {
+ intptr_t stack_slot = spill_slot_.stack_index();
+ OS::Print(" allocated spill slot: %" Pd "", stack_slot);
+ }
OS::Print("\n");
+ SafepointPosition* safepoint = first_safepoint();
+ while (safepoint != NULL) {
+ OS::Print(" Safepoint [%" Pd "]: ", safepoint->pos());
+ safepoint->locs()->stack_bitmap()->Print();
+ OS::Print("\n");
+ safepoint = safepoint->next();
+ }
+
UsePosition* use_pos = uses_;
for (UseInterval* interval = first_use_interval_;
interval != NULL;
@@ -564,6 +577,7 @@
GraphEntryInstr* graph_entry = flow_graph_.graph_entry();
for (intptr_t i = 0; i < graph_entry->initial_definitions()->length(); i++) {
Definition* defn = (*graph_entry->initial_definitions())[i];
+ ASSERT(!defn->HasPairRepresentation());
LiveRange* range = GetLiveRange(defn->ssa_temp_index());
range->AddUseInterval(graph_entry->start_pos(), graph_entry->end_pos());
range->DefineAt(graph_entry->start_pos());
@@ -612,11 +626,14 @@
// in stack maps.
spill_slots_.Add(range_end);
quad_spill_slots_.Add(false);
+ untagged_spill_slots_.Add(false);
+ // Note, all incoming parameters are assumed to be tagged.
MarkAsObjectAtSafepoints(range);
} else if (defn->IsConstant() && block->IsCatchBlockEntry()) {
// Constants at catch block entries consume spill slots.
spill_slots_.Add(range_end);
quad_spill_slots_.Add(false);
+ untagged_spill_slots_.Add(false);
}
}
@@ -632,7 +649,6 @@
static Location::Kind RegisterKindForResult(Instruction* instr) {
if ((instr->representation() == kUnboxedDouble) ||
- (instr->representation() == kUnboxedMint) ||
(instr->representation() == kUnboxedFloat32x4) ||
(instr->representation() == kUnboxedInt32x4) ||
(instr->representation() == kUnboxedFloat64x2) ||
@@ -802,8 +818,12 @@
for (intptr_t i = 0; i < env->Length(); ++i) {
Value* value = env->ValueAt(i);
- locations[i] = Location::Any();
Definition* def = value->definition();
+ if (def->HasPairRepresentation()) {
+ locations[i] = Location::Pair(Location::Any(), Location::Any());
+ } else {
+ locations[i] = Location::Any();
+ }
if (def->IsPushArgument()) {
// Frame size is unknown until after allocation.
@@ -827,13 +847,23 @@
continue;
}
- LiveRange* range = GetLiveRange(def->ssa_temp_index());
- range->AddUseInterval(block_start_pos, use_pos);
- range->AddUse(use_pos, &locations[i]);
-
if (def->HasPairRepresentation()) {
- LiveRange* range =
+ PairLocation* location_pair = locations[i].AsPairLocation();
+ {
+ // First live range.
+ LiveRange* range = GetLiveRange(def->ssa_temp_index());
+ range->AddUseInterval(block_start_pos, use_pos);
+ range->AddUse(use_pos, location_pair->SlotAt(0));
+ }
+ {
+ // Second live range.
+ LiveRange* range =
GetLiveRange(ToSecondPairVreg(def->ssa_temp_index()));
+ range->AddUseInterval(block_start_pos, use_pos);
+ range->AddUse(use_pos, location_pair->SlotAt(1));
+ }
+ } else {
+ LiveRange* range = GetLiveRange(def->ssa_temp_index());
range->AddUseInterval(block_start_pos, use_pos);
range->AddUse(use_pos, &locations[i]);
}
@@ -869,13 +899,25 @@
continue;
}
- locations[i] = Location::Any();
-
- LiveRange* range = GetLiveRange(def->ssa_temp_index());
- range->AddUseInterval(block_start_pos, use_pos);
- range->AddUse(use_pos, &locations[i]);
if (def->HasPairRepresentation()) {
- LiveRange* range = GetLiveRange(ToSecondPairVreg(def->ssa_temp_index()));
+ locations[i] = Location::Pair(Location::Any(), Location::Any());
+ PairLocation* location_pair = locations[i].AsPairLocation();
+ {
+ // First live range.
+ LiveRange* range = GetLiveRange(def->ssa_temp_index());
+ range->AddUseInterval(block_start_pos, use_pos);
+ range->AddUse(use_pos, location_pair->SlotAt(0));
+ }
+ {
+ // Second live range.
+ LiveRange* range =
+ GetLiveRange(ToSecondPairVreg(def->ssa_temp_index()));
+ range->AddUseInterval(block_start_pos, use_pos);
+ range->AddUse(use_pos, location_pair->SlotAt(1));
+ }
+ } else {
+ locations[i] = Location::Any();
+ LiveRange* range = GetLiveRange(def->ssa_temp_index());
range->AddUseInterval(block_start_pos, use_pos);
range->AddUse(use_pos, &locations[i]);
}
@@ -889,7 +931,8 @@
intptr_t pos,
Location* in_ref,
Value* input,
- intptr_t vreg) {
+ intptr_t vreg,
+ RegisterSet* live_registers) {
ASSERT(in_ref != NULL);
ASSERT(!in_ref->IsPairLocation());
ASSERT(input != NULL);
@@ -903,6 +946,9 @@
// value --*
// register [-----)
//
+ if (live_registers != NULL) {
+ live_registers->Add(*in_ref, range->representation());
+ }
MoveOperands* move =
AddMoveAt(pos - 1, *in_ref, Location::Any());
BlockLocation(*in_ref, pos - 1, pos + 1);
@@ -1114,6 +1160,12 @@
locs->out(0).IsUnallocated() &&
(locs->out(0).policy() == Location::kSameAsFirstInput);
+ // Output is same as first input which is a pair.
+ if (output_same_as_first_input && locs->in(0).IsPairLocation()) {
+ // Make out into a PairLocation.
+ locs->set_out(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
+ }
// Add uses from the deoptimization environment.
if (current->env() != NULL) ProcessEnvironmentUses(block, current);
@@ -1128,6 +1180,10 @@
// the location is the first register or second register.
Value* input = current->InputAt(j);
Location* in_ref = locs->in_slot(j);
+ RegisterSet* live_registers = NULL;
+ if (locs->HasCallOnSlowPath()) {
+ live_registers = locs->live_registers();
+ }
if (in_ref->IsPairLocation()) {
ASSERT(input->definition()->HasPairRepresentation());
PairLocation* pair = in_ref->AsPairLocation();
@@ -1135,12 +1191,12 @@
// Each element of the pair is assigned it's own virtual register number
// and is allocated its own LiveRange.
ProcessOneInput(block, pos, pair->SlotAt(0),
- input, vreg);
+ input, vreg, live_registers);
ProcessOneInput(block, pos, pair->SlotAt(1), input,
- ToSecondPairVreg(vreg));
+ ToSecondPairVreg(vreg), live_registers);
} else {
ProcessOneInput(block, pos, in_ref, input,
- input->definition()->ssa_temp_index());
+ input->definition()->ssa_temp_index(), live_registers);
}
}
}
@@ -1522,6 +1578,16 @@
}
+UsePosition* AllocationFinger::FirstInterferingUse(intptr_t after) {
+ if (IsInstructionEndPosition(after)) {
+ // If after is a position at the end of the instruction disregard
+ // any use occuring at it.
+ after += 1;
+ }
+ return FirstRegisterUse(after);
+}
+
+
void AllocationFinger::UpdateAfterSplit(intptr_t first_use_after_split_pos) {
if ((first_register_use_ != NULL) &&
(first_register_use_->pos() >= first_use_after_split_pos)) {
@@ -1695,7 +1761,8 @@
split_pos = ToInstructionStart(to) - 1;
}
- ASSERT((split_pos != kIllegalPosition) && (from < split_pos));
+ ASSERT(split_pos != kIllegalPosition);
+ ASSERT(from < split_pos);
return range->SplitAt(split_pos);
}
@@ -1769,6 +1836,8 @@
((range->representation() == kUnboxedFloat32x4) ||
(range->representation() == kUnboxedInt32x4) ||
(range->representation() == kUnboxedFloat64x2));
+ const bool need_untagged = (register_kind_ == Location::kRegister) &&
+ ((range->representation() == kUntagged));
// Search for a free spill slot among allocated: the value in it should be
// dead and its type should match (e.g. it should not be a part of the quad if
@@ -1780,6 +1849,7 @@
: 0;
for (; idx < spill_slots_.length(); idx++) {
if ((need_quad == quad_spill_slots_[idx]) &&
+ (need_untagged == untagged_spill_slots_[idx]) &&
(spill_slots_[idx] <= start)) {
break;
}
@@ -1789,9 +1859,11 @@
// No free spill slot found. Allocate a new one.
spill_slots_.Add(0);
quad_spill_slots_.Add(need_quad);
+ untagged_spill_slots_.Add(need_untagged);
if (need_quad) { // Allocate two double stack slots if we need quad slot.
spill_slots_.Add(0);
quad_spill_slots_.Add(need_quad);
+ untagged_spill_slots_.Add(need_untagged);
}
}
@@ -1822,8 +1894,7 @@
ASSERT(need_quad);
location = Location::QuadStackSlot(slot_idx);
} else {
- ASSERT((range->representation() == kUnboxedDouble) ||
- (range->representation() == kUnboxedMint));
+ ASSERT((range->representation() == kUnboxedDouble));
location = Location::DoubleStackSlot(slot_idx);
}
range->set_spill_slot(location);
@@ -1841,6 +1912,7 @@
for (SafepointPosition* safepoint = range->first_safepoint();
safepoint != NULL;
safepoint = safepoint->next()) {
+ // Mark the stack slot as having an object.
safepoint->locs()->stack_bitmap()->Set(stack_index, true);
}
range = range->next_sibling();
@@ -2199,9 +2271,8 @@
return false;
}
- const UsePosition* use =
- allocated->finger()->FirstRegisterBeneficialUse(unallocated->Start());
-
+ UsePosition* use =
+ allocated->finger()->FirstInterferingUse(start);
if ((use != NULL) && ((ToInstructionStart(use->pos()) - start) <= 1)) {
// This register is blocked by interval that is used
// as register in the current instruction and can't
@@ -2281,7 +2352,7 @@
if (intersection == kMaxPosition) return false;
const intptr_t spill_position = first_unallocated->start();
- UsePosition* use = allocated->finger()->FirstRegisterUse(spill_position);
+ UsePosition* use = allocated->finger()->FirstInterferingUse(spill_position);
if (use == NULL) {
// No register uses after this point.
SpillAfter(allocated, spill_position);
@@ -2316,6 +2387,7 @@
void FlowGraphAllocator::ConvertUseTo(UsePosition* use, Location loc) {
+ ASSERT(!loc.IsPairLocation());
ASSERT(use->location_slot() != NULL);
Location* slot = use->location_slot();
ASSERT(slot->IsUnallocated());
@@ -2350,7 +2422,7 @@
safepoint = safepoint->next()) {
if (!safepoint->locs()->always_calls()) {
ASSERT(safepoint->locs()->can_call());
- safepoint->locs()->live_registers()->Add(loc);
+ safepoint->locs()->live_registers()->Add(loc, range->representation());
}
}
}
@@ -2669,12 +2741,22 @@
}
+static Representation RepresentationForRange(Representation definition_rep) {
+ if (definition_rep == kUnboxedMint) {
+ // kUnboxedMint is split into two ranges, each of which are kUntagged.
+ return kUntagged;
+ }
+ return definition_rep;
+}
+
+
void FlowGraphAllocator::CollectRepresentations() {
// Parameters.
GraphEntryInstr* graph_entry = flow_graph_.graph_entry();
for (intptr_t i = 0; i < graph_entry->initial_definitions()->length(); ++i) {
Definition* def = (*graph_entry->initial_definitions())[i];
- value_representations_[def->ssa_temp_index()] = def->representation();
+ value_representations_[def->ssa_temp_index()] =
+ RepresentationForRange(def->representation());
ASSERT(!def->HasPairRepresentation());
}
@@ -2690,7 +2772,9 @@
i < catch_entry->initial_definitions()->length();
++i) {
Definition* def = (*catch_entry->initial_definitions())[i];
- value_representations_[def->ssa_temp_index()] = def->representation();
+ ASSERT(!def->HasPairRepresentation());
+ value_representations_[def->ssa_temp_index()] =
+ RepresentationForRange(def->representation());
}
}
// Phis.
@@ -2700,7 +2784,9 @@
// TODO(johnmccutchan): Fix handling of PhiInstr with PairLocation.
PhiInstr* phi = it.Current();
if ((phi != NULL) && (phi->ssa_temp_index() >= 0)) {
- value_representations_[phi->ssa_temp_index()] = phi->representation();
+ ASSERT(!phi->HasPairRepresentation());
+ value_representations_[phi->ssa_temp_index()] =
+ RepresentationForRange(phi->representation());
}
}
}
@@ -2711,9 +2797,11 @@
Definition* def = instr_it.Current()->AsDefinition();
if ((def != NULL) && (def->ssa_temp_index() >= 0)) {
const intptr_t vreg = def->ssa_temp_index();
- value_representations_[vreg] = def->representation();
+ value_representations_[vreg] =
+ RepresentationForRange(def->representation());
if (def->HasPairRepresentation()) {
- value_representations_[ToSecondPairVreg(vreg)] = def->representation();
+ value_representations_[ToSecondPairVreg(vreg)] =
+ RepresentationForRange(def->representation());
}
}
}
@@ -2761,6 +2849,7 @@
cpu_spill_slot_count_ = spill_slots_.length();
spill_slots_.Clear();
quad_spill_slots_.Clear();
+ untagged_spill_slots_.Clear();
PrepareForAllocation(Location::kFpuRegister,
kNumberOfFpuRegisters,
diff --git a/runtime/vm/flow_graph_allocator.h b/runtime/vm/flow_graph_allocator.h
index ab05f25..e795552b 100644
--- a/runtime/vm/flow_graph_allocator.h
+++ b/runtime/vm/flow_graph_allocator.h
@@ -113,7 +113,8 @@
intptr_t pos,
Location* in_ref,
Value* input,
- intptr_t vreg);
+ intptr_t vreg,
+ RegisterSet* live_registers);
void ProcessOneOutput(BlockEntryInstr* block,
Instruction* current,
intptr_t pos,
@@ -317,6 +318,11 @@
// are disjoint.
GrowableArray<bool> quad_spill_slots_;
+ // Track whether a spill slot is expected to hold a tagged or untagged value.
+ // This is used to keep tagged and untagged spill slots disjoint. See bug
+ // #18955 for details.
+ GrowableArray<bool> untagged_spill_slots_;
+
intptr_t cpu_spill_slot_count_;
DISALLOW_COPY_AND_ASSIGN(FlowGraphAllocator);
@@ -498,6 +504,7 @@
Location FirstHint();
UsePosition* FirstRegisterUse(intptr_t after_pos);
UsePosition* FirstRegisterBeneficialUse(intptr_t after_pos);
+ UsePosition* FirstInterferingUse(intptr_t after_pos);
private:
UseInterval* first_pending_use_interval_;
diff --git a/runtime/vm/flow_graph_builder.cc b/runtime/vm/flow_graph_builder.cc
index 3374c10..00c7e57 100644
--- a/runtime/vm/flow_graph_builder.cc
+++ b/runtime/vm/flow_graph_builder.cc
@@ -558,7 +558,6 @@
ASSERT(is_open());
owner()->DeallocateTemps(definition->InputCount());
owner()->add_args_pushed(-definition->ArgumentCount());
- definition->set_use_kind(Definition::kValue);
definition->set_temp_index(owner()->AllocateTemp());
if (is_empty()) {
entry_ = definition;
@@ -574,7 +573,6 @@
ASSERT(is_open());
owner()->DeallocateTemps(definition->InputCount());
owner()->add_args_pushed(-definition->ArgumentCount());
- definition->set_use_kind(Definition::kEffect);
if (is_empty()) {
entry_ = definition;
} else {
diff --git a/runtime/vm/flow_graph_compiler.cc b/runtime/vm/flow_graph_compiler.cc
index 2671d5b..7da1818 100644
--- a/runtime/vm/flow_graph_compiler.cc
+++ b/runtime/vm/flow_graph_compiler.cc
@@ -40,8 +40,14 @@
DECLARE_FLAG(charp, deoptimize_filter);
DECLARE_FLAG(bool, warn_on_javascript_compatibility);
+// TODO(zra): remove once arm64 has simd.
+#if defined(TARGET_ARCH_ARM64)
+DEFINE_FLAG(bool, enable_simd_inline, false,
+ "Enable inlining of SIMD related method calls.");
+#else
DEFINE_FLAG(bool, enable_simd_inline, true,
"Enable inlining of SIMD related method calls.");
+#endif
DEFINE_FLAG(bool, source_lines, false, "Emit source line as assembly comment.");
// Assign locations to incoming arguments, i.e., values pushed above spill slots
@@ -561,7 +567,7 @@
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
Register reg = static_cast<Register>(i);
if (locs->live_registers()->ContainsRegister(reg)) {
- bitmap->Set(bitmap->Length(), true);
+ bitmap->Set(bitmap->Length(), locs->live_registers()->IsTagged(reg));
}
}
}
@@ -632,7 +638,6 @@
Value* value = it.CurrentValue();
switch (value->definition()->representation()) {
case kUnboxedDouble:
- case kUnboxedMint:
it.SetCurrentLocation(Location::DoubleStackSlot(index));
break;
case kUnboxedFloat32x4:
@@ -643,6 +648,27 @@
default:
UNREACHABLE();
}
+ } else if (loc.IsPairLocation()) {
+ intptr_t representation =
+ it.CurrentValue()->definition()->representation();
+ ASSERT(representation == kUnboxedMint);
+ PairLocation* value_pair = loc.AsPairLocation();
+ intptr_t index_lo;
+ intptr_t index_hi;
+ if (value_pair->At(0).IsRegister()) {
+ index_lo = cpu_reg_slots[value_pair->At(0).reg()];
+ } else {
+ ASSERT(value_pair->At(0).IsStackSlot());
+ index_lo = value_pair->At(0).stack_index();
+ }
+ if (value_pair->At(1).IsRegister()) {
+ index_hi = cpu_reg_slots[value_pair->At(1).reg()];
+ } else {
+ ASSERT(value_pair->At(1).IsStackSlot());
+ index_hi = value_pair->At(1).stack_index();
+ }
+ it.SetCurrentLocation(Location::Pair(Location::StackSlot(index_lo),
+ Location::StackSlot(index_hi)));
} else if (loc.IsInvalid()) {
Definition* def =
it.CurrentValue()->definition();
diff --git a/runtime/vm/flow_graph_compiler.h b/runtime/vm/flow_graph_compiler.h
index bc4b773..e2f30da 100644
--- a/runtime/vm/flow_graph_compiler.h
+++ b/runtime/vm/flow_graph_compiler.h
@@ -450,20 +450,20 @@
// Array/list element address computations.
static intptr_t DataOffsetFor(intptr_t cid);
static intptr_t ElementSizeFor(intptr_t cid);
- static FieldAddress ElementAddressForIntIndex(intptr_t cid,
- intptr_t index_scale,
- Register array,
- intptr_t offset);
- static FieldAddress ElementAddressForRegIndex(intptr_t cid,
- intptr_t index_scale,
- Register array,
- Register index);
- static Address ExternalElementAddressForIntIndex(intptr_t index_scale,
- Register array,
- intptr_t offset);
- static Address ExternalElementAddressForRegIndex(intptr_t index_scale,
- Register array,
- Register index);
+ Address ElementAddressForIntIndex(intptr_t cid,
+ intptr_t index_scale,
+ Register array,
+ intptr_t offset);
+ Address ElementAddressForRegIndex(intptr_t cid,
+ intptr_t index_scale,
+ Register array,
+ Register index);
+ Address ExternalElementAddressForIntIndex(intptr_t index_scale,
+ Register array,
+ intptr_t offset);
+ Address ExternalElementAddressForRegIndex(intptr_t index_scale,
+ Register array,
+ Register index);
// Returns 'sorted' array in decreasing count order.
static void SortICDataByCount(const ICData& ic_data,
diff --git a/runtime/vm/flow_graph_compiler_arm.cc b/runtime/vm/flow_graph_compiler_arm.cc
index c918bed..17253e2 100644
--- a/runtime/vm/flow_graph_compiler_arm.cc
+++ b/runtime/vm/flow_graph_compiler_arm.cc
@@ -720,7 +720,7 @@
return;
}
Definition* defn = instr->AsDefinition();
- if ((defn != NULL) && defn->is_used()) {
+ if ((defn != NULL) && defn->HasTemp()) {
__ Push(defn->locs()->out(0).reg());
}
}
@@ -1446,23 +1446,25 @@
ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
}
- // Store general purpose registers with the lowest register number at the
+ // Store general purpose registers with the highest register number at the
// lowest address.
- const intptr_t cpu_registers = locs->live_registers()->cpu_registers();
- ASSERT((cpu_registers & ~kAllCpuRegistersList) == 0);
- if (cpu_registers != 0) {
- __ PushList(cpu_registers);
+ for (intptr_t reg_idx = 0; reg_idx < kNumberOfCpuRegisters; ++reg_idx) {
+ Register reg = static_cast<Register>(reg_idx);
+ if (locs->live_registers()->ContainsRegister(reg)) {
+ __ Push(reg);
+ }
}
}
void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
- // General purpose registers have the lowest register number at the
+ // General purpose registers have the highest register number at the
// lowest address.
- const intptr_t cpu_registers = locs->live_registers()->cpu_registers();
- ASSERT((cpu_registers & ~kAllCpuRegistersList) == 0);
- if (cpu_registers != 0) {
- __ PopList(cpu_registers);
+ for (intptr_t reg_idx = kNumberOfCpuRegisters - 1; reg_idx >= 0; --reg_idx) {
+ Register reg = static_cast<Register>(reg_idx);
+ if (locs->live_registers()->ContainsRegister(reg)) {
+ __ Pop(reg);
+ }
}
const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount();
@@ -1534,22 +1536,20 @@
}
-// Do not implement or use this function.
-FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
- intptr_t index_scale,
- Register array,
- intptr_t index) {
+Address FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
+ intptr_t index_scale,
+ Register array,
+ intptr_t index) {
UNREACHABLE();
return FieldAddress(array, index);
}
-// Do not implement or use this function.
-FieldAddress FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
- intptr_t index_scale,
- Register array,
- Register index) {
- UNREACHABLE(); // No register indexed with offset addressing mode on ARM.
+Address FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
+ intptr_t index_scale,
+ Register array,
+ Register index) {
+ UNREACHABLE();
return FieldAddress(array, index);
}
diff --git a/runtime/vm/flow_graph_compiler_arm64.cc b/runtime/vm/flow_graph_compiler_arm64.cc
index d19fbb5..61b97b9 100644
--- a/runtime/vm/flow_graph_compiler_arm64.cc
+++ b/runtime/vm/flow_graph_compiler_arm64.cc
@@ -26,6 +26,8 @@
DECLARE_FLAG(int, optimization_counter_threshold);
DECLARE_FLAG(int, reoptimization_counter_threshold);
DECLARE_FLAG(bool, eliminate_type_checks);
+DECLARE_FLAG(bool, enable_simd_inline);
+
FlowGraphCompiler::~FlowGraphCompiler() {
// BlockInfos are zone-allocated, so their destructors are not called.
@@ -47,7 +49,7 @@
bool FlowGraphCompiler::SupportsUnboxedSimd128() {
- return false;
+ return FLAG_enable_simd_inline;
}
@@ -717,7 +719,7 @@
return;
}
Definition* defn = instr->AsDefinition();
- if ((defn != NULL) && defn->is_used()) {
+ if ((defn != NULL) && defn->HasTemp()) {
__ Push(defn->locs()->out(0).reg());
}
}
@@ -1527,22 +1529,20 @@
}
-// Do not implement or use this function.
-FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
- intptr_t index_scale,
- Register array,
- intptr_t index) {
+Address FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
+ intptr_t index_scale,
+ Register array,
+ intptr_t index) {
UNREACHABLE();
return FieldAddress(array, index);
}
-// Do not implement or use this function.
-FieldAddress FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
- intptr_t index_scale,
- Register array,
- Register index) {
- UNREACHABLE(); // No register indexed with offset addressing mode on ARM.
+Address FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
+ intptr_t index_scale,
+ Register array,
+ Register index) {
+ UNREACHABLE();
return FieldAddress(array, index);
}
diff --git a/runtime/vm/flow_graph_compiler_ia32.cc b/runtime/vm/flow_graph_compiler_ia32.cc
index c0e1f3f..01d002b 100644
--- a/runtime/vm/flow_graph_compiler_ia32.cc
+++ b/runtime/vm/flow_graph_compiler_ia32.cc
@@ -741,7 +741,7 @@
return;
}
Definition* defn = instr->AsDefinition();
- if ((defn != NULL) && defn->is_used()) {
+ if ((defn != NULL) && defn->HasTemp()) {
Location value = defn->locs()->out(0);
if (value.IsRegister()) {
__ pushl(value.reg());
@@ -1539,10 +1539,10 @@
}
-FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
- intptr_t index_scale,
- Register array,
- intptr_t index) {
+Address FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
+ intptr_t index_scale,
+ Register array,
+ intptr_t index) {
const int64_t disp =
static_cast<int64_t>(index) * index_scale + DataOffsetFor(cid);
ASSERT(Utils::IsInt(32, disp));
@@ -1568,10 +1568,10 @@
}
-FieldAddress FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
- intptr_t index_scale,
- Register array,
- Register index) {
+Address FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
+ intptr_t index_scale,
+ Register array,
+ Register index) {
return FieldAddress(array,
index,
ToScaleFactor(index_scale),
diff --git a/runtime/vm/flow_graph_compiler_mips.cc b/runtime/vm/flow_graph_compiler_mips.cc
index 3b2d462..20e8c29 100644
--- a/runtime/vm/flow_graph_compiler_mips.cc
+++ b/runtime/vm/flow_graph_compiler_mips.cc
@@ -737,7 +737,7 @@
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
if (is_optimizing()) return;
Definition* defn = instr->AsDefinition();
- if ((defn != NULL) && defn->is_used()) {
+ if ((defn != NULL) && defn->HasTemp()) {
__ Push(defn->locs()->out(0).reg());
}
}
@@ -1597,19 +1597,19 @@
}
-FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
- intptr_t index_scale,
- Register array,
- intptr_t index) {
+Address FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
+ intptr_t index_scale,
+ Register array,
+ intptr_t index) {
UNREACHABLE();
return FieldAddress(array, index);
}
-FieldAddress FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
- intptr_t index_scale,
- Register array,
- Register index) {
+Address FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
+ intptr_t index_scale,
+ Register array,
+ Register index) {
UNREACHABLE();
return FieldAddress(array, index);
}
diff --git a/runtime/vm/flow_graph_compiler_x64.cc b/runtime/vm/flow_graph_compiler_x64.cc
index adefcec..24f3693 100644
--- a/runtime/vm/flow_graph_compiler_x64.cc
+++ b/runtime/vm/flow_graph_compiler_x64.cc
@@ -723,7 +723,7 @@
return;
}
Definition* defn = instr->AsDefinition();
- if ((defn != NULL) && defn->is_used()) {
+ if ((defn != NULL) && defn->HasTemp()) {
Location value = defn->locs()->out(0);
if (value.IsRegister()) {
__ pushq(value.reg());
@@ -1575,10 +1575,10 @@
}
-FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
- intptr_t index_scale,
- Register array,
- intptr_t index) {
+Address FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
+ intptr_t index_scale,
+ Register array,
+ intptr_t index) {
const int64_t disp =
static_cast<int64_t>(index) * index_scale + DataOffsetFor(cid);
ASSERT(Utils::IsInt(32, disp));
@@ -1604,10 +1604,10 @@
}
-FieldAddress FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
- intptr_t index_scale,
- Register array,
- Register index) {
+Address FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
+ intptr_t index_scale,
+ Register array,
+ Register index) {
return FieldAddress(array,
index,
ToScaleFactor(index_scale),
diff --git a/runtime/vm/flow_graph_inliner.cc b/runtime/vm/flow_graph_inliner.cc
index 113a69b..96d5d22 100644
--- a/runtime/vm/flow_graph_inliner.cc
+++ b/runtime/vm/flow_graph_inliner.cc
@@ -817,7 +817,17 @@
private:
friend class PolymorphicInliner;
+
+ static bool Contains(const GrowableArray<intptr_t>& a, intptr_t deopt_id) {
+ for (intptr_t i = 0; i < a.length(); i++) {
+ if (a[i] == deopt_id) return true;
+ }
+ return false;
+ }
+
void PrintInlinedInfoFor(const Function& caller, intptr_t depth) {
+ // Prevent duplicate printing as inlined_info aggregates all inlining.
+ GrowableArray<intptr_t> call_instructions_printed;
// Print those that were inlined.
for (intptr_t i = 0; i < inlined_info_.length(); i++) {
const InlinedInfo& info = inlined_info_[i];
@@ -825,7 +835,8 @@
continue;
}
if ((info.inlined_depth == depth) &&
- (info.caller->raw() == caller.raw())) {
+ (info.caller->raw() == caller.raw()) &&
+ !Contains(call_instructions_printed, info.call_instr->GetDeoptId())) {
for (int t = 0; t < depth; t++) {
OS::Print(" ");
}
@@ -833,8 +844,10 @@
info.call_instr->GetDeoptId(),
info.inlined->ToQualifiedCString());
PrintInlinedInfoFor(*info.inlined, depth + 1);
+ call_instructions_printed.Add(info.call_instr->GetDeoptId());
}
}
+ call_instructions_printed.Clear();
// Print those that were not inlined.
for (intptr_t i = 0; i < inlined_info_.length(); i++) {
const InlinedInfo& info = inlined_info_[i];
@@ -842,7 +855,8 @@
continue;
}
if ((info.inlined_depth == depth) &&
- (info.caller->raw() == caller.raw())) {
+ (info.caller->raw() == caller.raw()) &&
+ !Contains(call_instructions_printed, info.call_instr->GetDeoptId())) {
for (int t = 0; t < depth; t++) {
OS::Print(" ");
}
@@ -850,6 +864,7 @@
info.call_instr->GetDeoptId(),
info.inlined->ToQualifiedCString(),
info.bailout_reason);
+ call_instructions_printed.Add(info.call_instr->GetDeoptId());
}
}
}
@@ -1337,7 +1352,7 @@
last,
result,
call_->env(), // Return can become deoptimization target.
- Definition::kEffect);
+ FlowGraph::kEffect);
entry->set_last_instruction(result);
exit_collector->AddExit(result);
GraphEntryInstr* graph_entry =
diff --git a/runtime/vm/flow_graph_optimizer.cc b/runtime/vm/flow_graph_optimizer.cc
index 98c611f..6ef9ef6 100644
--- a/runtime/vm/flow_graph_optimizer.cc
+++ b/runtime/vm/flow_graph_optimizer.cc
@@ -321,7 +321,7 @@
Isolate::kNoDeoptId,
instr->token_pos());
instr->ReplaceUsesWith(load);
- flow_graph()->InsertAfter(instr, load, NULL, Definition::kValue);
+ flow_graph()->InsertAfter(instr, load, NULL, FlowGraph::kValue);
}
@@ -334,7 +334,7 @@
rep,
cid);
instr->ReplaceUsesWith(extract);
- flow_graph()->InsertAfter(instr, extract, NULL, Definition::kValue);
+ flow_graph()->InsertAfter(instr, extract, NULL, FlowGraph::kValue);
}
@@ -602,7 +602,7 @@
Definition* converted = NULL;
if ((from == kTagged) && (to == kUnboxedMint)) {
ASSERT((deopt_target != NULL) ||
- (use->Type()->ToCid() == kDoubleCid));
+ (use->Type()->ToCid() == kUnboxedMint));
const intptr_t deopt_id = (deopt_target != NULL) ?
deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId;
converted = new UnboxIntegerInstr(use->CopyWithType(), deopt_id);
@@ -616,7 +616,7 @@
// TODO(fschneider): Implement direct unboxed mint-to-double conversion.
BoxIntegerInstr* boxed = new BoxIntegerInstr(use->CopyWithType());
use->BindTo(boxed);
- InsertBefore(insert_before, boxed, NULL, Definition::kValue);
+ InsertBefore(insert_before, boxed, NULL, FlowGraph::kValue);
const intptr_t deopt_id = (deopt_target != NULL) ?
deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId;
@@ -688,7 +688,7 @@
UNIMPLEMENTED();
}
use->BindTo(boxed);
- InsertBefore(insert_before, boxed, NULL, Definition::kValue);
+ InsertBefore(insert_before, boxed, NULL, FlowGraph::kValue);
Value* to_value = new Value(boxed);
if (to == kUnboxedDouble) {
converted = new UnboxDoubleInstr(to_value, deopt_id);
@@ -706,7 +706,7 @@
}
ASSERT(converted != NULL);
InsertBefore(insert_before, converted, use->instruction()->env(),
- Definition::kValue);
+ FlowGraph::kValue);
if (is_environment_use) {
use->BindToEnvironment(converted);
} else {
@@ -984,7 +984,7 @@
deopt_id,
insert_before->token_pos()),
deopt_environment,
- Definition::kEffect);
+ FlowGraph::kEffect);
}
}
@@ -1012,7 +1012,7 @@
// Type propagation has not run yet, we cannot eliminate the check.
Instruction* check = GetCheckClass(
to_check, unary_checks, deopt_id, insert_before->token_pos());
- InsertBefore(insert_before, check, deopt_environment, Definition::kEffect);
+ InsertBefore(insert_before, check, deopt_environment, FlowGraph::kEffect);
}
@@ -1207,7 +1207,7 @@
cursor = flow_graph()->AppendTo(cursor,
load_type_args,
NULL,
- Definition::kValue);
+ FlowGraph::kValue);
instantiator = array;
type_args = load_type_args;
@@ -1265,7 +1265,7 @@
cursor = flow_graph()->AppendTo(cursor,
assert_value,
call->env(),
- Definition::kValue);
+ FlowGraph::kValue);
}
array_cid = PrepareInlineIndexedOp(call,
@@ -1289,7 +1289,7 @@
cursor = flow_graph()->AppendTo(cursor,
check,
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
}
if (array_cid == kTypedDataFloat32ArrayCid) {
@@ -1298,7 +1298,7 @@
cursor = flow_graph()->AppendTo(cursor,
stored_value,
NULL,
- Definition::kValue);
+ FlowGraph::kValue);
}
intptr_t index_scale = FlowGraphCompiler::ElementSizeFor(array_cid);
@@ -1313,7 +1313,7 @@
flow_graph()->AppendTo(cursor,
*last,
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
return true;
}
@@ -1567,7 +1567,7 @@
call->deopt_id(),
call->token_pos()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
// Insert array length load and bounds check.
LoadFieldInstr* length =
@@ -1583,7 +1583,7 @@
*cursor = flow_graph()->AppendTo(*cursor,
length,
NULL,
- Definition::kValue);
+ FlowGraph::kValue);
*cursor = flow_graph()->AppendTo(*cursor,
new CheckArrayBoundInstr(
@@ -1591,7 +1591,7 @@
new Value(index),
call->deopt_id()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
if (array_cid == kGrowableObjectArrayCid) {
// Insert data elements load.
@@ -1604,7 +1604,7 @@
*cursor = flow_graph()->AppendTo(*cursor,
elements,
NULL,
- Definition::kValue);
+ FlowGraph::kValue);
// Load from the data from backing store which is a fixed-length array.
*array = elements;
array_cid = kArrayCid;
@@ -1615,7 +1615,7 @@
*cursor = flow_graph()->AppendTo(*cursor,
elements,
NULL,
- Definition::kValue);
+ FlowGraph::kValue);
*array = elements;
}
return array_cid;
@@ -1664,14 +1664,14 @@
cursor,
*last,
deopt_id != Isolate::kNoDeoptId ? call->env() : NULL,
- Definition::kValue);
+ FlowGraph::kValue);
if (array_cid == kTypedDataFloat32ArrayCid) {
*last = new FloatToDoubleInstr(new Value(*last), deopt_id);
flow_graph()->AppendTo(cursor,
*last,
deopt_id != Isolate::kNoDeoptId ? call->env() : NULL,
- Definition::kValue);
+ FlowGraph::kValue);
}
return true;
}
@@ -1795,7 +1795,7 @@
// string is not of length one.
StringToCharCodeInstr* char_code_right =
new StringToCharCodeInstr(new Value(right), kOneByteStringCid);
- InsertBefore(call, char_code_right, call->env(), Definition::kValue);
+ InsertBefore(call, char_code_right, call->env(), FlowGraph::kValue);
right_val = new Value(char_code_right);
}
@@ -1850,13 +1850,13 @@
call->deopt_id(),
call->token_pos()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
InsertBefore(call,
new CheckSmiInstr(new Value(right),
call->deopt_id(),
call->token_pos()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
cid = kSmiCid;
} else if (HasTwoMintOrSmi(ic_data) &&
FlowGraphCompiler::SupportsUnboxedMints()) {
@@ -1876,7 +1876,7 @@
new Value(right),
call->deopt_id()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
cid = kDoubleCid;
}
}
@@ -1952,13 +1952,13 @@
call->deopt_id(),
call->token_pos()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
InsertBefore(call,
new CheckSmiInstr(new Value(right),
call->deopt_id(),
call->token_pos()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
cid = kSmiCid;
} else if (HasTwoMintOrSmi(ic_data) &&
FlowGraphCompiler::SupportsUnboxedMints()) {
@@ -1978,7 +1978,7 @@
new Value(right),
call->deopt_id()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
cid = kDoubleCid;
}
}
@@ -2128,7 +2128,7 @@
new Value(right),
call->deopt_id()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
}
BinaryDoubleOpInstr* double_bin_op =
@@ -2166,7 +2166,7 @@
call->deopt_id(),
call->token_pos()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
ConstantInstr* constant =
flow_graph()->GetConstant(Smi::Handle(
Smi::New(Smi::Cast(obj).Value() - 1)));
@@ -2221,7 +2221,7 @@
call->deopt_id(),
call->token_pos()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
unary_op = new UnarySmiOpInstr(op_kind, new Value(input), call->deopt_id());
} else if ((op_kind == Token::kBIT_NOT) &&
HasOnlySmiOrMint(*call->ic_data()) &&
@@ -2708,18 +2708,18 @@
call->deopt_id(),
call->token_pos()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
// Load the length of the string.
LoadFieldInstr* length = BuildLoadStringLength(str);
- cursor = flow_graph()->AppendTo(cursor, length, NULL, Definition::kValue);
+ cursor = flow_graph()->AppendTo(cursor, length, NULL, FlowGraph::kValue);
// Bounds check.
cursor = flow_graph()->AppendTo(cursor,
new CheckArrayBoundInstr(new Value(length),
new Value(index),
call->deopt_id()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
LoadIndexedInstr* load_indexed = new LoadIndexedInstr(
new Value(str),
@@ -2732,7 +2732,7 @@
cursor = flow_graph()->AppendTo(cursor,
load_indexed,
NULL,
- Definition::kValue);
+ FlowGraph::kValue);
ASSERT(cursor == load_indexed);
return load_indexed;
}
@@ -2782,7 +2782,7 @@
StringFromCharCodeInstr* char_at =
new StringFromCharCodeInstr(new Value(*last), cid);
- flow_graph()->AppendTo(*last, char_at, NULL, Definition::kValue);
+ flow_graph()->AppendTo(*last, char_at, NULL, FlowGraph::kValue);
*last = char_at;
return true;
@@ -3077,7 +3077,7 @@
// No BIT_AND operation needed.
ReplaceCall(call, left_shift);
} else {
- InsertBefore(call, left_shift, call->env(), Definition::kValue);
+ InsertBefore(call, left_shift, call->env(), FlowGraph::kValue);
BinarySmiOpInstr* bit_and =
new BinarySmiOpInstr(Token::kBIT_AND,
new Value(left_shift), new Value(int32_mask),
@@ -3097,7 +3097,7 @@
new ShiftMintOpInstr(Token::kSHL,
new Value(value), new Value(count),
call->deopt_id());
- InsertBefore(call, left_shift, call->env(), Definition::kValue);
+ InsertBefore(call, left_shift, call->env(), FlowGraph::kValue);
BinaryMintOpInstr* bit_and =
new BinaryMintOpInstr(Token::kBIT_AND,
new Value(left_shift), new Value(int32_mask),
@@ -3530,14 +3530,14 @@
cursor,
*last,
deopt_id != Isolate::kNoDeoptId ? call->env() : NULL,
- Definition::kValue);
+ FlowGraph::kValue);
if (view_cid == kTypedDataFloat32ArrayCid) {
*last = new FloatToDoubleInstr(new Value(*last), deopt_id);
flow_graph()->AppendTo(cursor,
*last,
deopt_id != Isolate::kNoDeoptId ? call->env() : NULL,
- Definition::kValue);
+ FlowGraph::kValue);
}
return true;
}
@@ -3656,7 +3656,7 @@
cursor = flow_graph()->AppendTo(cursor,
stored_value,
NULL,
- Definition::kValue);
+ FlowGraph::kValue);
}
StoreBarrierType needs_store_barrier = kNoStoreBarrier;
@@ -3673,7 +3673,7 @@
*last,
call->deopt_id() != Isolate::kNoDeoptId ?
call->env() : NULL,
- Definition::kEffect);
+ FlowGraph::kEffect);
return true;
}
@@ -3692,7 +3692,7 @@
call->deopt_id(),
call->token_pos()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
LoadFieldInstr* length =
new LoadFieldInstr(new Value(*array),
@@ -3706,7 +3706,7 @@
*cursor = flow_graph()->AppendTo(*cursor,
length,
NULL,
- Definition::kValue);
+ FlowGraph::kValue);
intptr_t element_size = FlowGraphCompiler::ElementSizeFor(array_cid);
ConstantInstr* bytes_per_element =
@@ -3717,7 +3717,7 @@
new Value(bytes_per_element),
call->deopt_id(), call->token_pos());
*cursor = flow_graph()->AppendTo(*cursor, len_in_bytes, call->env(),
- Definition::kValue);
+ FlowGraph::kValue);
ConstantInstr* length_adjustment =
flow_graph()->GetConstant(Smi::Handle(Smi::New(
@@ -3729,7 +3729,7 @@
new Value(length_adjustment),
call->deopt_id(), call->token_pos());
*cursor = flow_graph()->AppendTo(*cursor, adjusted_length, call->env(),
- Definition::kValue);
+ FlowGraph::kValue);
// Check adjusted_length > 0.
ConstantInstr* zero = flow_graph()->GetConstant(Smi::Handle(Smi::New(0)));
@@ -3739,7 +3739,7 @@
new Value(zero),
call->deopt_id()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
// Check 0 <= byte_index < adjusted_length.
*cursor = flow_graph()->AppendTo(*cursor,
new CheckArrayBoundInstr(
@@ -3747,7 +3747,7 @@
new Value(byte_index),
call->deopt_id()),
call->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
if (RawObject::IsExternalTypedDataClassId(array_cid)) {
LoadUntaggedInstr* elements =
@@ -3756,7 +3756,7 @@
*cursor = flow_graph()->AppendTo(*cursor,
elements,
NULL,
- Definition::kValue);
+ FlowGraph::kValue);
*array = elements;
}
return array_cid;
@@ -4004,7 +4004,7 @@
InsertBefore(call,
left_cid,
NULL,
- Definition::kValue);
+ FlowGraph::kValue);
const intptr_t type_cid = Class::Handle(type.type_class()).id();
ConstantInstr* cid =
flow_graph()->GetConstant(Smi::Handle(Smi::New(type_cid)));
@@ -4419,7 +4419,7 @@
instr->deopt_id(),
instr->token_pos()),
instr->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
needs_store_barrier = kNoStoreBarrier;
}
@@ -4429,7 +4429,7 @@
field,
instr->deopt_id()),
instr->env(),
- Definition::kEffect);
+ FlowGraph::kEffect);
}
// Field guard was detached.
@@ -4717,7 +4717,7 @@
ConstraintInstr* constraint =
new ConstraintInstr(new Value(defn), constraint_range);
- flow_graph_->InsertAfter(after, constraint, NULL, Definition::kValue);
+ flow_graph_->InsertAfter(after, constraint, NULL, FlowGraph::kValue);
RenameDominatedUses(defn, constraint, constraint);
constraints_.Add(constraint);
return constraint;
@@ -5156,7 +5156,7 @@
it->RemoveCurrentFromGraph();
GotoInstr* last = pre_header->last_instruction()->AsGoto();
// Using kind kEffect will not assign a fresh ssa temporary index.
- flow_graph()->InsertBefore(last, current, last->env(), Definition::kEffect);
+ flow_graph()->InsertBefore(last, current, last->env(), FlowGraph::kEffect);
current->deopt_id_ = last->GetDeoptId();
}
@@ -9395,7 +9395,7 @@
flow_graph->InsertBefore(branch,
if_then_else,
NULL,
- Definition::kValue);
+ FlowGraph::kValue);
phi->ReplaceUsesWith(if_then_else);
@@ -9640,12 +9640,12 @@
AbstractType::ZoneHandle(),
alloc->token_pos());
flow_graph_->InsertBefore(
- exit, load, NULL, Definition::kValue);
+ exit, load, NULL, FlowGraph::kValue);
values->Add(new Value(load));
}
MaterializeObjectInstr* mat = new MaterializeObjectInstr(cls, slots, values);
- flow_graph_->InsertBefore(exit, mat, NULL, Definition::kValue);
+ flow_graph_->InsertBefore(exit, mat, NULL, FlowGraph::kValue);
// Replace all mentions of this allocation with a newly inserted
// MaterializeObject instruction.
diff --git a/runtime/vm/flow_graph_optimizer.h b/runtime/vm/flow_graph_optimizer.h
index 02a1e49..ae814e0 100644
--- a/runtime/vm/flow_graph_optimizer.h
+++ b/runtime/vm/flow_graph_optimizer.h
@@ -64,7 +64,7 @@
void InsertBefore(Instruction* next,
Instruction* instr,
Environment* env,
- Definition::UseKind use_kind) {
+ FlowGraph::UseKind use_kind) {
flow_graph_->InsertBefore(next, instr, env, use_kind);
}
diff --git a/runtime/vm/il_printer.cc b/runtime/vm/il_printer.cc
index db27a46..9a2002e 100644
--- a/runtime/vm/il_printer.cc
+++ b/runtime/vm/il_printer.cc
@@ -170,17 +170,15 @@
static void PrintUse(BufferFormatter* f, const Definition& definition) {
- if (definition.is_used()) {
- if (definition.HasSSATemp()) {
- if (definition.HasPairRepresentation()) {
- f->Print("v%" Pd ", v%" Pd "", definition.ssa_temp_index(),
- definition.ssa_temp_index() + 1);
- } else {
- f->Print("v%" Pd "", definition.ssa_temp_index());
- }
- } else if (definition.temp_index() != -1) {
- f->Print("t%" Pd "", definition.temp_index());
+ if (definition.HasSSATemp()) {
+ if (definition.HasPairRepresentation()) {
+ f->Print("v%" Pd ", v%" Pd "", definition.ssa_temp_index(),
+ definition.ssa_temp_index() + 1);
+ } else {
+ f->Print("v%" Pd "", definition.ssa_temp_index());
}
+ } else if (definition.HasTemp()) {
+ f->Print("t%" Pd "", definition.temp_index());
}
}
@@ -214,9 +212,7 @@
void Definition::PrintTo(BufferFormatter* f) const {
PrintUse(f, *this);
- if (is_used()) {
- if (HasSSATemp() || (temp_index() != -1)) f->Print(" <- ");
- }
+ if (HasSSATemp() || HasTemp()) f->Print(" <- ");
if (GetDeoptId() != Isolate::kNoDeoptId) {
f->Print("%s:%" Pd "(", DebugName(), GetDeoptId());
} else {
diff --git a/runtime/vm/intermediate_language.cc b/runtime/vm/intermediate_language.cc
index 93c8f84..c1bcf36 100644
--- a/runtime/vm/intermediate_language.cc
+++ b/runtime/vm/intermediate_language.cc
@@ -43,7 +43,6 @@
ssa_temp_index_(-1),
input_use_list_(NULL),
env_use_list_(NULL),
- use_kind_(kValue), // Phis and parameters rely on this default.
constant_value_(Object::ZoneHandle(ConstantPropagator::Unknown())) {
}
@@ -1403,7 +1402,7 @@
new MathUnaryInstr(MathUnaryInstr::kDoubleSquare,
new Value(left()->definition()),
DeoptimizationTarget());
- flow_graph->InsertBefore(this, math_unary, env(), Definition::kValue);
+ flow_graph->InsertBefore(this, math_unary, env(), FlowGraph::kValue);
return math_unary;
}
@@ -1661,7 +1660,7 @@
ConstantInstr* c = value()->definition()->AsConstant();
if ((c != NULL) && c->value().IsDouble()) {
UnboxedConstantInstr* uc = new UnboxedConstantInstr(c->value());
- flow_graph->InsertBefore(this, uc, NULL, Definition::kValue);
+ flow_graph->InsertBefore(this, uc, NULL, FlowGraph::kValue);
return uc;
}
@@ -2132,7 +2131,6 @@
Value* value = InputAt(i);
switch (value->definition()->representation()) {
case kUnboxedDouble:
- case kUnboxedMint:
locations_[i] = Location::DoubleStackSlot(index);
break;
case kUnboxedFloat32x4:
@@ -2143,6 +2141,8 @@
default:
UNREACHABLE();
}
+ } else if (loc.IsPairLocation()) {
+ UNREACHABLE();
} else if (loc.IsInvalid()) {
// We currently only perform one iteration of allocation
// sinking, so we do not expect to find materialized objects
@@ -2349,7 +2349,7 @@
for (intptr_t i = 0; i < length; ++i) {
copy->values_.Add(values_[i]->Copy());
if (locations_ != NULL) {
- copy->locations_[i] = locations_[i];
+ copy->locations_[i] = locations_[i].Copy();
}
}
return copy;
diff --git a/runtime/vm/intermediate_language.h b/runtime/vm/intermediate_language.h
index f1a82f1..b1b982c 100644
--- a/runtime/vm/intermediate_language.h
+++ b/runtime/vm/intermediate_language.h
@@ -1749,8 +1749,6 @@
// Abstract super-class of all instructions that define a value (Bind, Phi).
class Definition : public Instruction {
public:
- enum UseKind { kEffect, kValue };
-
Definition();
virtual Definition* AsDefinition() { return this; }
@@ -1770,21 +1768,20 @@
intptr_t temp_index() const { return temp_index_; }
void set_temp_index(intptr_t index) { temp_index_ = index; }
void ClearTempIndex() { temp_index_ = -1; }
+ bool HasTemp() const { return temp_index_ >= 0; }
intptr_t ssa_temp_index() const { return ssa_temp_index_; }
void set_ssa_temp_index(intptr_t index) {
ASSERT(index >= 0);
- ASSERT(is_used());
ssa_temp_index_ = index;
}
bool HasSSATemp() const { return ssa_temp_index_ >= 0; }
void ClearSSATempIndex() { ssa_temp_index_ = -1; }
bool HasPairRepresentation() const {
return (representation() == kPairOfTagged) ||
- (representation() == kPairOfUnboxedDouble);
+ (representation() == kPairOfUnboxedDouble) ||
+ (representation() == kUnboxedMint);
}
- bool is_used() const { return (use_kind_ != kEffect); }
- void set_use_kind(UseKind kind) { use_kind_ = kind; }
// Compile time type of the definition, which may be requested before type
// propagation during graph building.
@@ -1910,7 +1907,6 @@
intptr_t ssa_temp_index_;
Value* input_use_list_;
Value* env_use_list_;
- UseKind use_kind_;
Object& constant_value_;
@@ -2072,7 +2068,6 @@
public:
explicit PushArgumentInstr(Value* value) {
SetInputAt(0, value);
- set_use_kind(kEffect); // Override the default.
}
DECLARE_INSTRUCTION(PushArgument)
diff --git a/runtime/vm/intermediate_language_arm.cc b/runtime/vm/intermediate_language_arm.cc
index 34df2e3..fd12d67 100644
--- a/runtime/vm/intermediate_language_arm.cc
+++ b/runtime/vm/intermediate_language_arm.cc
@@ -78,7 +78,7 @@
// The entry needs to be patchable, no inlined objects are allowed in the area
// that will be overwritten by the patch instructions: a branch macro sequence.
void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register result = locs()->in(0).reg();
+ const Register result = locs()->in(0).reg();
ASSERT(result == R0);
#if defined(DEBUG)
Label stack_ok;
@@ -240,7 +240,7 @@
void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register result = locs()->out(0).reg();
+ const Register result = locs()->out(0).reg();
__ LoadFromOffset(kWord, result, FP, local().index() * kWordSize);
}
@@ -253,8 +253,8 @@
void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register value = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register value = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
ASSERT(result == value); // Assert that register assignment is correct.
__ str(value, Address(FP, local().index() * kWordSize));
}
@@ -270,7 +270,7 @@
void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The register allocator drops constant definitions that have no uses.
if (!locs()->out(0).IsInvalid()) {
- Register result = locs()->out(0).reg();
+ const Register result = locs()->out(0).reg();
__ LoadObject(result, value());
}
}
@@ -354,8 +354,8 @@
void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register obj = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register obj = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler);
ASSERT(obj == result);
@@ -380,14 +380,13 @@
LocationSummary* EqualityCompareInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 2;
if (operation_cid() == kMintCid) {
- const intptr_t kNumTemps = 3;
+ const intptr_t kNumTemps = 0;
LocationSummary* locs =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- locs->set_in(0, Location::RequiresFpuRegister());
- locs->set_in(1, Location::RequiresFpuRegister());
- locs->set_temp(0, Location::RequiresFpuRegister());
- locs->set_temp(1, Location::RequiresRegister());
- locs->set_temp(2, Location::RequiresRegister());
+ locs->set_in(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
+ locs->set_in(1, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
locs->set_out(0, Location::RequiresRegister());
return locs;
}
@@ -515,37 +514,32 @@
LocationSummary* locs,
Token::Kind kind) {
ASSERT(Token::IsEqualityOperator(kind));
- QRegister left = locs->in(0).fpu_reg();
- QRegister right = locs->in(1).fpu_reg();
- QRegister tmpq = locs->temp(0).fpu_reg();
- Register tmp_lo = locs->temp(1).reg();
- Register tmp_hi = locs->temp(2).reg();
+ PairLocation* left_pair = locs->in(0).AsPairLocation();
+ Register left1 = left_pair->At(0).reg();
+ Register left2 = left_pair->At(1).reg();
+ PairLocation* right_pair = locs->in(1).AsPairLocation();
+ Register right1 = right_pair->At(0).reg();
+ Register right2 = right_pair->At(1).reg();
- __ vceqqi(kWord, tmpq, left, right);
- __ vmovrrd(tmp_lo, tmp_hi, EvenDRegisterOf(tmpq));
- // tmp_lo and tmp_hi must both be 0xffffffff.
- __ and_(tmp_lo, tmp_lo, ShifterOperand(tmp_hi));
-
- Condition true_condition = TokenKindToMintCondition(kind);
- __ CompareImmediate(tmp_lo, 0xffffffff);
- return true_condition;
+ // Compare lower.
+ __ cmp(left1, ShifterOperand(right1));
+ // Compare upper if lower is equal.
+ __ cmp(left2, ShifterOperand(right2), EQ);
+ return TokenKindToMintCondition(kind);
}
static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler,
LocationSummary* locs,
Token::Kind kind) {
- QRegister left = locs->in(0).fpu_reg();
- QRegister right = locs->in(1).fpu_reg();
- DRegister dleft0 = EvenDRegisterOf(left);
- DRegister dright0 = EvenDRegisterOf(right);
- SRegister sleft0 = EvenSRegisterOf(dleft0);
- SRegister sleft1 = OddSRegisterOf(dleft0);
- SRegister sright0 = EvenSRegisterOf(dright0);
- SRegister sright1 = OddSRegisterOf(dright0);
+ PairLocation* left_pair = locs->in(0).AsPairLocation();
+ Register left1 = left_pair->At(0).reg();
+ Register left2 = left_pair->At(1).reg();
+ PairLocation* right_pair = locs->in(1).AsPairLocation();
+ Register right1 = right_pair->At(0).reg();
+ Register right2 = right_pair->At(1).reg();
- Register tmp_left = locs->temp(0).reg();
- Register tmp_right = locs->temp(1).reg();
+ Register out = locs->temp(0).reg();
// 64-bit comparison
Condition hi_true_cond, hi_false_cond, lo_false_cond;
@@ -568,25 +562,18 @@
}
Label is_true, is_false, done;
- __ vmovrs(tmp_left, sleft1);
- __ vmovrs(tmp_right, sright1);
- __ cmp(tmp_left, ShifterOperand(tmp_right));
- __ b(&is_false, hi_false_cond);
- __ b(&is_true, hi_true_cond);
+ // Compare upper halves first.
+ __ cmp(left2, ShifterOperand(right2));
+ __ LoadImmediate(out, 0, hi_false_cond);
+ __ LoadImmediate(out, 1, hi_true_cond);
+ // If higher words aren't equal, skip comparing lower words.
+ __ b(&done, NE);
- __ vmovrs(tmp_left, sleft0);
- __ vmovrs(tmp_right, sright0);
- __ cmp(tmp_left, ShifterOperand(tmp_right));
- __ b(&is_false, lo_false_cond);
- // Else is true.
- __ b(&is_true);
-
- __ Bind(&is_false);
- __ LoadImmediate(tmp_left, 0);
- __ b(&done);
- __ Bind(&is_true);
- __ LoadImmediate(tmp_left, 1);
+ __ cmp(left1, ShifterOperand(right1));
+ __ LoadImmediate(out, 1);
+ __ LoadImmediate(out, 0, lo_false_cond);
__ Bind(&done);
+
return NegateCondition(lo_false_cond);
}
@@ -609,10 +596,10 @@
static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
LocationSummary* locs,
Token::Kind kind) {
- QRegister left = locs->in(0).fpu_reg();
- QRegister right = locs->in(1).fpu_reg();
- DRegister dleft = EvenDRegisterOf(left);
- DRegister dright = EvenDRegisterOf(right);
+ const QRegister left = locs->in(0).fpu_reg();
+ const QRegister right = locs->in(1).fpu_reg();
+ const DRegister dleft = EvenDRegisterOf(left);
+ const DRegister dright = EvenDRegisterOf(right);
__ vcmpd(dleft, dright);
__ vmstat();
Condition true_condition = TokenKindToDoubleCondition(kind);
@@ -640,7 +627,7 @@
BranchLabels labels = { NULL, NULL, NULL };
Condition true_condition = EmitComparisonCode(compiler, labels);
- Register result = locs()->out(0).reg();
+ const Register result = locs()->out(0).reg();
if ((operation_cid() == kSmiCid) || (operation_cid() == kMintCid)) {
__ LoadObject(result, Bool::True(), true_condition);
__ LoadObject(result, Bool::False(), NegateCondition(true_condition));
@@ -688,7 +675,7 @@
Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels) {
- Register left = locs()->in(0).reg();
+ const Register left = locs()->in(0).reg();
Location right = locs()->in(1);
if (right.IsConstant()) {
ASSERT(right.constant().IsSmi());
@@ -732,8 +719,8 @@
Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels) {
ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
- Register val_reg = locs()->in(0).reg();
- Register cid_reg = locs()->temp(0).reg();
+ const Register val_reg = locs()->in(0).reg();
+ const Register cid_reg = locs()->temp(0).reg();
Label* deopt = CanDeoptimize() ?
compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids) : NULL;
@@ -776,7 +763,7 @@
void TestCidsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register result_reg = locs()->out(0).reg();
+ const Register result_reg = locs()->out(0).reg();
Label is_true, is_false, done;
BranchLabels labels = { &is_true, &is_false, &is_false };
EmitComparisonCode(compiler, labels);
@@ -793,13 +780,14 @@
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
if (operation_cid() == kMintCid) {
- const intptr_t kNumTemps = 2;
+ const intptr_t kNumTemps = 1;
LocationSummary* locs =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- locs->set_in(0, Location::RequiresFpuRegister());
- locs->set_in(1, Location::RequiresFpuRegister());
+ locs->set_in(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
+ locs->set_in(1, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
locs->set_temp(0, Location::RequiresRegister());
- locs->set_temp(1, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
return locs;
}
@@ -843,12 +831,12 @@
BranchLabels labels = { NULL, NULL, NULL };
Condition true_condition = EmitComparisonCode(compiler, labels);
- Register result = locs()->out(0).reg();
+ const Register result = locs()->out(0).reg();
if (operation_cid() == kSmiCid) {
__ LoadObject(result, Bool::True(), true_condition);
__ LoadObject(result, Bool::False(), NegateCondition(true_condition));
} else if (operation_cid() == kMintCid) {
- Register cr = locs()->temp(0).reg();
+ const Register cr = locs()->temp(0).reg();
__ LoadObject(result, Bool::True());
__ CompareImmediate(cr, 1);
__ LoadObject(result, Bool::False(), NE);
@@ -873,7 +861,7 @@
if (operation_cid() == kSmiCid) {
EmitBranchOnCondition(compiler, true_condition, labels);
} else if (operation_cid() == kMintCid) {
- Register result = locs()->temp(0).reg();
+ const Register result = locs()->temp(0).reg();
__ CompareImmediate(result, 1);
__ b(labels.true_label, EQ);
__ b(labels.false_label, NE);
@@ -903,7 +891,7 @@
ASSERT(locs()->temp(0).reg() == R1);
ASSERT(locs()->temp(1).reg() == R2);
ASSERT(locs()->temp(2).reg() == R5);
- Register result = locs()->out(0).reg();
+ const Register result = locs()->out(0).reg();
// Push the result place holder initialized to NULL.
__ PushObject(Object::ZoneHandle());
@@ -957,8 +945,8 @@
void StringFromCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register char_code = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register char_code = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
__ LoadImmediate(result,
reinterpret_cast<uword>(Symbols::PredefinedAddress()));
__ AddImmediate(result, Symbols::kNullCharCodeSymbolOffset * kWordSize);
@@ -976,8 +964,8 @@
void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(cid_ == kOneByteStringCid);
- Register str = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register str = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
__ ldr(result, FieldAddress(str, String::length_offset()));
__ cmp(result, ShifterOperand(Smi::RawValue(1)));
__ LoadImmediate(result, -1, NE);
@@ -998,7 +986,7 @@
void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register array = locs()->in(0).reg();
+ const Register array = locs()->in(0).reg();
__ Push(array);
const int kNumberOfArguments = 1;
const Array& kNoArgumentNames = Object::null_array();
@@ -1021,8 +1009,8 @@
void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register object = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register object = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
__ LoadFromOffset(kWord, result, object, offset() - kHeapObjectTag);
}
@@ -1036,8 +1024,8 @@
void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register object = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register object = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
Label load, done;
__ tst(object, ShifterOperand(kSmiTagMask));
__ b(&load, NE);
@@ -1153,7 +1141,11 @@
} else {
locs->set_out(0, Location::RequiresFpuRegister());
}
+ } else if (representation() == kUnboxedMint) {
+ locs->set_out(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
} else {
+ ASSERT(representation() == kTagged);
locs->set_out(0, Location::RequiresRegister());
}
return locs;
@@ -1162,12 +1154,11 @@
void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if ((representation() == kUnboxedDouble) ||
- (representation() == kUnboxedMint) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
- Register array = locs()->in(0).reg();
- Register idx = locs()->in(1).reg();
+ const Register array = locs()->in(0).reg();
+ const Register idx = locs()->in(1).reg();
switch (index_scale()) {
case 1:
__ add(idx, array, ShifterOperand(idx, ASR, kSmiTagSize));
@@ -1194,23 +1185,6 @@
const QRegister result = locs()->out(0).fpu_reg();
const DRegister dresult0 = EvenDRegisterOf(result);
switch (class_id()) {
- case kTypedDataInt32ArrayCid:
- __ veorq(result, result, result);
- __ ldr(TMP, element_address);
- // Re-use the index register so we don't have to require a low-numbered
- // Q register.
- // Sign-extend into idx.
- __ Asr(idx, TMP, 31);
- __ vmovdrr(dresult0, TMP, idx);
- break;
- case kTypedDataUint32ArrayCid:
- __ veorq(result, result, result);
- __ ldr(TMP, element_address);
- // Re-use the index register so we don't have to require a low-numbered
- // Q register.
- __ LoadImmediate(idx, 0);
- __ vmovdrr(dresult0, TMP, idx);
- break;
case kTypedDataFloat32ArrayCid:
// Load single precision float.
// vldrs does not support indexed addressing.
@@ -1225,11 +1199,13 @@
case kTypedDataFloat32x4ArrayCid:
__ vldmd(IA, idx, dresult0, 2);
break;
+ default:
+ UNREACHABLE();
}
return;
}
- Register array = locs()->in(0).reg();
+ const Register array = locs()->in(0).reg();
Location index = locs()->in(1);
ASSERT(index.IsRegister()); // TODO(regis): Revisit.
Address element_address(kNoRegister, 0);
@@ -1267,6 +1243,33 @@
UNREACHABLE();
}
+ if (representation() == kUnboxedMint) {
+ ASSERT(locs()->out(0).IsPairLocation());
+ PairLocation* result_pair = locs()->out(0).AsPairLocation();
+ Register result1 = result_pair->At(0).reg();
+ Register result2 = result_pair->At(1).reg();
+ switch (class_id()) {
+ case kTypedDataInt32ArrayCid:
+ // Load low word.
+ __ ldr(result1, element_address);
+ // Sign extend into high word.
+ __ SignFill(result2, result1);
+ break;
+ case kTypedDataUint32ArrayCid:
+ // Load low word.
+ __ ldr(result1, element_address);
+ // Zero high word.
+ __ eor(result2, result2, ShifterOperand(result2));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+
+ ASSERT(representation() == kTagged);
+
Register result = locs()->out(0).reg();
switch (class_id()) {
case kTypedDataInt8ArrayCid:
@@ -1384,11 +1387,16 @@
break;
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
- // Mints are stored in Q registers. For smis, use a writable register
- // because the value must be untagged before storing.
- locs->set_in(2, value()->IsSmiValue()
- ? Location::WritableRegister()
- : Location::FpuRegisterLocation(Q7));
+ // For smis, use a writable register because the value must be untagged
+ // before storing. Mints are stored in register pairs.
+ if (value()->IsSmiValue()) {
+ locs->set_in(2, Location::WritableRegister());
+ } else {
+ // We only move the lower 32-bits so we don't care where the high bits
+ // are located.
+ locs->set_in(2, Location::Pair(Location::RequiresRegister(),
+ Location::Any()));
+ }
break;
case kTypedDataFloat32ArrayCid:
// Need low register (<= Q7).
@@ -1414,8 +1422,8 @@
(class_id() == kTypedDataFloat32x4ArrayCid) ||
(class_id() == kTypedDataFloat64x2ArrayCid) ||
(class_id() == kTypedDataInt32x4ArrayCid)) {
- Register array = locs()->in(0).reg();
- Register idx = locs()->in(1).reg();
+ const Register array = locs()->in(0).reg();
+ const Register idx = locs()->in(1).reg();
Location value = locs()->in(2);
switch (index_scale()) {
case 1:
@@ -1441,13 +1449,13 @@
}
switch (class_id()) {
case kTypedDataFloat32ArrayCid: {
- SRegister value_reg =
+ const SRegister value_reg =
EvenSRegisterOf(EvenDRegisterOf(value.fpu_reg()));
__ StoreSToOffset(value_reg, idx, 0);
break;
}
case kTypedDataFloat64ArrayCid: {
- DRegister value_reg = EvenDRegisterOf(value.fpu_reg());
+ const DRegister value_reg = EvenDRegisterOf(value.fpu_reg());
__ StoreDToOffset(value_reg, idx, 0);
break;
}
@@ -1464,7 +1472,7 @@
return;
}
- Register array = locs()->in(0).reg();
+ const Register array = locs()->in(0).reg();
Location index = locs()->in(1);
Address element_address(kNoRegister, 0);
@@ -1505,13 +1513,13 @@
switch (class_id()) {
case kArrayCid:
if (ShouldEmitStoreBarrier()) {
- Register value = locs()->in(2).reg();
+ const Register value = locs()->in(2).reg();
__ StoreIntoObject(array, element_address, value);
} else if (locs()->in(2).IsConstant()) {
const Object& constant = locs()->in(2).constant();
__ StoreIntoObjectNoBarrier(array, element_address, constant);
} else {
- Register value = locs()->in(2).reg();
+ const Register value = locs()->in(2).reg();
__ StoreIntoObjectNoBarrier(array, element_address, value);
}
break;
@@ -1524,7 +1532,7 @@
__ LoadImmediate(IP, static_cast<int8_t>(constant.Value()));
__ strb(IP, element_address);
} else {
- Register value = locs()->in(2).reg();
+ const Register value = locs()->in(2).reg();
__ SmiUntag(value);
__ strb(value, element_address);
}
@@ -1544,7 +1552,7 @@
__ LoadImmediate(IP, static_cast<int8_t>(value));
__ strb(IP, element_address);
} else {
- Register value = locs()->in(2).reg();
+ const Register value = locs()->in(2).reg();
Label store_value;
__ SmiUntag(value);
__ cmp(value, ShifterOperand(0xFF));
@@ -1559,7 +1567,7 @@
}
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid: {
- Register value = locs()->in(2).reg();
+ const Register value = locs()->in(2).reg();
__ SmiUntag(value);
__ strh(value, element_address);
break;
@@ -1568,15 +1576,14 @@
case kTypedDataUint32ArrayCid: {
if (value()->IsSmiValue()) {
ASSERT(RequiredInputRepresentation(2) == kTagged);
- Register value = locs()->in(2).reg();
+ const Register value = locs()->in(2).reg();
__ SmiUntag(value);
__ str(value, element_address);
} else {
ASSERT(RequiredInputRepresentation(2) == kUnboxedMint);
- QRegister value = locs()->in(2).fpu_reg();
- ASSERT(value == Q7);
- __ vmovrs(TMP, EvenSRegisterOf(EvenDRegisterOf(value)));
- __ str(TMP, element_address);
+ PairLocation* value_pair = locs()->in(2).AsPairLocation();
+ Register value1 = value_pair->At(0).reg();
+ __ str(value1, element_address);
}
break;
}
@@ -1622,11 +1629,11 @@
const intptr_t value_cid = value()->Type()->ToCid();
- Register value_reg = locs()->in(0).reg();
+ const Register value_reg = locs()->in(0).reg();
- Register value_cid_reg = locs()->temp(0).reg();
+ const Register value_cid_reg = locs()->temp(0).reg();
- Register temp_reg = locs()->temp(1).reg();
+ const Register temp_reg = locs()->temp(1).reg();
Register field_reg = needs_field_temp_reg ?
locs()->temp(locs()->temp_count() - 1).reg() : kNoRegister;
@@ -1975,7 +1982,7 @@
void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label skip_store;
- Register instance_reg = locs()->in(0).reg();
+ const Register instance_reg = locs()->in(0).reg();
if (IsUnboxedStore() && compiler->is_optimizing()) {
const DRegister value = EvenDRegisterOf(locs()->in(1).fpu_reg());
@@ -2160,7 +2167,7 @@
}
if (ShouldEmitStoreBarrier()) {
- Register value_reg = locs()->in(1).reg();
+ const Register value_reg = locs()->in(1).reg();
__ StoreIntoObject(instance_reg,
FieldAddress(instance_reg, offset_in_bytes_),
value_reg,
@@ -2172,7 +2179,7 @@
FieldAddress(instance_reg, offset_in_bytes_),
locs()->in(1).constant());
} else {
- Register value_reg = locs()->in(1).reg();
+ const Register value_reg = locs()->in(1).reg();
__ StoreIntoObjectNoBarrier(instance_reg,
FieldAddress(instance_reg, offset_in_bytes_), value_reg);
}
@@ -2198,8 +2205,8 @@
//
// This is safe only so long as LoadStaticFieldInstr cannot deoptimize.
void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register field = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register field = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
__ LoadFromOffset(kWord, result,
field, Field::value_offset() - kHeapObjectTag);
}
@@ -2215,8 +2222,8 @@
void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register value = locs()->in(0).reg();
- Register temp = locs()->temp(0).reg();
+ const Register value = locs()->in(0).reg();
+ const Register temp = locs()->temp(0).reg();
__ LoadObject(temp, field());
if (this->value()->NeedsStoreBuffer()) {
@@ -2531,7 +2538,7 @@
}
Label done;
- Register result_reg = locs()->out(0).reg();
+ const Register result_reg = locs()->out(0).reg();
if (IsPotentialUnboxedLoad()) {
const DRegister value = EvenDRegisterOf(locs()->temp(0).fpu_reg());
const Register temp = locs()->temp(1).reg();
@@ -2636,8 +2643,8 @@
void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register instantiator_reg = locs()->in(0).reg();
- Register result_reg = locs()->out(0).reg();
+ const Register instantiator_reg = locs()->in(0).reg();
+ const Register result_reg = locs()->out(0).reg();
// 'instantiator_reg' is the instantiator TypeArguments object (or null).
// A runtime call to instantiate the type is required.
@@ -2669,8 +2676,8 @@
void InstantiateTypeArgumentsInstr::EmitNativeCode(
FlowGraphCompiler* compiler) {
- Register instantiator_reg = locs()->in(0).reg();
- Register result_reg = locs()->out(0).reg();
+ const Register instantiator_reg = locs()->in(0).reg();
+ const Register result_reg = locs()->out(0).reg();
ASSERT(instantiator_reg == R0);
ASSERT(instantiator_reg == result_reg);
@@ -2761,8 +2768,8 @@
void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register context_value = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register context_value = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
__ PushObject(Object::ZoneHandle()); // Make room for the result.
__ Push(context_value);
@@ -2833,7 +2840,7 @@
virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
if (FLAG_use_osr) {
uword flags_address = Isolate::Current()->stack_overflow_flags_address();
- Register value = instruction_->locs()->temp(0).reg();
+ const Register value = instruction_->locs()->temp(0).reg();
__ Comment("CheckStackOverflowSlowPathOsr");
__ Bind(osr_entry_label());
__ LoadImmediate(IP, flags_address);
@@ -2885,7 +2892,7 @@
__ cmp(SP, ShifterOperand(IP));
__ b(slow_path->entry_label(), LS);
if (compiler->CanOSRFunction() && in_loop()) {
- Register temp = locs()->temp(0).reg();
+ const Register temp = locs()->temp(0).reg();
// In unoptimized code check the usage counter to trigger OSR at loop
// stack checks. Use progressively higher thresholds for more deeply
// nested loops to attempt to hit outer loops with OSR when possible.
@@ -2907,8 +2914,8 @@
BinarySmiOpInstr* shift_left) {
const bool is_truncating = shift_left->is_truncating();
const LocationSummary& locs = *shift_left->locs();
- Register left = locs.in(0).reg();
- Register result = locs.out(0).reg();
+ const Register left = locs.in(0).reg();
+ const Register result = locs.out(0).reg();
Label* deopt = shift_left->CanDeoptimize() ?
compiler->AddDeoptStub(shift_left->deopt_id(), ICData::kDeoptBinarySmiOp)
: NULL;
@@ -2943,7 +2950,7 @@
}
// Right (locs.in(1)) is not constant.
- Register right = locs.in(1).reg();
+ const Register right = locs.in(1).reg();
Range* right_range = shift_left->right()->definition()->range();
if (shift_left->left()->BindsToConstant() && !is_truncating) {
// TODO(srdjan): Implement code below for is_truncating().
@@ -3005,7 +3012,7 @@
// Check if count too large for handling it inlined.
__ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
// Overflow test (preserve left, right, and IP);
- Register temp = locs.temp(0).reg();
+ const Register temp = locs.temp(0).reg();
__ Lsl(temp, left, IP);
__ cmp(left, ShifterOperand(temp, ASR, IP));
__ b(deopt, NE); // Overflow.
@@ -3067,8 +3074,8 @@
}
ASSERT(!is_truncating());
- Register left = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register left = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
Label* deopt = NULL;
if (CanDeoptimize()) {
deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
@@ -3154,7 +3161,7 @@
ASSERT(kSmiTagSize == 1);
__ mov(IP, ShifterOperand(left, ASR, 31));
ASSERT(shift_count > 1); // 1, -1 case handled above.
- Register temp = locs()->temp(0).reg();
+ const Register temp = locs()->temp(0).reg();
__ add(temp, left, ShifterOperand(IP, LSR, 32 - shift_count));
ASSERT(shift_count > 0);
__ mov(result, ShifterOperand(temp, ASR, shift_count));
@@ -3231,7 +3238,7 @@
return;
}
- Register right = locs()->in(1).reg();
+ const Register right = locs()->in(1).reg();
Range* right_range = this->right()->definition()->range();
switch (op_kind()) {
case Token::kADD: {
@@ -3293,8 +3300,8 @@
__ cmp(right, ShifterOperand(0));
__ b(deopt, EQ);
}
- Register temp = locs()->temp(0).reg();
- DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
+ const Register temp = locs()->temp(0).reg();
+ const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
__ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp.
__ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
@@ -3313,8 +3320,8 @@
__ cmp(right, ShifterOperand(0));
__ b(deopt, EQ);
}
- Register temp = locs()->temp(0).reg();
- DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
+ const Register temp = locs()->temp(0).reg();
+ const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
__ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp.
__ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
@@ -3354,7 +3361,7 @@
__ CompareImmediate(IP, kCountLimit);
__ LoadImmediate(IP, kCountLimit, GT);
}
- Register temp = locs()->temp(0).reg();
+ const Register temp = locs()->temp(0).reg();
__ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp.
__ Asr(result, temp, IP);
__ SmiTag(result);
@@ -3399,8 +3406,8 @@
ICData::kDeoptBinaryDoubleOp);
intptr_t left_cid = left()->Type()->ToCid();
intptr_t right_cid = right()->Type()->ToCid();
- Register left = locs()->in(0).reg();
- Register right = locs()->in(1).reg();
+ const Register left = locs()->in(0).reg();
+ const Register right = locs()->in(1).reg();
if (this->left()->definition() == this->right()->definition()) {
__ tst(left, ShifterOperand(kSmiTagMask));
} else if (left_cid == kSmiCid) {
@@ -3477,7 +3484,7 @@
} else {
Label* deopt = compiler->AddDeoptStub(deopt_id_,
ICData::kDeoptBinaryDoubleOp);
- Register temp = locs()->temp(0).reg();
+ const Register temp = locs()->temp(0).reg();
if (value_type->is_nullable() &&
(value_type->ToNullableCid() == kDoubleCid)) {
__ CompareImmediate(value, reinterpret_cast<intptr_t>(Object::null()));
@@ -3782,9 +3789,9 @@
void BinaryFloat32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister left = locs()->in(0).fpu_reg();
- QRegister right = locs()->in(1).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister left = locs()->in(0).fpu_reg();
+ const QRegister right = locs()->in(1).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
switch (op_kind()) {
case Token::kADD: __ vaddqs(result, left, right); break;
@@ -3809,18 +3816,18 @@
void BinaryFloat64x2OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister left = locs()->in(0).fpu_reg();
- QRegister right = locs()->in(1).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister left = locs()->in(0).fpu_reg();
+ const QRegister right = locs()->in(1).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
- DRegister left0 = EvenDRegisterOf(left);
- DRegister left1 = OddDRegisterOf(left);
+ const DRegister left0 = EvenDRegisterOf(left);
+ const DRegister left1 = OddDRegisterOf(left);
- DRegister right0 = EvenDRegisterOf(right);
- DRegister right1 = OddDRegisterOf(right);
+ const DRegister right0 = EvenDRegisterOf(right);
+ const DRegister right1 = OddDRegisterOf(right);
- DRegister result0 = EvenDRegisterOf(result);
- DRegister result1 = OddDRegisterOf(result);
+ const DRegister result0 = EvenDRegisterOf(result);
+ const DRegister result1 = OddDRegisterOf(result);
switch (op_kind()) {
case Token::kADD:
@@ -3857,20 +3864,20 @@
void Simd32x4ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister value = locs()->in(0).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
- DRegister dresult0 = EvenDRegisterOf(result);
- DRegister dresult1 = OddDRegisterOf(result);
- SRegister sresult0 = EvenSRegisterOf(dresult0);
- SRegister sresult1 = OddSRegisterOf(dresult0);
- SRegister sresult2 = EvenSRegisterOf(dresult1);
- SRegister sresult3 = OddSRegisterOf(dresult1);
+ const QRegister value = locs()->in(0).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
+ const DRegister dresult0 = EvenDRegisterOf(result);
+ const DRegister dresult1 = OddDRegisterOf(result);
+ const SRegister sresult0 = EvenSRegisterOf(dresult0);
+ const SRegister sresult1 = OddSRegisterOf(dresult0);
+ const SRegister sresult2 = EvenSRegisterOf(dresult1);
+ const SRegister sresult3 = OddSRegisterOf(dresult1);
- DRegister dvalue0 = EvenDRegisterOf(value);
- DRegister dvalue1 = OddDRegisterOf(value);
+ const DRegister dvalue0 = EvenDRegisterOf(value);
+ const DRegister dvalue1 = OddDRegisterOf(value);
- DRegister dtemp0 = DTMP;
- DRegister dtemp1 = OddDRegisterOf(QTMP);
+ const DRegister dtemp0 = DTMP;
+ const DRegister dtemp1 = OddDRegisterOf(QTMP);
// For some cases the vdup instruction requires fewer
// instructions. For arbitrary shuffles, use vtbl.
@@ -3938,21 +3945,21 @@
void Simd32x4ShuffleMixInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister left = locs()->in(0).fpu_reg();
- QRegister right = locs()->in(1).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister left = locs()->in(0).fpu_reg();
+ const QRegister right = locs()->in(1).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
- DRegister dresult0 = EvenDRegisterOf(result);
- DRegister dresult1 = OddDRegisterOf(result);
- SRegister sresult0 = EvenSRegisterOf(dresult0);
- SRegister sresult1 = OddSRegisterOf(dresult0);
- SRegister sresult2 = EvenSRegisterOf(dresult1);
- SRegister sresult3 = OddSRegisterOf(dresult1);
+ const DRegister dresult0 = EvenDRegisterOf(result);
+ const DRegister dresult1 = OddDRegisterOf(result);
+ const SRegister sresult0 = EvenSRegisterOf(dresult0);
+ const SRegister sresult1 = OddSRegisterOf(dresult0);
+ const SRegister sresult2 = EvenSRegisterOf(dresult1);
+ const SRegister sresult3 = OddSRegisterOf(dresult1);
- DRegister dleft0 = EvenDRegisterOf(left);
- DRegister dleft1 = OddDRegisterOf(left);
- DRegister dright0 = EvenDRegisterOf(right);
- DRegister dright1 = OddDRegisterOf(right);
+ const DRegister dleft0 = EvenDRegisterOf(left);
+ const DRegister dleft1 = OddDRegisterOf(left);
+ const DRegister dright0 = EvenDRegisterOf(right);
+ const DRegister dright1 = OddDRegisterOf(right);
switch (op_kind()) {
case MethodRecognizer::kFloat32x4ShuffleMix:
@@ -3993,12 +4000,12 @@
void Simd32x4GetSignMaskInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister value = locs()->in(0).fpu_reg();
- DRegister dvalue0 = EvenDRegisterOf(value);
- DRegister dvalue1 = OddDRegisterOf(value);
+ const QRegister value = locs()->in(0).fpu_reg();
+ const DRegister dvalue0 = EvenDRegisterOf(value);
+ const DRegister dvalue1 = OddDRegisterOf(value);
- Register out = locs()->out(0).reg();
- Register temp = locs()->temp(0).reg();
+ const Register out = locs()->out(0).reg();
+ const Register temp = locs()->temp(0).reg();
// X lane.
__ vmovrs(out, EvenSRegisterOf(dvalue0));
@@ -4037,14 +4044,14 @@
void Float32x4ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister q0 = locs()->in(0).fpu_reg();
- QRegister q1 = locs()->in(1).fpu_reg();
- QRegister q2 = locs()->in(2).fpu_reg();
- QRegister q3 = locs()->in(3).fpu_reg();
- QRegister r = locs()->out(0).fpu_reg();
+ const QRegister q0 = locs()->in(0).fpu_reg();
+ const QRegister q1 = locs()->in(1).fpu_reg();
+ const QRegister q2 = locs()->in(2).fpu_reg();
+ const QRegister q3 = locs()->in(3).fpu_reg();
+ const QRegister r = locs()->out(0).fpu_reg();
- DRegister dr0 = EvenDRegisterOf(r);
- DRegister dr1 = OddDRegisterOf(r);
+ const DRegister dr0 = EvenDRegisterOf(r);
+ const DRegister dr1 = OddDRegisterOf(r);
__ vcvtsd(EvenSRegisterOf(dr0), EvenDRegisterOf(q0));
__ vcvtsd(OddSRegisterOf(dr0), EvenDRegisterOf(q1));
@@ -4064,7 +4071,7 @@
void Float32x4ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister q = locs()->out(0).fpu_reg();
+ const QRegister q = locs()->out(0).fpu_reg();
__ veorq(q, q, q);
}
@@ -4081,10 +4088,10 @@
void Float32x4SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister value = locs()->in(0).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister value = locs()->in(0).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
- DRegister dvalue0 = EvenDRegisterOf(value);
+ const DRegister dvalue0 = EvenDRegisterOf(value);
// Convert to Float32.
__ vcvtsd(STMP, dvalue0);
@@ -4107,9 +4114,9 @@
void Float32x4ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister left = locs()->in(0).fpu_reg();
- QRegister right = locs()->in(1).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister left = locs()->in(0).fpu_reg();
+ const QRegister right = locs()->in(1).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
switch (op_kind()) {
case MethodRecognizer::kFloat32x4Equal:
@@ -4151,9 +4158,9 @@
void Float32x4MinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister left = locs()->in(0).fpu_reg();
- QRegister right = locs()->in(1).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister left = locs()->in(0).fpu_reg();
+ const QRegister right = locs()->in(1).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
switch (op_kind()) {
case MethodRecognizer::kFloat32x4Min:
@@ -4180,9 +4187,9 @@
void Float32x4SqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister left = locs()->in(0).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
- QRegister temp = locs()->temp(0).fpu_reg();
+ const QRegister left = locs()->in(0).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
+ const QRegister temp = locs()->temp(0).fpu_reg();
switch (op_kind()) {
case MethodRecognizer::kFloat32x4Sqrt:
@@ -4212,9 +4219,9 @@
void Float32x4ScaleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister left = locs()->in(0).fpu_reg();
- QRegister right = locs()->in(1).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister left = locs()->in(0).fpu_reg();
+ const QRegister right = locs()->in(1).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
switch (op_kind()) {
case MethodRecognizer::kFloat32x4Scale:
@@ -4239,8 +4246,8 @@
void Float32x4ZeroArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister left = locs()->in(0).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister left = locs()->in(0).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
switch (op_kind()) {
case MethodRecognizer::kFloat32x4Negate:
@@ -4268,10 +4275,10 @@
void Float32x4ClampInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister left = locs()->in(0).fpu_reg();
- QRegister lower = locs()->in(1).fpu_reg();
- QRegister upper = locs()->in(2).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister left = locs()->in(0).fpu_reg();
+ const QRegister lower = locs()->in(1).fpu_reg();
+ const QRegister upper = locs()->in(2).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
__ vminqs(result, left, upper);
__ vmaxqs(result, result, lower);
}
@@ -4291,16 +4298,16 @@
void Float32x4WithInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister replacement = locs()->in(0).fpu_reg();
- QRegister value = locs()->in(1).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister replacement = locs()->in(0).fpu_reg();
+ const QRegister value = locs()->in(1).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
- DRegister dresult0 = EvenDRegisterOf(result);
- DRegister dresult1 = OddDRegisterOf(result);
- SRegister sresult0 = EvenSRegisterOf(dresult0);
- SRegister sresult1 = OddSRegisterOf(dresult0);
- SRegister sresult2 = EvenSRegisterOf(dresult1);
- SRegister sresult3 = OddSRegisterOf(dresult1);
+ const DRegister dresult0 = EvenDRegisterOf(result);
+ const DRegister dresult1 = OddDRegisterOf(result);
+ const SRegister sresult0 = EvenSRegisterOf(dresult0);
+ const SRegister sresult1 = OddSRegisterOf(dresult0);
+ const SRegister sresult2 = EvenSRegisterOf(dresult1);
+ const SRegister sresult3 = OddSRegisterOf(dresult1);
__ vcvtsd(STMP, EvenDRegisterOf(replacement));
if (result != value) {
@@ -4337,8 +4344,8 @@
void Float32x4ToInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister value = locs()->in(0).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister value = locs()->in(0).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
if (value != result) {
__ vmovq(result, value);
@@ -4358,14 +4365,14 @@
void Simd64x2ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister value = locs()->in(0).fpu_reg();
+ const QRegister value = locs()->in(0).fpu_reg();
- DRegister dvalue0 = EvenDRegisterOf(value);
- DRegister dvalue1 = OddDRegisterOf(value);
+ const DRegister dvalue0 = EvenDRegisterOf(value);
+ const DRegister dvalue1 = OddDRegisterOf(value);
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
- DRegister dresult0 = EvenDRegisterOf(result);
+ const DRegister dresult0 = EvenDRegisterOf(result);
switch (op_kind()) {
case MethodRecognizer::kFloat64x2GetX:
@@ -4390,7 +4397,7 @@
void Float64x2ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister q = locs()->out(0).fpu_reg();
+ const QRegister q = locs()->out(0).fpu_reg();
__ veorq(q, q, q);
}
@@ -4407,14 +4414,14 @@
void Float64x2SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister value = locs()->in(0).fpu_reg();
+ const QRegister value = locs()->in(0).fpu_reg();
- DRegister dvalue = EvenDRegisterOf(value);
+ const DRegister dvalue = EvenDRegisterOf(value);
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
- DRegister dresult0 = EvenDRegisterOf(result);
- DRegister dresult1 = OddDRegisterOf(result);
+ const DRegister dresult0 = EvenDRegisterOf(result);
+ const DRegister dresult1 = OddDRegisterOf(result);
// Splat across all lanes.
__ vmovd(dresult0, dvalue);
@@ -4436,15 +4443,15 @@
void Float64x2ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister q0 = locs()->in(0).fpu_reg();
- QRegister q1 = locs()->in(1).fpu_reg();
- QRegister r = locs()->out(0).fpu_reg();
+ const QRegister q0 = locs()->in(0).fpu_reg();
+ const QRegister q1 = locs()->in(1).fpu_reg();
+ const QRegister r = locs()->out(0).fpu_reg();
- DRegister d0 = EvenDRegisterOf(q0);
- DRegister d1 = EvenDRegisterOf(q1);
+ const DRegister d0 = EvenDRegisterOf(q0);
+ const DRegister d1 = EvenDRegisterOf(q1);
- DRegister dr0 = EvenDRegisterOf(r);
- DRegister dr1 = OddDRegisterOf(r);
+ const DRegister dr0 = EvenDRegisterOf(r);
+ const DRegister dr1 = OddDRegisterOf(r);
__ vmovd(dr0, d0);
__ vmovd(dr1, d1);
@@ -4465,13 +4472,13 @@
void Float64x2ToFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister q = locs()->in(0).fpu_reg();
- QRegister r = locs()->out(0).fpu_reg();
+ const QRegister q = locs()->in(0).fpu_reg();
+ const QRegister r = locs()->out(0).fpu_reg();
- DRegister dq0 = EvenDRegisterOf(q);
- DRegister dq1 = OddDRegisterOf(q);
+ const DRegister dq0 = EvenDRegisterOf(q);
+ const DRegister dq1 = OddDRegisterOf(q);
- DRegister dr0 = EvenDRegisterOf(r);
+ const DRegister dr0 = EvenDRegisterOf(r);
// Zero register.
__ veorq(r, r, r);
@@ -4496,13 +4503,13 @@
void Float32x4ToFloat64x2Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister q = locs()->in(0).fpu_reg();
- QRegister r = locs()->out(0).fpu_reg();
+ const QRegister q = locs()->in(0).fpu_reg();
+ const QRegister r = locs()->out(0).fpu_reg();
- DRegister dq0 = EvenDRegisterOf(q);
+ const DRegister dq0 = EvenDRegisterOf(q);
- DRegister dr0 = EvenDRegisterOf(r);
- DRegister dr1 = OddDRegisterOf(r);
+ const DRegister dr0 = EvenDRegisterOf(r);
+ const DRegister dr1 = OddDRegisterOf(r);
// Set X.
__ vcvtds(dr0, EvenSRegisterOf(dq0));
@@ -4532,14 +4539,14 @@
void Float64x2ZeroArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister q = locs()->in(0).fpu_reg();
+ const QRegister q = locs()->in(0).fpu_reg();
if ((op_kind() == MethodRecognizer::kFloat64x2GetSignMask)) {
- DRegister dvalue0 = EvenDRegisterOf(q);
- DRegister dvalue1 = OddDRegisterOf(q);
+ const DRegister dvalue0 = EvenDRegisterOf(q);
+ const DRegister dvalue1 = OddDRegisterOf(q);
- Register out = locs()->out(0).reg();
- Register temp = locs()->temp(0).reg();
+ const Register out = locs()->out(0).reg();
+ const Register temp = locs()->temp(0).reg();
// Upper 32-bits of X lane.
__ vmovrs(out, OddSRegisterOf(dvalue0));
@@ -4553,12 +4560,12 @@
return;
}
ASSERT(representation() == kUnboxedFloat64x2);
- QRegister r = locs()->out(0).fpu_reg();
+ const QRegister r = locs()->out(0).fpu_reg();
- DRegister dvalue0 = EvenDRegisterOf(q);
- DRegister dvalue1 = OddDRegisterOf(q);
- DRegister dresult0 = EvenDRegisterOf(r);
- DRegister dresult1 = OddDRegisterOf(r);
+ const DRegister dvalue0 = EvenDRegisterOf(q);
+ const DRegister dvalue1 = OddDRegisterOf(q);
+ const DRegister dresult0 = EvenDRegisterOf(r);
+ const DRegister dresult1 = OddDRegisterOf(r);
switch (op_kind()) {
case MethodRecognizer::kFloat64x2Negate:
@@ -4591,13 +4598,13 @@
void Float64x2OneArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister left = locs()->in(0).fpu_reg();
- DRegister left0 = EvenDRegisterOf(left);
- DRegister left1 = OddDRegisterOf(left);
- QRegister right = locs()->in(1).fpu_reg();
- DRegister right0 = EvenDRegisterOf(right);
- DRegister right1 = OddDRegisterOf(right);
- QRegister out = locs()->out(0).fpu_reg();
+ const QRegister left = locs()->in(0).fpu_reg();
+ const DRegister left0 = EvenDRegisterOf(left);
+ const DRegister left1 = OddDRegisterOf(left);
+ const QRegister right = locs()->in(1).fpu_reg();
+ const DRegister right0 = EvenDRegisterOf(right);
+ const DRegister right1 = OddDRegisterOf(right);
+ const QRegister out = locs()->out(0).fpu_reg();
ASSERT(left == out);
switch (op_kind()) {
@@ -4668,18 +4675,18 @@
void Int32x4BoolConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register v0 = locs()->in(0).reg();
- Register v1 = locs()->in(1).reg();
- Register v2 = locs()->in(2).reg();
- Register v3 = locs()->in(3).reg();
- Register temp = locs()->temp(0).reg();
- QRegister result = locs()->out(0).fpu_reg();
- DRegister dresult0 = EvenDRegisterOf(result);
- DRegister dresult1 = OddDRegisterOf(result);
- SRegister sresult0 = EvenSRegisterOf(dresult0);
- SRegister sresult1 = OddSRegisterOf(dresult0);
- SRegister sresult2 = EvenSRegisterOf(dresult1);
- SRegister sresult3 = OddSRegisterOf(dresult1);
+ const Register v0 = locs()->in(0).reg();
+ const Register v1 = locs()->in(1).reg();
+ const Register v2 = locs()->in(2).reg();
+ const Register v3 = locs()->in(3).reg();
+ const Register temp = locs()->temp(0).reg();
+ const QRegister result = locs()->out(0).fpu_reg();
+ const DRegister dresult0 = EvenDRegisterOf(result);
+ const DRegister dresult1 = OddDRegisterOf(result);
+ const SRegister sresult0 = EvenSRegisterOf(dresult0);
+ const SRegister sresult1 = OddSRegisterOf(dresult0);
+ const SRegister sresult2 = EvenSRegisterOf(dresult1);
+ const SRegister sresult3 = OddSRegisterOf(dresult1);
__ veorq(result, result, result);
__ LoadImmediate(temp, 0xffffffff);
@@ -4711,15 +4718,15 @@
void Int32x4GetFlagInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister value = locs()->in(0).fpu_reg();
- Register result = locs()->out(0).reg();
+ const QRegister value = locs()->in(0).fpu_reg();
+ const Register result = locs()->out(0).reg();
- DRegister dvalue0 = EvenDRegisterOf(value);
- DRegister dvalue1 = OddDRegisterOf(value);
- SRegister svalue0 = EvenSRegisterOf(dvalue0);
- SRegister svalue1 = OddSRegisterOf(dvalue0);
- SRegister svalue2 = EvenSRegisterOf(dvalue1);
- SRegister svalue3 = OddSRegisterOf(dvalue1);
+ const DRegister dvalue0 = EvenDRegisterOf(value);
+ const DRegister dvalue1 = OddDRegisterOf(value);
+ const SRegister svalue0 = EvenSRegisterOf(dvalue0);
+ const SRegister svalue1 = OddSRegisterOf(dvalue0);
+ const SRegister svalue2 = EvenSRegisterOf(dvalue1);
+ const SRegister svalue3 = OddSRegisterOf(dvalue1);
switch (op_kind()) {
case MethodRecognizer::kInt32x4GetFlagX:
@@ -4758,11 +4765,11 @@
void Int32x4SelectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister mask = locs()->in(0).fpu_reg();
- QRegister trueValue = locs()->in(1).fpu_reg();
- QRegister falseValue = locs()->in(2).fpu_reg();
- QRegister out = locs()->out(0).fpu_reg();
- QRegister temp = locs()->temp(0).fpu_reg();
+ const QRegister mask = locs()->in(0).fpu_reg();
+ const QRegister trueValue = locs()->in(1).fpu_reg();
+ const QRegister falseValue = locs()->in(2).fpu_reg();
+ const QRegister out = locs()->out(0).fpu_reg();
+ const QRegister temp = locs()->temp(0).fpu_reg();
// Copy mask.
__ vmovq(temp, mask);
@@ -4791,16 +4798,16 @@
void Int32x4SetFlagInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister mask = locs()->in(0).fpu_reg();
- Register flag = locs()->in(1).reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister mask = locs()->in(0).fpu_reg();
+ const Register flag = locs()->in(1).reg();
+ const QRegister result = locs()->out(0).fpu_reg();
- DRegister dresult0 = EvenDRegisterOf(result);
- DRegister dresult1 = OddDRegisterOf(result);
- SRegister sresult0 = EvenSRegisterOf(dresult0);
- SRegister sresult1 = OddSRegisterOf(dresult0);
- SRegister sresult2 = EvenSRegisterOf(dresult1);
- SRegister sresult3 = OddSRegisterOf(dresult1);
+ const DRegister dresult0 = EvenDRegisterOf(result);
+ const DRegister dresult1 = OddDRegisterOf(result);
+ const SRegister sresult0 = EvenSRegisterOf(dresult0);
+ const SRegister sresult1 = OddSRegisterOf(dresult0);
+ const SRegister sresult2 = EvenSRegisterOf(dresult1);
+ const SRegister sresult3 = OddSRegisterOf(dresult1);
if (result != mask) {
__ vmovq(result, mask);
@@ -4839,8 +4846,8 @@
void Int32x4ToFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister value = locs()->in(0).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister value = locs()->in(0).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
if (value != result) {
__ vmovq(result, value);
@@ -4861,9 +4868,9 @@
void BinaryInt32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister left = locs()->in(0).fpu_reg();
- QRegister right = locs()->in(1).fpu_reg();
- QRegister result = locs()->out(0).fpu_reg();
+ const QRegister left = locs()->in(0).fpu_reg();
+ const QRegister right = locs()->in(1).fpu_reg();
+ const QRegister result = locs()->out(0).fpu_reg();
switch (op_kind()) {
case Token::kBIT_AND: {
__ vandq(result, left, right);
@@ -4918,12 +4925,12 @@
void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (kind() == MathUnaryInstr::kSqrt) {
- DRegister val = EvenDRegisterOf(locs()->in(0).fpu_reg());
- DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
+ const DRegister val = EvenDRegisterOf(locs()->in(0).fpu_reg());
+ const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
__ vsqrtd(result, val);
} else if (kind() == MathUnaryInstr::kDoubleSquare) {
- DRegister val = EvenDRegisterOf(locs()->in(0).fpu_reg());
- DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
+ const DRegister val = EvenDRegisterOf(locs()->in(0).fpu_reg());
+ const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
__ vmuld(result, val, val);
} else {
ASSERT((kind() == MathUnaryInstr::kSin) ||
@@ -4976,10 +4983,10 @@
const intptr_t is_min = (op_kind() == MethodRecognizer::kMathMin);
if (result_cid() == kDoubleCid) {
Label done, returns_nan, are_equal;
- DRegister left = EvenDRegisterOf(locs()->in(0).fpu_reg());
- DRegister right = EvenDRegisterOf(locs()->in(1).fpu_reg());
- DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
- Register temp = locs()->temp(0).reg();
+ const DRegister left = EvenDRegisterOf(locs()->in(0).fpu_reg());
+ const DRegister right = EvenDRegisterOf(locs()->in(1).fpu_reg());
+ const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
+ const Register temp = locs()->temp(0).reg();
__ vcmpd(left, right);
__ vmstat();
__ b(&returns_nan, VS);
@@ -5016,9 +5023,9 @@
}
ASSERT(result_cid() == kSmiCid);
- Register left = locs()->in(0).reg();
- Register right = locs()->in(1).reg();
- Register result = locs()->out(0).reg();
+ const Register left = locs()->in(0).reg();
+ const Register right = locs()->in(1).reg();
+ const Register result = locs()->out(0).reg();
__ cmp(left, ShifterOperand(right));
ASSERT(result == left);
if (is_min) {
@@ -5043,8 +5050,8 @@
void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register value = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register value = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
switch (op_kind()) {
case Token::kNEGATE: {
Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
@@ -5075,8 +5082,8 @@
void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
- DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
+ const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
+ const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
__ vnegd(result, value);
}
@@ -5093,8 +5100,8 @@
void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register value = locs()->in(0).reg();
- DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
+ const Register value = locs()->in(0).reg();
+ const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
__ SmiUntag(value);
__ vmovsr(STMP, value);
__ vcvtdi(result, STMP);
@@ -5207,8 +5214,9 @@
void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
- SRegister result = EvenSRegisterOf(EvenDRegisterOf(locs()->out(0).fpu_reg()));
+ const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
+ const SRegister result =
+ EvenSRegisterOf(EvenDRegisterOf(locs()->out(0).fpu_reg()));
__ vcvtsd(result, value);
}
@@ -5226,8 +5234,9 @@
void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- SRegister value = EvenSRegisterOf(EvenDRegisterOf(locs()->in(0).fpu_reg()));
- DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
+ const SRegister value =
+ EvenSRegisterOf(EvenDRegisterOf(locs()->in(0).fpu_reg()));
+ const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
__ vcvtds(result, value);
}
@@ -5452,13 +5461,13 @@
PairLocation* pair = locs()->in(0).AsPairLocation();
Location in_loc = pair->At(index());
if (representation() == kUnboxedDouble) {
- QRegister out = locs()->out(0).fpu_reg();
- QRegister in = in_loc.fpu_reg();
+ const QRegister out = locs()->out(0).fpu_reg();
+ const QRegister in = in_loc.fpu_reg();
__ vmovq(out, in);
} else {
ASSERT(representation() == kTagged);
- Register out = locs()->out(0).reg();
- Register in = in_loc.reg();
+ const Register out = locs()->out(0).reg();
+ const Register in = in_loc.reg();
__ mov(out, ShifterOperand(in));
}
}
@@ -5490,20 +5499,20 @@
deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
}
if (kind() == MergedMathInstr::kTruncDivMod) {
- Register left = locs()->in(0).reg();
- Register right = locs()->in(1).reg();
+ const Register left = locs()->in(0).reg();
+ const Register right = locs()->in(1).reg();
ASSERT(locs()->out(0).IsPairLocation());
PairLocation* pair = locs()->out(0).AsPairLocation();
- Register result_div = pair->At(0).reg();
- Register result_mod = pair->At(1).reg();
+ const Register result_div = pair->At(0).reg();
+ const Register result_mod = pair->At(1).reg();
Range* right_range = InputAt(1)->definition()->range();
if ((right_range == NULL) || right_range->Overlaps(0, 0)) {
// Handle divide by zero in runtime.
__ cmp(right, ShifterOperand(0));
__ b(deopt, EQ);
}
- Register temp = locs()->temp(0).reg();
- DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
+ const Register temp = locs()->temp(0).reg();
+ const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
__ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp.
__ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
@@ -5629,8 +5638,8 @@
ASSERT((unary_checks().GetReceiverClassIdAt(0) != kSmiCid) ||
(unary_checks().NumberOfChecks() > 1));
- Register value = locs()->in(0).reg();
- Register temp = locs()->temp(0).reg();
+ const Register value = locs()->in(0).reg();
+ const Register temp = locs()->temp(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id(), deopt_reason);
Label is_ok;
intptr_t cix = 0;
@@ -5668,7 +5677,7 @@
void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register value = locs()->in(0).reg();
+ const Register value = locs()->in(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
__ tst(value, ShifterOperand(kSmiTagMask));
__ b(deopt, NE);
@@ -5704,18 +5713,18 @@
}
if (index_loc.IsConstant()) {
- Register length = length_loc.reg();
+ const Register length = length_loc.reg();
const Smi& index = Smi::Cast(index_loc.constant());
__ CompareImmediate(length, reinterpret_cast<int32_t>(index.raw()));
__ b(deopt, LS);
} else if (length_loc.IsConstant()) {
const Smi& length = Smi::Cast(length_loc.constant());
- Register index = index_loc.reg();
+ const Register index = index_loc.reg();
__ CompareImmediate(index, reinterpret_cast<int32_t>(length.raw()));
__ b(deopt, CS);
} else {
- Register length = length_loc.reg();
- Register index = index_loc.reg();
+ const Register length = length_loc.reg();
+ const Register index = index_loc.reg();
__ cmp(index, ShifterOperand(length));
__ b(deopt, CS);
}
@@ -5724,20 +5733,19 @@
static void EmitJavascriptIntOverflowCheck(FlowGraphCompiler* compiler,
Label* overflow,
- QRegister result,
- Register tmp_hi, Register tmp_lo) {
- __ vmovrrd(tmp_lo, tmp_hi, EvenDRegisterOf(result));
+ Register result_lo,
+ Register result_hi) {
// Compare upper half.
Label check_lower;
- __ CompareImmediate(tmp_hi, 0x00200000);
+ __ CompareImmediate(result_hi, 0x00200000);
__ b(overflow, GT);
__ b(&check_lower, NE);
- __ CompareImmediate(tmp_lo, 0);
+ __ CompareImmediate(result_lo, 0);
__ b(overflow, HI);
__ Bind(&check_lower);
- __ CompareImmediate(tmp_hi, -0x00200000);
+ __ CompareImmediate(result_hi, -0x00200000);
__ b(overflow, LT);
// Anything in the lower part would make the number bigger than the lower
// bound, so we are done.
@@ -5746,19 +5754,13 @@
LocationSummary* UnboxIntegerInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 1;
- const intptr_t value_cid = value()->Type()->ToCid();
- const bool needs_writable_input = (value_cid != kMintCid);
- const bool needs_temp = (value_cid != kMintCid);
- const intptr_t kNumTemps = needs_temp ? 1 : 0;
+ const intptr_t kNumTemps = 1;
LocationSummary* summary =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- summary->set_in(0, needs_writable_input
- ? Location::WritableRegister()
- : Location::RequiresRegister());
- if (needs_temp) {
- summary->set_temp(0, Location::RequiresRegister());
- }
- summary->set_out(0, Location::RequiresFpuRegister());
+ summary->set_in(0, Location::RequiresRegister());
+ summary->set_temp(0, Location::RequiresRegister());
+ summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
return summary;
}
@@ -5766,21 +5768,32 @@
void UnboxIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t value_cid = value()->Type()->ToCid();
const Register value = locs()->in(0).reg();
- const QRegister result = locs()->out(0).fpu_reg();
+ PairLocation* result_pair = locs()->out(0).AsPairLocation();
+ Register result_lo = result_pair->At(0).reg();
+ Register result_hi = result_pair->At(1).reg();
+ ASSERT(value != result_lo);
+ ASSERT(value != result_hi);
__ Comment("UnboxIntegerInstr");
- __ veorq(result, result, result);
if (value_cid == kMintCid) {
- __ LoadDFromOffset(EvenDRegisterOf(result), value,
- Mint::value_offset() - kHeapObjectTag);
+ // Load low word.
+ __ LoadFromOffset(kWord,
+ result_lo,
+ value,
+ Mint::value_offset() - kHeapObjectTag);
+ // Load high word.
+ __ LoadFromOffset(kWord,
+ result_hi,
+ value,
+ Mint::value_offset() - kHeapObjectTag + kWordSize);
} else if (value_cid == kSmiCid) {
- Register temp = locs()->temp(0).reg();
- __ SmiUntag(value);
- // Sign extend value into temp.
- __ Asr(temp, value, 31);
- __ vmovdrr(EvenDRegisterOf(result), value, temp);
+ // Load Smi into result_lo.
+ __ mov(result_lo, ShifterOperand(value));
+ // Untag.
+ __ SmiUntag(result_lo);
+ __ SignFill(result_hi, result_lo);
} else {
- Register temp = locs()->temp(0).reg();
+ const Register temp = locs()->temp(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id_,
ICData::kDeoptUnboxInteger);
Label is_smi, done;
@@ -5790,16 +5803,26 @@
__ b(deopt, NE);
// It's a Mint.
- __ LoadDFromOffset(EvenDRegisterOf(result), value,
- Mint::value_offset() - kHeapObjectTag);
+ // Load low word.
+ __ LoadFromOffset(kWord,
+ result_lo,
+ value,
+ Mint::value_offset() - kHeapObjectTag);
+ // Load high word.
+ __ LoadFromOffset(kWord,
+ result_hi,
+ value,
+ Mint::value_offset() - kHeapObjectTag + kWordSize);
__ b(&done);
// It's a Smi.
__ Bind(&is_smi);
- __ SmiUntag(value);
- // Sign extend into temp.
- __ Asr(temp, value, 31);
- __ vmovdrr(EvenDRegisterOf(result), value, temp);
+ // Load Smi into result_lo.
+ __ mov(result_lo, ShifterOperand(value));
+ // Untag.
+ __ SmiUntag(result_lo);
+ // Sign extend result_lo into result_hi.
+ __ SignFill(result_hi, result_lo);
__ Bind(&done);
}
}
@@ -5807,14 +5830,14 @@
LocationSummary* BoxIntegerInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 1;
- const intptr_t kNumTemps = 2;
+ const intptr_t kNumTemps = 1;
LocationSummary* summary =
new LocationSummary(kNumInputs,
kNumTemps,
LocationSummary::kCallOnSlowPath);
- summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
summary->set_temp(0, Location::RequiresRegister());
- summary->set_temp(1, Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
@@ -5856,40 +5879,39 @@
void BoxIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
BoxIntegerSlowPath* slow_path = new BoxIntegerSlowPath(this);
compiler->AddSlowPathCode(slow_path);
-
+ PairLocation* value_pair = locs()->in(0).AsPairLocation();
+ Register value_lo = value_pair->At(0).reg();
+ Register value_hi = value_pair->At(1).reg();
+ Register tmp = locs()->temp(0).reg();
Register out_reg = locs()->out(0).reg();
- QRegister value = locs()->in(0).fpu_reg();
- DRegister dvalue0 = EvenDRegisterOf(value);
- Register lo = locs()->temp(0).reg();
- Register hi = locs()->temp(1).reg();
// Unboxed operations produce smis or mint-sized values.
// Check if value fits into a smi.
__ Comment("BoxIntegerInstr");
Label not_smi, done, maybe_pos_smi, maybe_neg_smi, is_smi;
- __ vmovrrd(lo, hi, dvalue0);
- __ CompareImmediate(hi, 0);
+ // Check high word.
+ __ CompareImmediate(value_hi, 0);
__ b(&maybe_pos_smi, EQ);
- __ CompareImmediate(hi, -1);
+ __ CompareImmediate(value_hi, -1);
__ b(&maybe_neg_smi, EQ);
__ b(¬_smi);
__ Bind(&maybe_pos_smi);
- __ CompareImmediate(lo, kSmiMax);
+ __ CompareImmediate(value_lo, kSmiMax);
__ b(&is_smi, LS); // unsigned lower or same.
__ b(¬_smi);
__ Bind(&maybe_neg_smi);
- __ CompareImmediate(lo, 0);
+ __ CompareImmediate(value_lo, 0);
__ b(¬_smi, GE);
- __ CompareImmediate(lo, kSmiMin);
+ __ CompareImmediate(value_lo, kSmiMin);
__ b(¬_smi, LT);
// lo is a Smi. Tag it and return.
__ Bind(&is_smi);
- __ SmiTag(lo);
- __ mov(out_reg, ShifterOperand(lo));
+ __ mov(out_reg, ShifterOperand(value_lo));
+ __ SmiTag(out_reg);
__ b(&done);
// Not a smi. Box it.
@@ -5898,148 +5920,169 @@
Class::ZoneHandle(Isolate::Current()->object_store()->mint_class()),
slow_path->entry_label(),
out_reg,
- lo);
+ tmp);
__ Bind(slow_path->exit_label());
- __ StoreDToOffset(dvalue0, out_reg, Mint::value_offset() - kHeapObjectTag);
+ __ StoreToOffset(kWord,
+ value_lo,
+ out_reg,
+ Mint::value_offset() - kHeapObjectTag);
+ __ StoreToOffset(kWord,
+ value_hi,
+ out_reg,
+ Mint::value_offset() - kHeapObjectTag + kWordSize);
__ Bind(&done);
}
LocationSummary* BinaryMintOpInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 2;
- const intptr_t kNumTemps =
- FLAG_throw_on_javascript_int_overflow ? 2 : 0;
+ const intptr_t kNumTemps = 0;
LocationSummary* summary =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- summary->set_in(0, Location::RequiresFpuRegister());
- summary->set_in(1, Location::RequiresFpuRegister());
- if (FLAG_throw_on_javascript_int_overflow) {
- summary->set_temp(0, Location::RequiresRegister());
- summary->set_temp(1, Location::RequiresRegister());
- }
- if ((op_kind() == Token::kADD) || (op_kind() == Token::kSUB)) {
- // Need another temp for checking for overflow.
- summary->AddTemp(Location::RequiresFpuRegister());
- summary->AddTemp(Location::FpuRegisterLocation(Q7));
- }
- summary->set_out(0, Location::RequiresFpuRegister());
+ summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
+ summary->set_in(1, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
+ summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
return summary;
}
void BinaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister left = locs()->in(0).fpu_reg();
- QRegister right = locs()->in(1).fpu_reg();
- QRegister out = locs()->out(0).fpu_reg();
+ PairLocation* left_pair = locs()->in(0).AsPairLocation();
+ Register left_lo = left_pair->At(0).reg();
+ Register left_hi = left_pair->At(1).reg();
+ PairLocation* right_pair = locs()->in(1).AsPairLocation();
+ Register right_lo = right_pair->At(0).reg();
+ Register right_hi = right_pair->At(1).reg();
+ PairLocation* out_pair = locs()->out(0).AsPairLocation();
+ Register out_lo = out_pair->At(0).reg();
+ Register out_hi = out_pair->At(1).reg();
Label* deopt = NULL;
if (FLAG_throw_on_javascript_int_overflow) {
deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp);
}
switch (op_kind()) {
- case Token::kBIT_AND: __ vandq(out, left, right); break;
- case Token::kBIT_OR: __ vorrq(out, left, right); break;
- case Token::kBIT_XOR: __ veorq(out, left, right); break;
+ case Token::kBIT_AND: {
+ __ and_(out_lo, left_lo, ShifterOperand(right_lo));
+ __ and_(out_hi, left_hi, ShifterOperand(right_hi));
+ }
+ break;
+ case Token::kBIT_OR: {
+ __ orr(out_lo, left_lo, ShifterOperand(right_lo));
+ __ orr(out_hi, left_hi, ShifterOperand(right_hi));
+ }
+ break;
+ case Token::kBIT_XOR: {
+ __ eor(out_lo, left_lo, ShifterOperand(right_lo));
+ __ eor(out_hi, left_hi, ShifterOperand(right_hi));
+ }
+ break;
case Token::kADD:
case Token::kSUB: {
- const intptr_t tmpidx = FLAG_throw_on_javascript_int_overflow ? 2 : 0;
- QRegister tmp = locs()->temp(tmpidx).fpu_reg();
- QRegister ro = locs()->temp(tmpidx + 1).fpu_reg();
- ASSERT(ro == Q7);
if (!FLAG_throw_on_javascript_int_overflow) {
deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp);
}
if (op_kind() == Token::kADD) {
- __ vaddqi(kWordPair, out, left, right);
+ __ adds(out_lo, left_lo, ShifterOperand(right_lo));
+ __ adcs(out_hi, left_hi, ShifterOperand(right_hi));
} else {
ASSERT(op_kind() == Token::kSUB);
- __ vsubqi(kWordPair, out, left, right);
+ __ subs(out_lo, left_lo, ShifterOperand(right_lo));
+ __ sbcs(out_hi, left_hi, ShifterOperand(right_hi));
}
- __ veorq(ro, out, left);
- __ veorq(tmp, left, right);
- __ vandq(ro, tmp, ro);
- __ vmovrs(TMP, OddSRegisterOf(EvenDRegisterOf(ro)));
- // If TMP < 0, there was overflow.
- __ cmp(TMP, ShifterOperand(0));
- __ b(deopt, LT);
+ // Deopt on overflow.
+ __ b(deopt, VS);
break;
}
- default: UNREACHABLE(); break;
+ default:
+ UNREACHABLE();
+ break;
}
if (FLAG_throw_on_javascript_int_overflow) {
- Register tmp1 = locs()->temp(0).reg();
- Register tmp2 = locs()->temp(1).reg();
- EmitJavascriptIntOverflowCheck(compiler, deopt, out, tmp1, tmp2);
+ EmitJavascriptIntOverflowCheck(compiler, deopt, out_lo, out_hi);
}
}
LocationSummary* ShiftMintOpInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 2;
- const intptr_t kNumTemps =
- FLAG_throw_on_javascript_int_overflow ? 2 : 1;
+ const intptr_t kNumTemps = 1;
LocationSummary* summary =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
summary->set_in(1, Location::WritableRegister());
- summary->set_temp(0, Location::FpuRegisterLocation(Q7));
- if (FLAG_throw_on_javascript_int_overflow) {
- summary->set_temp(1, Location::RequiresRegister());
- }
- summary->set_out(0, Location::RequiresFpuRegister());
+ summary->set_temp(0, Location::RequiresRegister());
+ summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
return summary;
}
void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- QRegister value = locs()->in(0).fpu_reg();
+ PairLocation* left_pair = locs()->in(0).AsPairLocation();
+ Register left_lo = left_pair->At(0).reg();
+ Register left_hi = left_pair->At(1).reg();
Register shift = locs()->in(1).reg();
- QRegister temp = locs()->temp(0).fpu_reg();
- ASSERT(temp == Q7);
- QRegister out = locs()->out(0).fpu_reg();
- DRegister dtemp0 = EvenDRegisterOf(temp);
- SRegister stemp0 = EvenSRegisterOf(dtemp0);
- SRegister stemp1 = OddSRegisterOf(dtemp0);
+ PairLocation* out_pair = locs()->out(0).AsPairLocation();
+ Register out_lo = out_pair->At(0).reg();
+ Register out_hi = out_pair->At(1).reg();
+ Register temp = locs()->temp(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptShiftMintOp);
Label done;
+ // Early out if shift is 0.
__ CompareImmediate(shift, 0);
- __ vmovq(out, value);
+ __ mov(out_lo, ShifterOperand(left_lo));
+ __ mov(out_hi, ShifterOperand(left_hi));
__ b(&done, EQ);
+
+ // Untag shift count.
__ SmiUntag(shift);
- // vshlq takes the shift value from low byte. Deopt if shift is
- // outside of [0, 63].
- __ CompareImmediate(shift, 63);
- __ b(deopt, GT);
- __ CompareImmediate(shift, 0);
+ // Deopt if shift is negative.
+ __ CompareImmediate(shift, 1);
__ b(deopt, LT);
- __ veorq(temp, temp, temp); // Zero out temp.
+ // Deopt if shift is larger than 63.
+ __ CompareImmediate(shift, 63);
+ __ b(deopt, GT);
+
switch (op_kind()) {
case Token::kSHR: {
- __ rsb(shift, shift, ShifterOperand(0)); // Negate shift.
- __ vmovsr(stemp0, shift); // Move the shift into the low S register.
- __ vshlqi(kWordPair, out, value, temp);
+ __ cmp(shift, ShifterOperand(32));
+
+ __ mov(out_lo, ShifterOperand(out_hi), HI);
+ __ Asr(out_hi, out_hi, 31, HI);
+ __ sub(shift, shift, ShifterOperand(32), HI);
+
+ __ rsb(temp, shift, ShifterOperand(32));
+ __ mov(temp, ShifterOperand(out_hi, LSL, temp));
+ __ orr(out_lo, temp, ShifterOperand(out_lo, LSR, shift));
+ __ Asr(out_hi, out_hi, shift);
break;
}
case Token::kSHL: {
- __ vmovsr(stemp0, shift); // Move the shift into the low S register.
- __ vshlqu(kWordPair, out, value, temp);
+ __ rsbs(temp, shift, ShifterOperand(32));
+ __ sub(temp, shift, ShifterOperand(32), MI);
+ __ mov(out_hi, ShifterOperand(out_lo, LSL, temp), MI);
+ __ mov(out_hi, ShifterOperand(out_hi, LSL, shift), PL);
+ __ orr(out_hi, out_hi, ShifterOperand(out_lo, LSR, temp), PL);
+ __ mov(out_lo, ShifterOperand(out_lo, LSL, shift));
- // check for overflow by shifting back and comparing.
- __ rsb(shift, shift, ShifterOperand(0));
- __ vmovsr(stemp0, shift);
- __ vshlqi(kWordPair, temp, out, temp);
- __ vceqqi(kWord, temp, temp, value);
- // Low 64 bits of temp should be all 1's, otherwise temp != value and
- // we deopt.
- __ vmovrs(shift, stemp0);
- __ CompareImmediate(shift, -1);
- __ b(deopt, NE);
- __ vmovrs(shift, stemp1);
- __ CompareImmediate(shift, -1);
+ // Check for overflow.
+
+ // Copy high word from output.
+ __ mov(temp, ShifterOperand(out_hi));
+ // Shift copy right.
+ __ Asr(temp, temp, shift);
+ // Compare with high word from input.
+ __ cmp(temp, ShifterOperand(left_hi));
+ // Overflow if they aren't equal.
__ b(deopt, NE);
break;
}
@@ -6050,42 +6093,43 @@
__ Bind(&done);
if (FLAG_throw_on_javascript_int_overflow) {
- Register tmp1 = locs()->in(1).reg();
- Register tmp2 = locs()->temp(1).reg();
- EmitJavascriptIntOverflowCheck(compiler, deopt, out, tmp1, tmp2);
+ EmitJavascriptIntOverflowCheck(compiler, deopt, out_lo, out_hi);
}
}
LocationSummary* UnaryMintOpInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 1;
- const intptr_t kNumTemps =
- FLAG_throw_on_javascript_int_overflow ? 2 : 0;
+ const intptr_t kNumTemps = 0;
LocationSummary* summary =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- summary->set_in(0, Location::RequiresFpuRegister());
- summary->set_out(0, Location::RequiresFpuRegister());
- if (FLAG_throw_on_javascript_int_overflow) {
- summary->set_temp(0, Location::RequiresRegister());
- summary->set_temp(1, Location::RequiresRegister());
- }
+ summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
+ summary->set_out(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
return summary;
}
void UnaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(op_kind() == Token::kBIT_NOT);
- QRegister value = locs()->in(0).fpu_reg();
- QRegister out = locs()->out(0).fpu_reg();
+ PairLocation* left_pair = locs()->in(0).AsPairLocation();
+ Register left_lo = left_pair->At(0).reg();
+ Register left_hi = left_pair->At(1).reg();
+
+ PairLocation* out_pair = locs()->out(0).AsPairLocation();
+ Register out_lo = out_pair->At(0).reg();
+ Register out_hi = out_pair->At(1).reg();
+
Label* deopt = NULL;
+
if (FLAG_throw_on_javascript_int_overflow) {
deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryMintOp);
}
- __ vmvnq(out, value);
+ __ mvn(out_lo, ShifterOperand(left_lo));
+ __ mvn(out_hi, ShifterOperand(left_hi));
if (FLAG_throw_on_javascript_int_overflow) {
- Register tmp1 = locs()->temp(0).reg();
- Register tmp2 = locs()->temp(1).reg();
- EmitJavascriptIntOverflowCheck(compiler, deopt, out, tmp1, tmp2);
+ EmitJavascriptIntOverflowCheck(compiler, deopt, out_lo, out_hi);
}
}
@@ -6244,7 +6288,7 @@
BranchLabels labels = { NULL, NULL, NULL };
Condition true_condition = EmitComparisonCode(compiler, labels);
- Register result = locs()->out(0).reg();
+ const Register result = locs()->out(0).reg();
__ LoadObject(result, Bool::True(), true_condition);
__ LoadObject(result, Bool::False(), NegateCondition(true_condition));
}
@@ -6268,8 +6312,8 @@
void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register value = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register value = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
__ LoadObject(result, Bool::True());
__ cmp(result, ShifterOperand(value));
diff --git a/runtime/vm/intermediate_language_arm64.cc b/runtime/vm/intermediate_language_arm64.cc
index ae87f74..8564de6 100644
--- a/runtime/vm/intermediate_language_arm64.cc
+++ b/runtime/vm/intermediate_language_arm64.cc
@@ -76,7 +76,7 @@
// The entry needs to be patchable, no inlined objects are allowed in the area
// that will be overwritten by the patch instructions: a branch macro sequence.
void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register result = locs()->in(0).reg();
+ const Register result = locs()->in(0).reg();
ASSERT(result == R0);
#if defined(DEBUG)
Label stack_ok;
@@ -237,7 +237,7 @@
void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register result = locs()->out(0).reg();
+ const Register result = locs()->out(0).reg();
__ LoadFromOffset(result, FP, local().index() * kWordSize, PP);
}
@@ -250,8 +250,8 @@
void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register value = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register value = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
ASSERT(result == value); // Assert that register assignment is correct.
__ StoreToOffset(value, FP, local().index() * kWordSize, PP);
}
@@ -267,7 +267,7 @@
void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The register allocator drops constant definitions that have no uses.
if (!locs()->out(0).IsInvalid()) {
- Register result = locs()->out(0).reg();
+ const Register result = locs()->out(0).reg();
__ LoadObject(result, value(), PP);
}
}
@@ -340,8 +340,8 @@
void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register obj = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register obj = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler);
ASSERT(obj == result);
@@ -502,7 +502,7 @@
EmitBranchOnCondition(compiler, true_condition, labels);
// TODO(zra): instead of branching, use the csel instruction to get
// True or False into result.
- Register result = locs()->out(0).reg();
+ const Register result = locs()->out(0).reg();
Label done;
__ Bind(&is_false);
__ LoadObject(result, Bool::False(), PP);
@@ -543,7 +543,7 @@
Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels) {
- Register left = locs()->in(0).reg();
+ const Register left = locs()->in(0).reg();
Location right = locs()->in(1);
if (right.IsConstant()) {
ASSERT(right.constant().IsSmi());
@@ -587,8 +587,8 @@
Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
BranchLabels labels) {
ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
- Register val_reg = locs()->in(0).reg();
- Register cid_reg = locs()->temp(0).reg();
+ const Register val_reg = locs()->in(0).reg();
+ const Register cid_reg = locs()->temp(0).reg();
Label* deopt = CanDeoptimize() ?
compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids) : NULL;
@@ -631,7 +631,7 @@
void TestCidsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register result_reg = locs()->out(0).reg();
+ const Register result_reg = locs()->out(0).reg();
Label is_true, is_false, done;
BranchLabels labels = { &is_true, &is_false, &is_false };
EmitComparisonCode(compiler, labels);
@@ -694,7 +694,7 @@
EmitBranchOnCondition(compiler, true_condition, labels);
// TODO(zra): instead of branching, use the csel instruction to get
// True or False into result.
- Register result = locs()->out(0).reg();
+ const Register result = locs()->out(0).reg();
Label done;
__ Bind(&is_false);
__ LoadObject(result, Bool::False(), PP);
@@ -735,7 +735,7 @@
ASSERT(locs()->temp(0).reg() == R1);
ASSERT(locs()->temp(1).reg() == R2);
ASSERT(locs()->temp(2).reg() == R5);
- Register result = locs()->out(0).reg();
+ const Register result = locs()->out(0).reg();
// Push the result place holder initialized to NULL.
__ PushObject(Object::ZoneHandle(), PP);
@@ -833,7 +833,7 @@
void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register array = locs()->in(0).reg();
+ const Register array = locs()->in(0).reg();
__ Push(array);
const int kNumberOfArguments = 1;
const Array& kNoArgumentNames = Object::null_array();
@@ -856,8 +856,8 @@
void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register object = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register object = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
__ LoadFieldFromOffset(result, object, offset(), PP);
}
@@ -871,8 +871,8 @@
void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register object = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register object = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
Label load, done;
__ tsti(object, kSmiTagMask);
__ b(&load, NE);
@@ -976,9 +976,9 @@
void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register array = locs()->in(0).reg();
+ const Register array = locs()->in(0).reg();
ASSERT(locs()->in(1).IsRegister()); // TODO(regis): Revisit.
- Register index = locs()->in(1).reg();
+ const Register index = locs()->in(1).reg();
Address element_address(kNoRegister, 0);
@@ -1045,7 +1045,7 @@
return;
}
- Register result = locs()->out(0).reg();
+ const Register result = locs()->out(0).reg();
switch (class_id()) {
case kTypedDataInt8ArrayCid:
ASSERT(index_scale() == 1);
@@ -1171,7 +1171,7 @@
void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register array = locs()->in(0).reg();
ASSERT(locs()->in(1).IsRegister()); // TODO(regis): Revisit.
- Register index = locs()->in(1).reg();
+ const Register index = locs()->in(1).reg();
Address element_address(kNoRegister, 0);
@@ -1369,11 +1369,11 @@
const intptr_t value_cid = value()->Type()->ToCid();
- Register value_reg = locs()->in(0).reg();
+ const Register value_reg = locs()->in(0).reg();
- Register value_cid_reg = locs()->temp(0).reg();
+ const Register value_cid_reg = locs()->temp(0).reg();
- Register temp_reg = locs()->temp(1).reg();
+ const Register temp_reg = locs()->temp(1).reg();
Register field_reg = needs_field_temp_reg ?
locs()->temp(locs()->temp_count() - 1).reg() : kNoRegister;
@@ -1707,8 +1707,6 @@
: Location::RequiresRegister());
summary->AddTemp(Location::RequiresRegister());
summary->AddTemp(Location::RequiresRegister());
- summary->AddTemp(opt ? Location::RequiresFpuRegister()
- : Location::FpuRegisterLocation(V1));
} else {
summary->set_in(1, ShouldEmitStoreBarrier()
? Location::WritableRegister()
@@ -1721,7 +1719,7 @@
void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label skip_store;
- Register instance_reg = locs()->in(0).reg();
+ const Register instance_reg = locs()->in(0).reg();
if (IsUnboxedStore() && compiler->is_optimizing()) {
const VRegister value = locs()->in(1).fpu_reg();
@@ -1752,7 +1750,6 @@
__ TryAllocate(*cls,
slow_path->entry_label(),
temp,
- temp2,
PP);
__ Bind(slow_path->exit_label());
__ mov(temp2, temp);
@@ -1784,7 +1781,6 @@
const Register value_reg = locs()->in(1).reg();
const Register temp = locs()->temp(0).reg();
const Register temp2 = locs()->temp(1).reg();
- const VRegister fpu_temp = locs()->temp(2).fpu_reg();
Label store_pointer;
Label store_double;
@@ -1837,14 +1833,13 @@
__ TryAllocate(compiler->double_class(),
slow_path->entry_label(),
temp,
- temp2,
PP);
__ Bind(slow_path->exit_label());
__ mov(temp2, temp);
__ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2, PP);
__ Bind(©_double);
- __ LoadDFieldFromOffset(fpu_temp, value_reg, Double::value_offset(), PP);
- __ StoreDFieldToOffset(fpu_temp, temp, Double::value_offset(), PP);
+ __ LoadDFieldFromOffset(VTMP, value_reg, Double::value_offset(), PP);
+ __ StoreDFieldToOffset(VTMP, temp, Double::value_offset(), PP);
__ b(&skip_store);
}
@@ -1862,16 +1857,13 @@
__ TryAllocate(compiler->float32x4_class(),
slow_path->entry_label(),
temp,
- temp2,
PP);
__ Bind(slow_path->exit_label());
__ mov(temp2, temp);
__ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2, PP);
__ Bind(©_float32x4);
- __ LoadQFieldFromOffset(
- fpu_temp, value_reg, Float32x4::value_offset(), PP);
- __ StoreQFieldToOffset(
- fpu_temp, value_reg, Float32x4::value_offset(), PP);
+ __ LoadQFieldFromOffset(VTMP, value_reg, Float32x4::value_offset(), PP);
+ __ StoreQFieldToOffset(VTMP, temp, Float32x4::value_offset(), PP);
__ b(&skip_store);
}
@@ -1889,16 +1881,13 @@
__ TryAllocate(compiler->float64x2_class(),
slow_path->entry_label(),
temp,
- temp2,
PP);
__ Bind(slow_path->exit_label());
__ mov(temp2, temp);
__ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2, PP);
__ Bind(©_float64x2);
- __ LoadQFieldFromOffset(
- fpu_temp, value_reg, Float64x2::value_offset(), PP);
- __ StoreQFieldToOffset(
- fpu_temp, value_reg, Float64x2::value_offset(), PP);
+ __ LoadQFieldFromOffset(VTMP, value_reg, Float64x2::value_offset(), PP);
+ __ StoreQFieldToOffset(VTMP, temp, Float64x2::value_offset(), PP);
__ b(&skip_store);
}
@@ -1946,8 +1935,8 @@
//
// This is safe only so long as LoadStaticFieldInstr cannot deoptimize.
void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register field = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register field = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
__ LoadFieldFromOffset(result, field, Field::value_offset(), PP);
}
@@ -1962,8 +1951,8 @@
void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register value = locs()->in(0).reg();
- Register temp = locs()->temp(0).reg();
+ const Register value = locs()->in(0).reg();
+ const Register temp = locs()->temp(0).reg();
__ LoadObject(temp, field(), PP);
if (this->value()->NeedsStoreBuffer()) {
@@ -2058,6 +2047,70 @@
};
+class BoxFloat32x4SlowPath : public SlowPathCode {
+ public:
+ explicit BoxFloat32x4SlowPath(Instruction* instruction)
+ : instruction_(instruction) { }
+
+ virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
+ __ Comment("BoxFloat32x4SlowPath");
+ __ Bind(entry_label());
+ const Class& float32x4_class = compiler->float32x4_class();
+ const Code& stub =
+ Code::Handle(StubCode::GetAllocationStubForClass(float32x4_class));
+ const ExternalLabel label(float32x4_class.ToCString(), stub.EntryPoint());
+
+ LocationSummary* locs = instruction_->locs();
+ locs->live_registers()->Remove(locs->out(0));
+
+ compiler->SaveLiveRegisters(locs);
+ compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
+ &label,
+ PcDescriptors::kOther,
+ locs);
+ __ mov(locs->out(0).reg(), R0);
+ compiler->RestoreLiveRegisters(locs);
+
+ __ b(exit_label());
+ }
+
+ private:
+ Instruction* instruction_;
+};
+
+
+class BoxFloat64x2SlowPath : public SlowPathCode {
+ public:
+ explicit BoxFloat64x2SlowPath(Instruction* instruction)
+ : instruction_(instruction) { }
+
+ virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
+ __ Comment("BoxFloat64x2SlowPath");
+ __ Bind(entry_label());
+ const Class& float64x2_class = compiler->float64x2_class();
+ const Code& stub =
+ Code::Handle(StubCode::GetAllocationStubForClass(float64x2_class));
+ const ExternalLabel label(float64x2_class.ToCString(), stub.EntryPoint());
+
+ LocationSummary* locs = instruction_->locs();
+ locs->live_registers()->Remove(locs->out(0));
+
+ compiler->SaveLiveRegisters(locs);
+ compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
+ &label,
+ PcDescriptors::kOther,
+ locs);
+ __ mov(locs->out(0).reg(), R0);
+ compiler->RestoreLiveRegisters(locs);
+
+ __ b(exit_label());
+ }
+
+ private:
+ Instruction* instruction_;
+};
+
+
LocationSummary* LoadFieldInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
@@ -2073,8 +2126,6 @@
if (IsUnboxedLoad() && opt) {
locs->AddTemp(Location::RequiresRegister());
} else if (IsPotentialUnboxedLoad()) {
- locs->AddTemp(opt ? Location::RequiresFpuRegister()
- : Location::FpuRegisterLocation(V1));
locs->AddTemp(Location::RequiresRegister());
}
locs->set_out(0, Location::RequiresRegister());
@@ -2083,7 +2134,7 @@
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register instance_reg = locs()->in(0).reg();
+ const Register instance_reg = locs()->in(0).reg();
if (IsUnboxedLoad() && compiler->is_optimizing()) {
const VRegister result = locs()->out(0).fpu_reg();
const Register temp = locs()->temp(0).reg();
@@ -2095,8 +2146,10 @@
__ LoadDFieldFromOffset(result, temp, Double::value_offset(), PP);
break;
case kFloat32x4Cid:
+ __ LoadQFieldFromOffset(result, temp, Float32x4::value_offset(), PP);
+ break;
case kFloat64x2Cid:
- UNIMPLEMENTED();
+ __ LoadQFieldFromOffset(result, temp, Float64x2::value_offset(), PP);
break;
default:
UNREACHABLE();
@@ -2105,10 +2158,9 @@
}
Label done;
- Register result_reg = locs()->out(0).reg();
+ const Register result_reg = locs()->out(0).reg();
if (IsPotentialUnboxedLoad()) {
- const Register temp = locs()->temp(1).reg();
- const VRegister value = locs()->temp(0).fpu_reg();
+ const Register temp = locs()->temp(0).reg();
Label load_pointer;
Label load_double;
@@ -2152,24 +2204,44 @@
__ TryAllocate(compiler->double_class(),
slow_path->entry_label(),
result_reg,
- temp,
PP);
__ Bind(slow_path->exit_label());
__ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes(), PP);
- __ LoadDFieldFromOffset(value, temp, Double::value_offset(), PP);
- __ StoreDFieldToOffset(value, result_reg, Double::value_offset(), PP);
+ __ LoadDFieldFromOffset(VTMP, temp, Double::value_offset(), PP);
+ __ StoreDFieldToOffset(VTMP, result_reg, Double::value_offset(), PP);
__ b(&done);
}
- // TODO(zra): Implement these when we add simd loads and stores.
{
__ Bind(&load_float32x4);
- __ Stop("Float32x4 Unimplemented");
+ BoxFloat32x4SlowPath* slow_path = new BoxFloat32x4SlowPath(this);
+ compiler->AddSlowPathCode(slow_path);
+
+ __ TryAllocate(compiler->float32x4_class(),
+ slow_path->entry_label(),
+ result_reg,
+ PP);
+ __ Bind(slow_path->exit_label());
+ __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes(), PP);
+ __ LoadQFieldFromOffset(VTMP, temp, Float32x4::value_offset(), PP);
+ __ StoreQFieldToOffset(VTMP, result_reg, Float32x4::value_offset(), PP);
+ __ b(&done);
}
{
__ Bind(&load_float64x2);
- __ Stop("Float64x2 Unimplemented");
+ BoxFloat64x2SlowPath* slow_path = new BoxFloat64x2SlowPath(this);
+ compiler->AddSlowPathCode(slow_path);
+
+ __ TryAllocate(compiler->float64x2_class(),
+ slow_path->entry_label(),
+ result_reg,
+ PP);
+ __ Bind(slow_path->exit_label());
+ __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes(), PP);
+ __ LoadQFieldFromOffset(VTMP, temp, Float64x2::value_offset(), PP);
+ __ StoreQFieldToOffset(VTMP, result_reg, Float64x2::value_offset(), PP);
+ __ b(&done);
}
__ Bind(&load_pointer);
@@ -2191,8 +2263,8 @@
void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register instantiator_reg = locs()->in(0).reg();
- Register result_reg = locs()->out(0).reg();
+ const Register instantiator_reg = locs()->in(0).reg();
+ const Register result_reg = locs()->out(0).reg();
// 'instantiator_reg' is the instantiator TypeArguments object (or null).
// A runtime call to instantiate the type is required.
@@ -2224,8 +2296,8 @@
void InstantiateTypeArgumentsInstr::EmitNativeCode(
FlowGraphCompiler* compiler) {
- Register instantiator_reg = locs()->in(0).reg();
- Register result_reg = locs()->out(0).reg();
+ const Register instantiator_reg = locs()->in(0).reg();
+ const Register result_reg = locs()->out(0).reg();
ASSERT(instantiator_reg == R0);
ASSERT(instantiator_reg == result_reg);
@@ -2315,8 +2387,8 @@
void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register context_value = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register context_value = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
__ PushObject(Object::ZoneHandle(), PP); // Make room for the result.
__ Push(context_value);
@@ -2387,7 +2459,7 @@
virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
if (FLAG_use_osr) {
uword flags_address = Isolate::Current()->stack_overflow_flags_address();
- Register value = instruction_->locs()->temp(0).reg();
+ const Register value = instruction_->locs()->temp(0).reg();
__ Comment("CheckStackOverflowSlowPathOsr");
__ Bind(osr_entry_label());
__ LoadImmediate(TMP, flags_address, PP);
@@ -2439,7 +2511,7 @@
__ CompareRegisters(SP, TMP);
__ b(slow_path->entry_label(), LS);
if (compiler->CanOSRFunction() && in_loop()) {
- Register temp = locs()->temp(0).reg();
+ const Register temp = locs()->temp(0).reg();
// In unoptimized code check the usage counter to trigger OSR at loop
// stack checks. Use progressively higher thresholds for more deeply
// nested loops to attempt to hit outer loops with OSR when possible.
@@ -2475,8 +2547,8 @@
BinarySmiOpInstr* shift_left) {
const bool is_truncating = shift_left->is_truncating();
const LocationSummary& locs = *shift_left->locs();
- Register left = locs.in(0).reg();
- Register result = locs.out(0).reg();
+ const Register left = locs.in(0).reg();
+ const Register result = locs.out(0).reg();
Label* deopt = shift_left->CanDeoptimize() ?
compiler->AddDeoptStub(shift_left->deopt_id(), ICData::kDeoptBinarySmiOp)
: NULL;
@@ -2514,7 +2586,7 @@
}
// Right (locs.in(1)) is not constant.
- Register right = locs.in(1).reg();
+ const Register right = locs.in(1).reg();
Range* right_range = shift_left->right()->definition()->range();
if (shift_left->left()->BindsToConstant() && !is_truncating) {
// TODO(srdjan): Implement code below for is_truncating().
@@ -2580,7 +2652,7 @@
// Check if count too large for handling it inlined.
__ Asr(TMP, right, kSmiTagSize); // SmiUntag right into TMP.
// Overflow test (preserve left, right, and TMP);
- Register temp = locs.temp(0).reg();
+ const Register temp = locs.temp(0).reg();
__ lslv(temp, left, TMP);
__ asrv(TMP2, temp, TMP);
__ CompareRegisters(left, TMP2);
@@ -2771,7 +2843,7 @@
return;
}
- Register right = locs()->in(1).reg();
+ const Register right = locs()->in(1).reg();
Range* right_range = this->right()->definition()->range();
switch (op_kind()) {
case Token::kADD: {
@@ -2887,7 +2959,7 @@
__ CompareRegisters(TMP, TMP2);
__ csel(TMP, TMP2, TMP, GT);
}
- Register temp = locs()->temp(0).reg();
+ const Register temp = locs()->temp(0).reg();
__ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp.
__ asrv(result, temp, TMP);
__ SmiTag(result);
@@ -2935,8 +3007,8 @@
ICData::kDeoptBinaryDoubleOp);
intptr_t left_cid = left()->Type()->ToCid();
intptr_t right_cid = right()->Type()->ToCid();
- Register left = locs()->in(0).reg();
- Register right = locs()->in(1).reg();
+ const Register left = locs()->in(0).reg();
+ const Register right = locs()->in(1).reg();
if (left_cid == kSmiCid) {
__ tsti(right, kSmiTagMask);
} else if (right_cid == kSmiCid) {
@@ -2951,13 +3023,12 @@
LocationSummary* BoxDoubleInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 1;
- const intptr_t kNumTemps = 1;
+ const intptr_t kNumTemps = 0;
LocationSummary* summary =
new LocationSummary(kNumInputs,
kNumTemps,
LocationSummary::kCallOnSlowPath);
summary->set_in(0, Location::RequiresFpuRegister());
- summary->set_temp(0, Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
@@ -2973,7 +3044,6 @@
__ TryAllocate(compiler->double_class(),
slow_path->entry_label(),
out_reg,
- locs()->temp(0).reg(),
PP);
__ Bind(slow_path->exit_label());
__ StoreDFieldToOffset(value, out_reg, Double::value_offset(), PP);
@@ -3029,68 +3099,208 @@
LocationSummary* BoxFloat32x4Instr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 1;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs,
+ kNumTemps,
+ LocationSummary::kCallOnSlowPath);
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_out(0, Location::RequiresRegister());
+ return summary;
}
void BoxFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ BoxFloat32x4SlowPath* slow_path = new BoxFloat32x4SlowPath(this);
+ compiler->AddSlowPathCode(slow_path);
+
+ const Register out_reg = locs()->out(0).reg();
+ const VRegister value = locs()->in(0).fpu_reg();
+
+ __ TryAllocate(compiler->float32x4_class(),
+ slow_path->entry_label(),
+ out_reg,
+ PP);
+ __ Bind(slow_path->exit_label());
+
+ __ StoreQFieldToOffset(value, out_reg, Float32x4::value_offset(), PP);
}
LocationSummary* UnboxFloat32x4Instr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 1;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ summary->set_in(0, Location::RequiresRegister());
+ summary->set_out(0, Location::RequiresFpuRegister());
+ return summary;
}
void UnboxFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ const intptr_t value_cid = value()->Type()->ToCid();
+ const Register value = locs()->in(0).reg();
+ const VRegister result = locs()->out(0).fpu_reg();
+
+ if (value_cid != kFloat32x4Cid) {
+ Label* deopt = compiler->AddDeoptStub(deopt_id_, ICData::kDeoptCheckClass);
+ __ tsti(value, kSmiTagMask);
+ __ b(deopt, EQ);
+ __ CompareClassId(value, kFloat32x4Cid, PP);
+ __ b(deopt, NE);
+ }
+
+ __ LoadQFieldFromOffset(result, value, Float32x4::value_offset(), PP);
}
LocationSummary* BoxFloat64x2Instr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 1;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs,
+ kNumTemps,
+ LocationSummary::kCallOnSlowPath);
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_out(0, Location::RequiresRegister());
+ return summary;
}
void BoxFloat64x2Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ BoxFloat64x2SlowPath* slow_path = new BoxFloat64x2SlowPath(this);
+ compiler->AddSlowPathCode(slow_path);
+
+ const Register out_reg = locs()->out(0).reg();
+ const VRegister value = locs()->in(0).fpu_reg();
+
+ __ TryAllocate(compiler->float64x2_class(),
+ slow_path->entry_label(),
+ out_reg,
+ PP);
+ __ Bind(slow_path->exit_label());
+
+ __ StoreQFieldToOffset(value, out_reg, Float64x2::value_offset(), PP);
}
LocationSummary* UnboxFloat64x2Instr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 1;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ summary->set_in(0, Location::RequiresRegister());
+ summary->set_out(0, Location::RequiresFpuRegister());
+ return summary;
}
void UnboxFloat64x2Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ const intptr_t value_cid = value()->Type()->ToCid();
+ const Register value = locs()->in(0).reg();
+ const VRegister result = locs()->out(0).fpu_reg();
+
+ if (value_cid != kFloat64x2Cid) {
+ Label* deopt = compiler->AddDeoptStub(deopt_id_, ICData::kDeoptCheckClass);
+ __ tsti(value, kSmiTagMask);
+ __ b(deopt, EQ);
+ __ CompareClassId(value, kFloat64x2Cid, PP);
+ __ b(deopt, NE);
+ }
+
+ __ LoadQFieldFromOffset(result, value, Float64x2::value_offset(), PP);
}
LocationSummary* BoxInt32x4Instr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 1;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs,
+ kNumTemps,
+ LocationSummary::kCallOnSlowPath);
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_out(0, Location::RequiresRegister());
+ return summary;
}
+class BoxInt32x4SlowPath : public SlowPathCode {
+ public:
+ explicit BoxInt32x4SlowPath(BoxInt32x4Instr* instruction)
+ : instruction_(instruction) { }
+
+ virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
+ __ Comment("BoxInt32x4SlowPath");
+ __ Bind(entry_label());
+ const Class& int32x4_class = compiler->int32x4_class();
+ const Code& stub =
+ Code::Handle(StubCode::GetAllocationStubForClass(int32x4_class));
+ const ExternalLabel label(int32x4_class.ToCString(), stub.EntryPoint());
+
+ LocationSummary* locs = instruction_->locs();
+ locs->live_registers()->Remove(locs->out(0));
+
+ compiler->SaveLiveRegisters(locs);
+ compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
+ &label,
+ PcDescriptors::kOther,
+ locs);
+ __ mov(locs->out(0).reg(), R0);
+ compiler->RestoreLiveRegisters(locs);
+
+ __ b(exit_label());
+ }
+
+ private:
+ BoxInt32x4Instr* instruction_;
+};
+
+
void BoxInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ BoxInt32x4SlowPath* slow_path = new BoxInt32x4SlowPath(this);
+ compiler->AddSlowPathCode(slow_path);
+
+ const Register out_reg = locs()->out(0).reg();
+ const VRegister value = locs()->in(0).fpu_reg();
+
+ __ TryAllocate(compiler->int32x4_class(),
+ slow_path->entry_label(),
+ out_reg,
+ PP);
+ __ Bind(slow_path->exit_label());
+
+ __ StoreQFieldToOffset(value, out_reg, Int32x4::value_offset(), PP);
}
LocationSummary* UnboxInt32x4Instr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 1;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ summary->set_in(0, Location::RequiresRegister());
+ summary->set_out(0, Location::RequiresFpuRegister());
+ return summary;
}
void UnboxInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ const intptr_t value_cid = value()->Type()->ToCid();
+ const Register value = locs()->in(0).reg();
+ const VRegister result = locs()->out(0).fpu_reg();
+
+ if (value_cid != kInt32x4Cid) {
+ Label* deopt = compiler->AddDeoptStub(deopt_id_, ICData::kDeoptCheckClass);
+ __ tsti(value, kSmiTagMask);
+ __ b(deopt, EQ);
+ __ CompareClassId(value, kInt32x4Cid, PP);
+ __ b(deopt, NE);
+ }
+
+ __ LoadQFieldFromOffset(result, value, Int32x4::value_offset(), PP);
}
@@ -3121,24 +3331,56 @@
LocationSummary* BinaryFloat32x4OpInstr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 2;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_in(1, Location::RequiresFpuRegister());
+ summary->set_out(0, Location::RequiresFpuRegister());
+ return summary;
}
void BinaryFloat32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ const VRegister left = locs()->in(0).fpu_reg();
+ const VRegister right = locs()->in(1).fpu_reg();
+ const VRegister result = locs()->out(0).fpu_reg();
+
+ switch (op_kind()) {
+ case Token::kADD: __ vadds(result, left, right); break;
+ case Token::kSUB: __ vsubs(result, left, right); break;
+ case Token::kMUL: __ vmuls(result, left, right); break;
+ case Token::kDIV: __ vdivs(result, left, right); break;
+ default: UNREACHABLE();
+ }
}
LocationSummary* BinaryFloat64x2OpInstr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 2;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_in(1, Location::RequiresFpuRegister());
+ summary->set_out(0, Location::RequiresFpuRegister());
+ return summary;
}
void BinaryFloat64x2OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ const VRegister left = locs()->in(0).fpu_reg();
+ const VRegister right = locs()->in(1).fpu_reg();
+ const VRegister result = locs()->out(0).fpu_reg();
+
+ switch (op_kind()) {
+ case Token::kADD: __ vaddd(result, left, right); break;
+ case Token::kSUB: __ vsubd(result, left, right); break;
+ case Token::kMUL: __ vmuld(result, left, right); break;
+ case Token::kDIV: __ vdivd(result, left, right); break;
+ default: UNREACHABLE();
+ }
}
@@ -3177,35 +3419,73 @@
LocationSummary* Float32x4ConstructorInstr::MakeLocationSummary(
bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 4;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_in(1, Location::RequiresFpuRegister());
+ summary->set_in(2, Location::RequiresFpuRegister());
+ summary->set_in(3, Location::RequiresFpuRegister());
+ summary->set_out(0, Location::RequiresFpuRegister());
+ return summary;
}
void Float32x4ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ const VRegister v0 = locs()->in(0).fpu_reg();
+ const VRegister v1 = locs()->in(1).fpu_reg();
+ const VRegister v2 = locs()->in(2).fpu_reg();
+ const VRegister v3 = locs()->in(3).fpu_reg();
+ const VRegister r = locs()->out(0).fpu_reg();
+
+ __ fcvtsd(v0, v0);
+ __ vinss(r, 0, v0, 0);
+ __ fcvtsd(v1, v1);
+ __ vinss(r, 1, v1, 1);
+ __ fcvtsd(v2, v2);
+ __ vinss(r, 2, v2, 2);
+ __ fcvtsd(v3, v3);
+ __ vinss(r, 3, v3, 3);
}
LocationSummary* Float32x4ZeroInstr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 0;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ summary->set_out(0, Location::RequiresFpuRegister());
+ return summary;
}
void Float32x4ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ const VRegister v = locs()->out(0).fpu_reg();
+ __ LoadDImmediate(v, 0.0, PP);
}
LocationSummary* Float32x4SplatInstr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 1;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_out(0, Location::RequiresFpuRegister());
+ return summary;
}
void Float32x4SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ const VRegister value = locs()->in(0).fpu_reg();
+ const VRegister result = locs()->out(0).fpu_reg();
+
+ // Convert to Float32.
+ __ fcvtsd(VTMP, value);
+
+ // Splat across all lanes.
+ __ vdups(result, VTMP, 0);
}
@@ -3243,13 +3523,30 @@
LocationSummary* Float32x4ScaleInstr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 2;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_in(1, Location::RequiresFpuRegister());
+ summary->set_out(0, Location::RequiresFpuRegister());
+ return summary;
}
void Float32x4ScaleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ const VRegister left = locs()->in(0).fpu_reg();
+ const VRegister right = locs()->in(1).fpu_reg();
+ const VRegister result = locs()->out(0).fpu_reg();
+
+ switch (op_kind()) {
+ case MethodRecognizer::kFloat32x4Scale:
+ __ fcvtsd(VTMP, left);
+ __ vdups(result, VTMP, 0);
+ __ vmuls(result, result, right);
+ break;
+ default: UNREACHABLE();
+ }
}
@@ -3298,47 +3595,85 @@
LocationSummary* Simd64x2ShuffleInstr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 1;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_out(0, Location::RequiresFpuRegister());
+ return summary;
}
void Simd64x2ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ const VRegister value = locs()->in(0).fpu_reg();
+ const VRegister result = locs()->out(0).fpu_reg();
+
+ switch (op_kind()) {
+ case MethodRecognizer::kFloat64x2GetX:
+ __ vinsd(result, 0, value, 0);
+ break;
+ case MethodRecognizer::kFloat64x2GetY:
+ __ vinsd(result, 0, value, 1);
+ break;
+ default: UNREACHABLE();
+ }
}
LocationSummary* Float64x2ZeroInstr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 0;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ summary->set_out(0, Location::RequiresFpuRegister());
+ return summary;
}
void Float64x2ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ const VRegister v = locs()->out(0).fpu_reg();
+ __ LoadDImmediate(v, 0.0, PP);
}
LocationSummary* Float64x2SplatInstr::MakeLocationSummary(bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 1;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_out(0, Location::RequiresFpuRegister());
+ return summary;
}
void Float64x2SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ const VRegister value = locs()->in(0).fpu_reg();
+ const VRegister result = locs()->out(0).fpu_reg();
+ __ vdupd(result, value, 0);
}
LocationSummary* Float64x2ConstructorInstr::MakeLocationSummary(
bool opt) const {
- UNIMPLEMENTED();
- return NULL;
+ const intptr_t kNumInputs = 2;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_in(1, Location::RequiresFpuRegister());
+ summary->set_out(0, Location::RequiresFpuRegister());
+ return summary;
}
void Float64x2ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- UNIMPLEMENTED();
+ const VRegister v0 = locs()->in(0).fpu_reg();
+ const VRegister v1 = locs()->in(1).fpu_reg();
+ const VRegister r = locs()->out(0).fpu_reg();
+ __ vinsd(r, 0, v0, 0);
+ __ vinsd(r, 0, v1, 0);
}
@@ -3479,12 +3814,12 @@
void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (kind() == MathUnaryInstr::kSqrt) {
- VRegister val = locs()->in(0).fpu_reg();
- VRegister result = locs()->out(0).fpu_reg();
+ const VRegister val = locs()->in(0).fpu_reg();
+ const VRegister result = locs()->out(0).fpu_reg();
__ fsqrtd(result, val);
} else if (kind() == MathUnaryInstr::kDoubleSquare) {
- VRegister val = locs()->in(0).fpu_reg();
- VRegister result = locs()->out(0).fpu_reg();
+ const VRegister val = locs()->in(0).fpu_reg();
+ const VRegister result = locs()->out(0).fpu_reg();
__ fmuld(result, val, val);
} else {
ASSERT((kind() == MathUnaryInstr::kSin) ||
@@ -3566,9 +3901,9 @@
}
ASSERT(result_cid() == kSmiCid);
- Register left = locs()->in(0).reg();
- Register right = locs()->in(1).reg();
- Register result = locs()->out(0).reg();
+ const Register left = locs()->in(0).reg();
+ const Register right = locs()->in(1).reg();
+ const Register result = locs()->out(0).reg();
__ CompareRegisters(left, right);
ASSERT(result == left);
if (is_min) {
@@ -3593,8 +3928,8 @@
void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register value = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register value = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
switch (op_kind()) {
case Token::kNEGATE: {
Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
@@ -3628,8 +3963,8 @@
void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- VRegister result = locs()->out(0).fpu_reg();
- VRegister value = locs()->in(0).fpu_reg();
+ const VRegister result = locs()->out(0).fpu_reg();
+ const VRegister value = locs()->in(0).fpu_reg();
__ fnegd(result, value);
}
@@ -3646,8 +3981,8 @@
void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register value = locs()->in(0).reg();
- VRegister result = locs()->out(0).fpu_reg();
+ const Register value = locs()->in(0).reg();
+ const VRegister result = locs()->out(0).fpu_reg();
__ SmiUntag(value);
__ scvtfd(result, value);
}
@@ -3961,13 +4296,13 @@
PairLocation* pair = locs()->in(0).AsPairLocation();
Location in_loc = pair->At(index());
if (representation() == kUnboxedDouble) {
- VRegister out = locs()->out(0).fpu_reg();
- VRegister in = in_loc.fpu_reg();
+ const VRegister out = locs()->out(0).fpu_reg();
+ const VRegister in = in_loc.fpu_reg();
__ fmovdd(out, in);
} else {
ASSERT(representation() == kTagged);
- Register out = locs()->out(0).reg();
- Register in = in_loc.reg();
+ const Register out = locs()->out(0).reg();
+ const Register in = in_loc.reg();
__ mov(out, in);
}
}
@@ -4135,8 +4470,8 @@
ASSERT((unary_checks().GetReceiverClassIdAt(0) != kSmiCid) ||
(unary_checks().NumberOfChecks() > 1));
- Register value = locs()->in(0).reg();
- Register temp = locs()->temp(0).reg();
+ const Register value = locs()->in(0).reg();
+ const Register temp = locs()->temp(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id(), deopt_reason);
Label is_ok;
intptr_t cix = 0;
@@ -4174,7 +4509,7 @@
void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register value = locs()->in(0).reg();
+ const Register value = locs()->in(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
__ tsti(value, kSmiTagMask);
__ b(deopt, NE);
@@ -4217,18 +4552,18 @@
}
if (index_loc.IsConstant()) {
- Register length = length_loc.reg();
+ const Register length = length_loc.reg();
const Smi& index = Smi::Cast(index_loc.constant());
__ CompareImmediate(length, reinterpret_cast<int64_t>(index.raw()), PP);
__ b(deopt, LS);
} else if (length_loc.IsConstant()) {
const Smi& length = Smi::Cast(length_loc.constant());
- Register index = index_loc.reg();
+ const Register index = index_loc.reg();
__ CompareImmediate(index, reinterpret_cast<int64_t>(length.raw()), PP);
__ b(deopt, CS);
} else {
- Register length = length_loc.reg();
- Register index = index_loc.reg();
+ const Register length = length_loc.reg();
+ const Register index = index_loc.reg();
__ CompareRegisters(index, length);
__ b(deopt, CS);
}
@@ -4446,7 +4781,7 @@
Condition true_condition = EmitComparisonCode(compiler, labels);
EmitBranchOnCondition(compiler, true_condition, labels);
- Register result = locs()->out(0).reg();
+ const Register result = locs()->out(0).reg();
Label done;
__ Bind(&is_false);
__ LoadObject(result, Bool::False(), PP);
@@ -4475,8 +4810,8 @@
void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- Register value = locs()->in(0).reg();
- Register result = locs()->out(0).reg();
+ const Register value = locs()->in(0).reg();
+ const Register result = locs()->out(0).reg();
__ LoadObject(result, Bool::True(), PP);
__ LoadObject(TMP, Bool::False(), PP);
diff --git a/runtime/vm/intermediate_language_ia32.cc b/runtime/vm/intermediate_language_ia32.cc
index 6ff2f13..7e082dd 100644
--- a/runtime/vm/intermediate_language_ia32.cc
+++ b/runtime/vm/intermediate_language_ia32.cc
@@ -253,12 +253,13 @@
LocationSummary* EqualityCompareInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 2;
if (operation_cid() == kMintCid) {
- const intptr_t kNumTemps = 1;
+ const intptr_t kNumTemps = 0;
LocationSummary* locs =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- locs->set_in(0, Location::RequiresFpuRegister());
- locs->set_in(1, Location::RequiresFpuRegister());
- locs->set_temp(0, Location::RequiresRegister());
+ locs->set_in(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
+ locs->set_in(1, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
locs->set_out(0, Location::RequiresRegister());
return locs;
}
@@ -392,22 +393,19 @@
static void EmitJavascriptIntOverflowCheck(FlowGraphCompiler* compiler,
Label* overflow,
- XmmRegister result,
- Register tmp) {
+ Register result_lo,
+ Register result_hi) {
// Compare upper half.
Label check_lower;
- __ pextrd(tmp, result, Immediate(1));
- __ cmpl(tmp, Immediate(0x00200000));
+ __ cmpl(result_hi, Immediate(0x00200000));
__ j(GREATER, overflow);
__ j(NOT_EQUAL, &check_lower);
- __ pextrd(tmp, result, Immediate(0));
- __ cmpl(tmp, Immediate(0));
+ __ cmpl(result_lo, Immediate(0));
__ j(ABOVE, overflow);
__ Bind(&check_lower);
- __ pextrd(tmp, result, Immediate(1));
- __ cmpl(tmp, Immediate(-0x00200000));
+ __ cmpl(result_hi, Immediate(-0x00200000));
__ j(LESS, overflow);
// Anything in the lower part would make the number bigger than the lower
// bound, so we are done.
@@ -434,15 +432,20 @@
Token::Kind kind,
BranchLabels labels) {
ASSERT(Token::IsEqualityOperator(kind));
- XmmRegister left = locs.in(0).fpu_reg();
- XmmRegister right = locs.in(1).fpu_reg();
- Register temp = locs.temp(0).reg();
- __ movaps(XMM0, left);
- __ pcmpeqq(XMM0, right);
- __ movd(temp, XMM0);
-
+ PairLocation* left_pair = locs.in(0).AsPairLocation();
+ Register left1 = left_pair->At(0).reg();
+ Register left2 = left_pair->At(1).reg();
+ PairLocation* right_pair = locs.in(1).AsPairLocation();
+ Register right1 = right_pair->At(0).reg();
+ Register right2 = right_pair->At(1).reg();
+ Label done;
+ // Compare lower.
+ __ cmpl(left1, right1);
+ __ j(NOT_EQUAL, &done);
+ // Lower is equal, compare upper.
+ __ cmpl(left2, right2);
+ __ Bind(&done);
Condition true_condition = TokenKindToMintCondition(kind);
- __ cmpl(temp, Immediate(-1));
return true_condition;
}
@@ -451,10 +454,12 @@
const LocationSummary& locs,
Token::Kind kind,
BranchLabels labels) {
- XmmRegister left = locs.in(0).fpu_reg();
- XmmRegister right = locs.in(1).fpu_reg();
- Register left_tmp = locs.temp(0).reg();
- Register right_tmp = locs.temp(1).reg();
+ PairLocation* left_pair = locs.in(0).AsPairLocation();
+ Register left1 = left_pair->At(0).reg();
+ Register left2 = left_pair->At(1).reg();
+ PairLocation* right_pair = locs.in(1).AsPairLocation();
+ Register right1 = right_pair->At(0).reg();
+ Register right2 = right_pair->At(1).reg();
Condition hi_cond = OVERFLOW, lo_cond = OVERFLOW;
switch (kind) {
@@ -480,16 +485,12 @@
ASSERT(hi_cond != OVERFLOW && lo_cond != OVERFLOW);
Label is_true, is_false;
// Compare upper halves first.
- __ pextrd(left_tmp, left, Immediate(1));
- __ pextrd(right_tmp, right, Immediate(1));
- __ cmpl(left_tmp, right_tmp);
+ __ cmpl(left2, right2);
__ j(hi_cond, labels.true_label);
__ j(FlipCondition(hi_cond), labels.false_label);
// If upper is equal, compare lower half.
- __ pextrd(left_tmp, left, Immediate(0));
- __ pextrd(right_tmp, right, Immediate(0));
- __ cmpl(left_tmp, right_tmp);
+ __ cmpl(left1, right1);
return lo_cond;
}
@@ -688,13 +689,13 @@
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
if (operation_cid() == kMintCid) {
- const intptr_t kNumTemps = 2;
+ const intptr_t kNumTemps = 0;
LocationSummary* locs =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- locs->set_in(0, Location::RequiresFpuRegister());
- locs->set_in(1, Location::RequiresFpuRegister());
- locs->set_temp(0, Location::RequiresRegister());
- locs->set_temp(1, Location::RequiresRegister());
+ locs->set_in(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
+ locs->set_in(1, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
locs->set_out(0, Location::RequiresRegister());
return locs;
}
@@ -1020,7 +1021,17 @@
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
locs->set_out(0, Location::RequiresFpuRegister());
+ } else if (representation() == kUnboxedMint) {
+ if (class_id() == kTypedDataInt32ArrayCid) {
+ locs->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
+ Location::RegisterLocation(EDX)));
+ } else {
+ ASSERT(class_id() == kTypedDataUint32ArrayCid);
+ locs->set_out(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
+ }
} else {
+ ASSERT(representation() == kTagged);
locs->set_out(0, Location::RequiresRegister());
}
return locs;
@@ -1034,22 +1045,21 @@
Address element_address(kNoRegister, 0);
if (IsExternal()) {
element_address = index.IsRegister()
- ? FlowGraphCompiler::ExternalElementAddressForRegIndex(
+ ? compiler->ExternalElementAddressForRegIndex(
index_scale(), array, index.reg())
- : FlowGraphCompiler::ExternalElementAddressForIntIndex(
+ : compiler->ExternalElementAddressForIntIndex(
index_scale(), array, Smi::Cast(index.constant()).Value());
} else {
ASSERT(this->array()->definition()->representation() == kTagged);
element_address = index.IsRegister()
- ? FlowGraphCompiler::ElementAddressForRegIndex(
+ ? compiler->ElementAddressForRegIndex(
class_id(), index_scale(), array, index.reg())
- : FlowGraphCompiler::ElementAddressForIntIndex(
+ : compiler->ElementAddressForIntIndex(
class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value());
}
if ((representation() == kUnboxedDouble) ||
- (representation() == kUnboxedMint) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
@@ -1058,14 +1068,6 @@
__ SmiUntag(index.reg());
}
switch (class_id()) {
- case kTypedDataInt32ArrayCid:
- __ movss(result, element_address);
- __ pmovsxdq(result, result);
- break;
- case kTypedDataUint32ArrayCid:
- __ xorpd(result, result);
- __ movss(result, element_address);
- break;
case kTypedDataFloat32ArrayCid:
__ movss(result, element_address);
break;
@@ -1077,10 +1079,39 @@
case kTypedDataFloat64x2ArrayCid:
__ movups(result, element_address);
break;
+ default:
+ UNREACHABLE();
}
return;
}
+ if (representation() == kUnboxedMint) {
+ ASSERT(locs()->out(0).IsPairLocation());
+ PairLocation* result_pair = locs()->out(0).AsPairLocation();
+ Register result1 = result_pair->At(0).reg();
+ Register result2 = result_pair->At(1).reg();
+ if ((index_scale() == 1) && index.IsRegister()) {
+ __ SmiUntag(index.reg());
+ }
+ switch (class_id()) {
+ case kTypedDataInt32ArrayCid:
+ ASSERT(result1 == EAX);
+ ASSERT(result2 == EDX);
+ __ movl(result1, element_address);
+ __ cdq();
+ break;
+ case kTypedDataUint32ArrayCid:
+ __ movl(result1, element_address);
+ __ xorl(result2, result2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ }
+
+ ASSERT(representation() == kTagged);
+
Register result = locs()->out(0).reg();
if ((index_scale() == 1) && index.IsRegister()) {
__ SmiUntag(index.reg());
@@ -1212,11 +1243,16 @@
break;
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
- // Mints are stored in XMM registers. For smis, use a writable register
- // because the value must be untagged before storing.
- locs->set_in(2, value()->IsSmiValue()
- ? Location::WritableRegister()
- : Location::RequiresFpuRegister());
+ // For smis, use a writable register because the value must be untagged
+ // before storing. Mints are stored in registers pairs.
+ if (value()->IsSmiValue()) {
+ locs->set_in(2, Location::WritableRegister());
+ } else {
+ // We only move the lower 32-bits so we don't care where the high bits
+ // are located.
+ locs->set_in(2, Location::Pair(Location::RequiresRegister(),
+ Location::Any()));
+ }
break;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
@@ -1243,16 +1279,16 @@
Address element_address(kNoRegister, 0);
if (IsExternal()) {
element_address = index.IsRegister()
- ? FlowGraphCompiler::ExternalElementAddressForRegIndex(
+ ? compiler->ExternalElementAddressForRegIndex(
index_scale(), array, index.reg())
- : FlowGraphCompiler::ExternalElementAddressForIntIndex(
+ : compiler->ExternalElementAddressForIntIndex(
index_scale(), array, Smi::Cast(index.constant()).Value());
} else {
ASSERT(this->array()->definition()->representation() == kTagged);
element_address = index.IsRegister()
- ? FlowGraphCompiler::ElementAddressForRegIndex(
+ ? compiler->ElementAddressForRegIndex(
class_id(), index_scale(), array, index.reg())
- : FlowGraphCompiler::ElementAddressForIntIndex(
+ : compiler->ElementAddressForIntIndex(
class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value());
}
@@ -1333,7 +1369,9 @@
__ movl(element_address, value);
} else {
ASSERT(RequiredInputRepresentation(2) == kUnboxedMint);
- __ movss(element_address, locs()->in(2).fpu_reg());
+ PairLocation* value_pair = locs()->in(2).AsPairLocation();
+ Register value1 = value_pair->At(0).reg();
+ __ movl(element_address, value1);
}
break;
case kTypedDataFloat32ArrayCid:
@@ -5536,19 +5574,12 @@
LocationSummary* UnboxIntegerInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 1;
- const intptr_t value_cid = value()->Type()->ToCid();
- const bool needs_temp = ((value_cid != kSmiCid) && (value_cid != kMintCid));
- const bool needs_writable_input = (value_cid == kSmiCid);
- const intptr_t kNumTemps = needs_temp ? 1 : 0;
+ const intptr_t kNumTemps = 0;
LocationSummary* summary =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- summary->set_in(0, needs_writable_input
- ? Location::WritableRegister()
- : Location::RequiresRegister());
- if (needs_temp) {
- summary->set_temp(0, Location::RequiresRegister());
- }
- summary->set_out(0, Location::RequiresFpuRegister());
+ summary->set_in(0, Location::RequiresRegister());
+ summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
+ Location::RegisterLocation(EDX)));
return summary;
}
@@ -5556,30 +5587,39 @@
void UnboxIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t value_cid = value()->Type()->ToCid();
const Register value = locs()->in(0).reg();
- const XmmRegister result = locs()->out(0).fpu_reg();
+ PairLocation* result_pair = locs()->out(0).AsPairLocation();
+ Register result_lo = result_pair->At(0).reg();
+ Register result_hi = result_pair->At(1).reg();
+
+ ASSERT(value != result_lo);
+ ASSERT(value != result_hi);
+ ASSERT(result_lo == EAX);
+ ASSERT(result_hi == EDX);
if (value_cid == kMintCid) {
- __ movsd(result, FieldAddress(value, Mint::value_offset()));
+ __ movl(result_lo, FieldAddress(value, Mint::value_offset()));
+ __ movl(result_hi, FieldAddress(value, Mint::value_offset() + kWordSize));
} else if (value_cid == kSmiCid) {
- __ SmiUntag(value); // Untag input before conversion.
- __ movd(result, value);
- __ pmovsxdq(result, result);
+ __ movl(result_lo, value);
+ __ SmiUntag(result_lo);
+ // Sign extend into result_hi.
+ __ cdq();
} else {
- Register temp = locs()->temp(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id_,
ICData::kDeoptUnboxInteger);
Label is_smi, done;
__ testl(value, Immediate(kSmiTagMask));
__ j(ZERO, &is_smi);
- __ CompareClassId(value, kMintCid, temp);
+ __ CompareClassId(value, kMintCid, result_lo);
__ j(NOT_EQUAL, deopt);
- __ movsd(result, FieldAddress(value, Mint::value_offset()));
+ __ movl(result_lo, FieldAddress(value, Mint::value_offset()));
+ __ movl(result_hi, FieldAddress(value, Mint::value_offset() + kWordSize));
__ jmp(&done);
__ Bind(&is_smi);
- __ movl(temp, value);
- __ SmiUntag(temp);
- __ movd(result, temp);
- __ pmovsxdq(result, result);
+ __ movl(result_lo, value);
+ __ SmiUntag(result_lo);
+ // Sign extend into result_hi.
+ __ cdq();
__ Bind(&done);
}
}
@@ -5587,15 +5627,14 @@
LocationSummary* BoxIntegerInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 1;
- const intptr_t kNumTemps = 2;
+ const intptr_t kNumTemps = 1;
LocationSummary* summary =
new LocationSummary(kNumInputs,
kNumTemps,
LocationSummary::kCallOnSlowPath);
- summary->set_in(0, Location::RequiresFpuRegister());
- summary->set_temp(0, Location::RegisterLocation(EAX));
- summary->set_temp(1, Location::RegisterLocation(EDX));
- // TODO(fschneider): Save one temp by using result register as a temp.
+ summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
+ summary->set_temp(0, Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
@@ -5637,30 +5676,32 @@
void BoxIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
BoxIntegerSlowPath* slow_path = new BoxIntegerSlowPath(this);
compiler->AddSlowPathCode(slow_path);
-
+ PairLocation* value_pair = locs()->in(0).AsPairLocation();
+ Register value_lo = value_pair->At(0).reg();
+ Register value_hi = value_pair->At(1).reg();
Register out_reg = locs()->out(0).reg();
- XmmRegister value = locs()->in(0).fpu_reg();
+
+ // Copy value_hi into out_reg as a temporary.
+ // We modify value_lo but restore it before using it.
+ __ movl(out_reg, value_hi);
// Unboxed operations produce smis or mint-sized values.
// Check if value fits into a smi.
Label not_smi, done;
- __ pextrd(EDX, value, Immediate(1)); // Upper half.
- __ pextrd(EAX, value, Immediate(0)); // Lower half.
+
// 1. Compute (x + -kMinSmi) which has to be in the range
// 0 .. -kMinSmi+kMaxSmi for x to fit into a smi.
- __ addl(EAX, Immediate(0x40000000));
- __ adcl(EDX, Immediate(0));
+ __ addl(value_lo, Immediate(0x40000000));
+ __ adcl(out_reg, Immediate(0));
// 2. Unsigned compare to -kMinSmi+kMaxSmi.
- __ cmpl(EAX, Immediate(0x80000000));
- __ sbbl(EDX, Immediate(0));
+ __ cmpl(value_lo, Immediate(0x80000000));
+ __ sbbl(out_reg, Immediate(0));
__ j(ABOVE_EQUAL, ¬_smi);
// 3. Restore lower half if result is a smi.
- __ subl(EAX, Immediate(0x40000000));
-
- __ SmiTag(EAX);
- __ movl(out_reg, EAX);
+ __ subl(value_lo, Immediate(0x40000000));
+ __ movl(out_reg, value_lo);
+ __ SmiTag(out_reg);
__ jmp(&done);
-
__ Bind(¬_smi);
__ TryAllocate(
Class::ZoneHandle(Isolate::Current()->object_store()->mint_class()),
@@ -5669,7 +5710,10 @@
out_reg,
kNoRegister);
__ Bind(slow_path->exit_label());
- __ movsd(FieldAddress(out_reg, Mint::value_offset()), value);
+ // 3. Restore lower half of input before using it.
+ __ subl(value_lo, Immediate(0x40000000));
+ __ movl(FieldAddress(out_reg, Mint::value_offset()), value_lo);
+ __ movl(FieldAddress(out_reg, Mint::value_offset() + kWordSize), value_hi);
__ Bind(&done);
}
@@ -5680,27 +5724,25 @@
case Token::kBIT_AND:
case Token::kBIT_OR:
case Token::kBIT_XOR: {
- const intptr_t kNumTemps =
- FLAG_throw_on_javascript_int_overflow ? 1 : 0;
+ const intptr_t kNumTemps = 0;
LocationSummary* summary =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- summary->set_in(0, Location::RequiresFpuRegister());
- summary->set_in(1, Location::RequiresFpuRegister());
- if (FLAG_throw_on_javascript_int_overflow) {
- summary->set_temp(0, Location::RequiresRegister());
- }
+ summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
+ summary->set_in(1, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
summary->set_out(0, Location::SameAsFirstInput());
return summary;
}
case Token::kADD:
case Token::kSUB: {
- const intptr_t kNumTemps = 2;
+ const intptr_t kNumTemps = 0;
LocationSummary* summary =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- summary->set_in(0, Location::RequiresFpuRegister());
- summary->set_in(1, Location::RequiresFpuRegister());
- summary->set_temp(0, Location::RequiresRegister());
- summary->set_temp(1, Location::RequiresRegister());
+ summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
+ summary->set_in(1, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
summary->set_out(0, Location::SameAsFirstInput());
return summary;
}
@@ -5712,67 +5754,68 @@
void BinaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- XmmRegister left = locs()->in(0).fpu_reg();
- XmmRegister right = locs()->in(1).fpu_reg();
-
- ASSERT(locs()->out(0).fpu_reg() == left);
+ PairLocation* left_pair = locs()->in(0).AsPairLocation();
+ Register left_lo = left_pair->At(0).reg();
+ Register left_hi = left_pair->At(1).reg();
+ PairLocation* right_pair = locs()->in(1).AsPairLocation();
+ Register right_lo = right_pair->At(0).reg();
+ Register right_hi = right_pair->At(1).reg();
+ PairLocation* out_pair = locs()->out(0).AsPairLocation();
+ Register out_lo = out_pair->At(0).reg();
+ Register out_hi = out_pair->At(1).reg();
+ ASSERT(out_lo == left_lo);
+ ASSERT(out_hi == left_hi);
Label* deopt = NULL;
if (FLAG_throw_on_javascript_int_overflow) {
deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp);
}
switch (op_kind()) {
- case Token::kBIT_AND: __ andpd(left, right); break;
- case Token::kBIT_OR: __ orpd(left, right); break;
- case Token::kBIT_XOR: __ xorpd(left, right); break;
+ case Token::kBIT_AND:
+ __ andl(left_lo, right_lo);
+ __ andl(left_hi, right_hi);
+ break;
+ case Token::kBIT_OR:
+ __ orl(left_lo, right_lo);
+ __ orl(left_hi, right_hi);
+ break;
+ case Token::kBIT_XOR:
+ __ xorl(left_lo, right_lo);
+ __ xorl(left_hi, right_hi);
+ break;
case Token::kADD:
case Token::kSUB: {
- Register lo = locs()->temp(0).reg();
- Register hi = locs()->temp(1).reg();
if (!FLAG_throw_on_javascript_int_overflow) {
deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp);
}
-
- Label done, overflow;
- __ pextrd(lo, right, Immediate(0)); // Lower half
- __ pextrd(hi, right, Immediate(1)); // Upper half
- __ subl(ESP, Immediate(2 * kWordSize));
- __ movq(Address(ESP, 0), left);
if (op_kind() == Token::kADD) {
- __ addl(Address(ESP, 0), lo);
- __ adcl(Address(ESP, 1 * kWordSize), hi);
+ __ addl(left_lo, right_lo);
+ __ adcl(left_hi, right_hi);
} else {
- __ subl(Address(ESP, 0), lo);
- __ sbbl(Address(ESP, 1 * kWordSize), hi);
+ __ subl(left_lo, right_lo);
+ __ sbbl(left_hi, right_hi);
}
- __ j(OVERFLOW, &overflow);
- __ movq(left, Address(ESP, 0));
- __ addl(ESP, Immediate(2 * kWordSize));
- __ jmp(&done);
- __ Bind(&overflow);
- __ addl(ESP, Immediate(2 * kWordSize));
- __ jmp(deopt);
- __ Bind(&done);
+ __ j(OVERFLOW, deopt);
break;
}
default: UNREACHABLE();
}
if (FLAG_throw_on_javascript_int_overflow) {
- Register tmp = locs()->temp(0).reg();
- EmitJavascriptIntOverflowCheck(compiler, deopt, left, tmp);
+ EmitJavascriptIntOverflowCheck(compiler, deopt, left_lo, left_hi);
}
}
LocationSummary* ShiftMintOpInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 2;
- const intptr_t kNumTemps = op_kind() == Token::kSHL ? 2 : 1;
+ const intptr_t kNumTemps = op_kind() == Token::kSHL ? 2 : 0;
LocationSummary* summary =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
summary->set_in(1, Location::RegisterLocation(ECX));
- summary->set_temp(0, Location::RequiresRegister());
if (op_kind() == Token::kSHL) {
+ summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
}
summary->set_out(0, Location::SameAsFirstInput());
@@ -5781,16 +5824,19 @@
void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- XmmRegister left = locs()->in(0).fpu_reg();
- ASSERT(locs()->in(1).reg() == ECX);
- ASSERT(locs()->out(0).fpu_reg() == left);
+ PairLocation* left_pair = locs()->in(0).AsPairLocation();
+ Register left_lo = left_pair->At(0).reg();
+ Register left_hi = left_pair->At(1).reg();
+ PairLocation* out_pair = locs()->out(0).AsPairLocation();
+ Register out_lo = out_pair->At(0).reg();
+ Register out_hi = out_pair->At(1).reg();
+ ASSERT(out_lo == left_lo);
+ ASSERT(out_hi == left_hi);
Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptShiftMintOp);
Label done;
__ testl(ECX, ECX);
__ j(ZERO, &done); // Shift by 0 is a nop.
- __ subl(ESP, Immediate(2 * kWordSize));
- __ movq(Address(ESP, 0), left);
// Deoptimize if shift count is > 31.
// sarl operation masks the count to 5 bits and
// shrd is undefined with count > operand size (32)
@@ -5801,23 +5847,21 @@
__ j(ABOVE, deopt);
switch (op_kind()) {
case Token::kSHR: {
- Register temp = locs()->temp(0).reg();
- __ movl(temp, Address(ESP, 1 * kWordSize)); // High half.
- __ shrd(Address(ESP, 0), temp); // Shift count in CL.
- __ sarl(Address(ESP, 1 * kWordSize), ECX); // Shift count in CL.
+ __ shrd(left_lo, left_hi); // Shift count in CL.
+ __ sarl(left_hi, ECX); // Shift count in CL.
break;
}
case Token::kSHL: {
Register temp1 = locs()->temp(0).reg();
Register temp2 = locs()->temp(1).reg();
- __ movl(temp1, Address(ESP, 0 * kWordSize)); // Low 32 bits.
- __ movl(temp2, Address(ESP, 1 * kWordSize)); // High 32 bits.
- __ shll(Address(ESP, 0 * kWordSize), ECX); // Shift count in CL.
- __ shld(Address(ESP, 1 * kWordSize), temp1); // Shift count in CL.
+ __ movl(temp1, left_lo); // Low 32 bits.
+ __ movl(temp2, left_hi); // High 32 bits.
+ __ shll(left_lo, ECX); // Shift count in CL.
+ __ shld(left_hi, temp1); // Shift count in CL.
// Check for overflow by shifting back the high 32 bits
// and comparing with the input.
__ movl(temp1, temp2);
- __ movl(temp2, Address(ESP, 1 * kWordSize));
+ __ movl(temp2, left_hi);
__ sarl(temp2, ECX);
__ cmpl(temp1, temp2);
__ j(NOT_EQUAL, deopt);
@@ -5827,23 +5871,20 @@
UNREACHABLE();
break;
}
- __ movq(left, Address(ESP, 0));
- __ addl(ESP, Immediate(2 * kWordSize));
__ Bind(&done);
if (FLAG_throw_on_javascript_int_overflow) {
- Register tmp = locs()->temp(0).reg();
- EmitJavascriptIntOverflowCheck(compiler, deopt, left, tmp);
+ EmitJavascriptIntOverflowCheck(compiler, deopt, left_lo, left_hi);
}
}
LocationSummary* UnaryMintOpInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 1;
- const intptr_t kNumTemps =
- FLAG_throw_on_javascript_int_overflow ? 1 : 0;
+ const intptr_t kNumTemps = 0;
LocationSummary* summary =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_in(0, Location::Pair(Location::RequiresRegister(),
+ Location::RequiresRegister()));
summary->set_out(0, Location::SameAsFirstInput());
if (FLAG_throw_on_javascript_int_overflow) {
summary->set_temp(0, Location::RequiresRegister());
@@ -5854,17 +5895,25 @@
void UnaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(op_kind() == Token::kBIT_NOT);
- XmmRegister value = locs()->in(0).fpu_reg();
- ASSERT(value == locs()->out(0).fpu_reg());
+ PairLocation* left_pair = locs()->in(0).AsPairLocation();
+ Register left_lo = left_pair->At(0).reg();
+ Register left_hi = left_pair->At(1).reg();
+ PairLocation* out_pair = locs()->out(0).AsPairLocation();
+ Register out_lo = out_pair->At(0).reg();
+ Register out_hi = out_pair->At(1).reg();
+ ASSERT(out_lo == left_lo);
+ ASSERT(out_hi == left_hi);
+
Label* deopt = NULL;
if (FLAG_throw_on_javascript_int_overflow) {
deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryMintOp);
}
- __ pcmpeqq(XMM0, XMM0); // Generate all 1's.
- __ pxor(value, XMM0);
+
+ __ notl(left_lo);
+ __ notl(left_hi);
+
if (FLAG_throw_on_javascript_int_overflow) {
- Register tmp = locs()->temp(0).reg();
- EmitJavascriptIntOverflowCheck(compiler, deopt, value, tmp);
+ EmitJavascriptIntOverflowCheck(compiler, deopt, left_lo, left_hi);
}
}
diff --git a/runtime/vm/intermediate_language_x64.cc b/runtime/vm/intermediate_language_x64.cc
index 3c03cae..063890c 100644
--- a/runtime/vm/intermediate_language_x64.cc
+++ b/runtime/vm/intermediate_language_x64.cc
@@ -961,16 +961,16 @@
if (is_external) {
element_address = index.IsRegister()
- ? FlowGraphCompiler::ExternalElementAddressForRegIndex(
+ ? compiler->ExternalElementAddressForRegIndex(
index_scale(), array, index.reg())
- : FlowGraphCompiler::ExternalElementAddressForIntIndex(
+ : compiler->ExternalElementAddressForIntIndex(
index_scale(), array, Smi::Cast(index.constant()).Value());
} else {
ASSERT(this->array()->definition()->representation() == kTagged);
element_address = index.IsRegister()
- ? FlowGraphCompiler::ElementAddressForRegIndex(
+ ? compiler->ElementAddressForRegIndex(
class_id(), index_scale(), array, index.reg())
- : FlowGraphCompiler::ElementAddressForIntIndex(
+ : compiler->ElementAddressForIntIndex(
class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value());
}
@@ -1143,16 +1143,16 @@
Address element_address(kNoRegister, 0);
if (is_external) {
element_address = index.IsRegister()
- ? FlowGraphCompiler::ExternalElementAddressForRegIndex(
+ ? compiler->ExternalElementAddressForRegIndex(
index_scale(), array, index.reg())
- : FlowGraphCompiler::ExternalElementAddressForIntIndex(
+ : compiler->ExternalElementAddressForIntIndex(
index_scale(), array, Smi::Cast(index.constant()).Value());
} else {
ASSERT(this->array()->definition()->representation() == kTagged);
element_address = index.IsRegister()
- ? FlowGraphCompiler::ElementAddressForRegIndex(
+ ? compiler->ElementAddressForRegIndex(
class_id(), index_scale(), array, index.reg())
- : FlowGraphCompiler::ElementAddressForIntIndex(
+ : compiler->ElementAddressForIntIndex(
class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value());
}
@@ -2264,6 +2264,7 @@
__ movsd(FieldAddress(result, Double::value_offset()), value);
__ jmp(&done);
}
+
{
__ Bind(&load_float32x4);
BoxFloat32x4SlowPath* slow_path = new BoxFloat32x4SlowPath(this);
@@ -2280,9 +2281,9 @@
__ movups(FieldAddress(result, Float32x4::value_offset()), value);
__ jmp(&done);
}
+
{
__ Bind(&load_float64x2);
-
BoxFloat64x2SlowPath* slow_path = new BoxFloat64x2SlowPath(this);
compiler->AddSlowPathCode(slow_path);
diff --git a/runtime/vm/intrinsifier_arm64.cc b/runtime/vm/intrinsifier_arm64.cc
index b34ebaf..a6fbb38 100644
--- a/runtime/vm/intrinsifier_arm64.cc
+++ b/runtime/vm/intrinsifier_arm64.cc
@@ -197,7 +197,7 @@
// Set the length field in the growable array object to 0.
__ LoadImmediate(R1, 0, kNoPP);
__ str(R1, FieldAddress(R0, GrowableObjectArray::length_offset()));
- __ UpdateAllocationStats(kGrowableObjectArrayCid, R1, kNoPP);
+ __ UpdateAllocationStats(kGrowableObjectArrayCid, kNoPP);
__ ret(); // Returns the newly allocated object in R0.
__ Bind(&fall_through);
@@ -409,7 +409,7 @@
__ LoadImmediate(R3, heap->TopAddress(), kNoPP); \
__ str(R1, Address(R3, 0)); \
__ AddImmediate(R0, R0, kHeapObjectTag, kNoPP); \
- __ UpdateAllocationStatsWithSize(cid, R2, R4, kNoPP); \
+ __ UpdateAllocationStatsWithSize(cid, R2, kNoPP); \
/* Initialize the tags. */ \
/* R0: new object start as a tagged pointer. */ \
/* R1: new object end address. */ \
@@ -959,7 +959,7 @@
}
const Class& double_class = Class::Handle(
Isolate::Current()->object_store()->double_class());
- __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP);
+ __ TryAllocate(double_class, &fall_through, R0, kNoPP);
__ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP);
__ ret();
__ Bind(&fall_through);
@@ -1001,7 +1001,7 @@
__ fmuld(V0, V0, V1);
const Class& double_class = Class::Handle(
Isolate::Current()->object_store()->double_class());
- __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP);
+ __ TryAllocate(double_class, &fall_through, R0, kNoPP);
__ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP);
__ ret();
__ Bind(&fall_through);
@@ -1019,7 +1019,7 @@
__ scvtfd(V0, R0);
const Class& double_class = Class::Handle(
Isolate::Current()->object_store()->double_class());
- __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP);
+ __ TryAllocate(double_class, &fall_through, R0, kNoPP);
__ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP);
__ ret();
__ Bind(&fall_through);
@@ -1099,7 +1099,7 @@
__ fsqrtd(V0, V1);
const Class& double_class = Class::Handle(
Isolate::Current()->object_store()->double_class());
- __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP);
+ __ TryAllocate(double_class, &fall_through, R0, kNoPP);
__ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP);
__ ret();
__ Bind(&is_smi);
@@ -1322,7 +1322,7 @@
// next object start and initialize the object.
__ str(R1, Address(R3));
__ AddImmediate(R0, R0, kHeapObjectTag, kNoPP);
- __ UpdateAllocationStatsWithSize(kOneByteStringCid, R2, R3, kNoPP);
+ __ UpdateAllocationStatsWithSize(kOneByteStringCid, R2, kNoPP);
// Initialize the tags.
// R0: new object start as a tagged pointer.
diff --git a/runtime/vm/intrinsifier_ia32.cc b/runtime/vm/intrinsifier_ia32.cc
index 519eef1..438a994 100644
--- a/runtime/vm/intrinsifier_ia32.cc
+++ b/runtime/vm/intrinsifier_ia32.cc
@@ -1248,16 +1248,12 @@
__ movl(EAX, Address(ESP, + 1 * kWordSize)); // Receiver.
__ movl(EBX, FieldAddress(EAX, state_field.Offset())); // Field '_state'.
// Addresses of _state[0] and _state[1].
- Address addr_0 = FlowGraphCompiler::ElementAddressForIntIndex(
- kTypedDataUint32ArrayCid,
- FlowGraphCompiler::ElementSizeFor(kTypedDataUint32ArrayCid),
- EBX,
- 0);
- Address addr_1 = FlowGraphCompiler::ElementAddressForIntIndex(
- kTypedDataUint32ArrayCid,
- FlowGraphCompiler::ElementSizeFor(kTypedDataUint32ArrayCid),
- EBX,
- 1);
+ const intptr_t index_scale =
+ FlowGraphCompiler::ElementSizeFor(kTypedDataUint32ArrayCid);
+ const intptr_t offset =
+ FlowGraphCompiler::DataOffsetFor(kTypedDataUint32ArrayCid);
+ Address addr_0 = FieldAddress(EBX, 0 * index_scale + offset);
+ Address addr_1 = FieldAddress(EBX, 1 * index_scale + offset);
__ movl(EAX, Immediate(a_int32_value));
// 64-bit multiply EAX * value -> EDX:EAX.
__ mull(addr_0);
diff --git a/runtime/vm/intrinsifier_x64.cc b/runtime/vm/intrinsifier_x64.cc
index a34cc8b..5364ce1 100644
--- a/runtime/vm/intrinsifier_x64.cc
+++ b/runtime/vm/intrinsifier_x64.cc
@@ -1150,17 +1150,12 @@
__ movq(RAX, Address(RSP, + 1 * kWordSize)); // Receiver.
__ movq(RBX, FieldAddress(RAX, state_field.Offset())); // Field '_state'.
// Addresses of _state[0] and _state[1].
- Address addr_0 = FlowGraphCompiler::ElementAddressForIntIndex(
- kTypedDataUint32ArrayCid,
- FlowGraphCompiler::ElementSizeFor(kTypedDataUint32ArrayCid),
- RBX,
- 0);
- Address addr_1 = FlowGraphCompiler::ElementAddressForIntIndex(
- kTypedDataUint32ArrayCid,
- FlowGraphCompiler::ElementSizeFor(kTypedDataUint32ArrayCid),
- RBX,
- 1);
-
+ const intptr_t index_scale =
+ FlowGraphCompiler::ElementSizeFor(kTypedDataUint32ArrayCid);
+ const intptr_t offset =
+ FlowGraphCompiler::DataOffsetFor(kTypedDataUint32ArrayCid);
+ Address addr_0 = FieldAddress(RBX, 0 * index_scale + offset);
+ Address addr_1 = FieldAddress(RBX, 1 * index_scale + offset);
__ movq(RAX, Immediate(a_int_value));
__ movl(RCX, addr_0);
__ imulq(RCX, RAX);
diff --git a/runtime/vm/locations.cc b/runtime/vm/locations.cc
index d9d044a..9809707 100644
--- a/runtime/vm/locations.cc
+++ b/runtime/vm/locations.cc
@@ -210,6 +210,12 @@
f->Print("DS%+" Pd "", stack_index());
} else if (kind() == kQuadStackSlot) {
f->Print("QS%+" Pd "", stack_index());
+ } else if (IsPairLocation()) {
+ f->Print("(");
+ AsPairLocation()->At(0).PrintTo(f);
+ f->Print(", ");
+ AsPairLocation()->At(1).PrintTo(f);
+ f->Print(")");
} else {
f->Print("%s", Name());
}
@@ -233,6 +239,19 @@
}
+Location Location::Copy() const {
+ if (IsPairLocation()) {
+ PairLocation* pair = AsPairLocation();
+ ASSERT(!pair->At(0).IsPairLocation());
+ ASSERT(!pair->At(1).IsPairLocation());
+ return Location::Pair(pair->At(0).Copy(), pair->At(1).Copy());
+ } else {
+ // Copy by value.
+ return *this;
+ }
+}
+
+
void LocationSummary::PrintTo(BufferFormatter* f) const {
if (input_count() > 0) {
f->Print(" (");
diff --git a/runtime/vm/locations.h b/runtime/vm/locations.h
index cabb8db..cf6a60b 100644
--- a/runtime/vm/locations.h
+++ b/runtime/vm/locations.h
@@ -331,11 +331,15 @@
}
intptr_t stack_index() const {
- ASSERT(IsStackSlot() || IsDoubleStackSlot() || IsQuadStackSlot());
+ ASSERT(HasStackIndex());
// Decode stack index manually to preserve sign.
return payload() - kStackIndexBias;
}
+ bool HasStackIndex() const {
+ return IsStackSlot() || IsDoubleStackSlot() || IsQuadStackSlot();
+ }
+
// Return a memory operand for stack slot locations.
Address ToStackSlotAddress() const;
@@ -365,6 +369,8 @@
return KindField::decode(value_);
}
+ Location Copy() const;
+
private:
explicit Location(uword value) : value_(value) { }
@@ -428,15 +434,20 @@
class RegisterSet : public ValueObject {
public:
- RegisterSet() : cpu_registers_(0), fpu_registers_(0) {
+ RegisterSet() : cpu_registers_(0), untagged_cpu_registers_(0),
+ fpu_registers_(0) {
ASSERT(kNumberOfCpuRegisters <= (kWordSize * kBitsPerByte));
ASSERT(kNumberOfFpuRegisters <= (kWordSize * kBitsPerByte));
}
- void Add(Location loc) {
+ void Add(Location loc, Representation rep = kTagged) {
if (loc.IsRegister()) {
cpu_registers_ |= (1 << loc.reg());
+ if (rep != kTagged) {
+ // CPU register contains an untagged value.
+ MarkUntagged(loc);
+ }
} else if (loc.IsFpuRegister()) {
fpu_registers_ |= (1 << loc.fpu_reg());
}
@@ -450,6 +461,32 @@
}
}
+ void DebugPrint() {
+ for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
+ Register r = static_cast<Register>(i);
+ if (ContainsRegister(r)) {
+ OS::Print("%s %s\n", Assembler::RegisterName(r),
+ IsTagged(r) ? "tagged" : "untagged");
+ }
+ }
+
+ for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
+ FpuRegister r = static_cast<FpuRegister>(i);
+ if (ContainsFpuRegister(r)) {
+ OS::Print("%s\n", Assembler::FpuRegisterName(r));
+ }
+ }
+ }
+
+ void MarkUntagged(Location loc) {
+ ASSERT(loc.IsRegister());
+ untagged_cpu_registers_ |= (1 << loc.reg());
+ }
+
+ bool IsTagged(Register reg) const {
+ return (untagged_cpu_registers_ & (1 << reg)) == 0;
+ }
+
bool ContainsRegister(Register reg) const {
return (cpu_registers_ & (1 << reg)) != 0;
}
@@ -468,6 +505,7 @@
private:
intptr_t cpu_registers_;
+ intptr_t untagged_cpu_registers_;
intptr_t fpu_registers_;
DISALLOW_COPY_AND_ASSIGN(RegisterSet);
@@ -561,6 +599,10 @@
return contains_call_ != kNoCall;
}
+ bool HasCallOnSlowPath() {
+ return can_call() && !always_calls();
+ }
+
void PrintTo(BufferFormatter* f) const;
static LocationSummary* Make(intptr_t input_count,
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 9f677fd..3c18a07 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -2514,7 +2514,14 @@
function.SwitchToUnoptimizedCode();
} else if (function.unoptimized_code() == code.raw()) {
ReportSwitchingCode(code);
+ // Remove the code object from the function. The next time the
+ // function is invoked, it will be compiled again.
function.ClearCode();
+ // Invalidate the old code object so existing references to it
+ // (from optimized code) will fail when invoked.
+ if (!CodePatcher::IsEntryPatched(code)) {
+ CodePatcher::PatchEntry(code);
+ }
}
}
}
@@ -7847,7 +7854,7 @@
return;
}
// Get the source, scan and allocate the token stream.
- VMTagScope tagScope(isolate, VMTag::kCompileTagId);
+ VMTagScope tagScope(isolate, VMTag::kCompileScannerTagId);
TimerScope timer(FLAG_compiler_stats, &CompilerStats::scanner_timer);
const String& src = String::Handle(isolate, Source());
Scanner scanner(src, private_key);
@@ -9559,9 +9566,10 @@
virtual void ReportSwitchingCode(const Code& code) {
if (FLAG_trace_deoptimization || FLAG_trace_deoptimization_verbose) {
- OS::PrintErr("Prefix '%s': deleting %s code for function '%s'\n",
+ OS::PrintErr("Prefix '%s': deleting %s code for %s function '%s'\n",
String::Handle(prefix_.name()).ToCString(),
code.is_optimized() ? "optimized" : "unoptimized",
+ CodePatcher::IsEntryPatched(code) ? "patched" : "unpatched",
Function::Handle(code.function()).ToCString());
}
}
@@ -13830,7 +13838,7 @@
return true;
}
const TypeArguments& args = TypeArguments::Handle(arguments());
- const intptr_t num_type_args = args.Length();
+ intptr_t num_type_args = args.Length();
intptr_t len = num_type_args; // Check the full vector of type args.
ASSERT(num_type_args > 0);
// This type is not instantiated if it refers to type parameters.
@@ -13841,8 +13849,10 @@
// arguments and not just at the type parameters.
if (HasResolvedTypeClass()) {
const Class& cls = Class::Handle(type_class());
+ len = cls.NumTypeArguments();
+ ASSERT(num_type_args >= len); // The vector may be longer than necessary.
+ num_type_args = len;
len = cls.NumTypeParameters(); // Check the type parameters only.
- ASSERT(num_type_args == cls.NumTypeArguments());
}
return (len == 0) || args.IsSubvectorInstantiated(num_type_args - len, len);
}
diff --git a/runtime/vm/parser.cc b/runtime/vm/parser.cc
index 4e61823..51cf2c1 100644
--- a/runtime/vm/parser.cc
+++ b/runtime/vm/parser.cc
@@ -381,7 +381,7 @@
Isolate* isolate = Isolate::Current();
ASSERT(isolate->long_jump_base()->IsSafeToJump());
TimerScope timer(FLAG_compiler_stats, &CompilerStats::parser_timer);
- VMTagScope tagScope(isolate, VMTag::kCompileTagId);
+ VMTagScope tagScope(isolate, VMTag::kCompileTopLevelTagId);
Parser parser(script, library, 0);
parser.ParseTopLevel();
}
@@ -730,7 +730,6 @@
void Parser::ParseClass(const Class& cls) {
if (!cls.is_synthesized_class()) {
Isolate* isolate = Isolate::Current();
- VMTagScope tagScope(isolate, VMTag::kCompileTagId);
TimerScope timer(FLAG_compiler_stats, &CompilerStats::parser_timer);
ASSERT(isolate->long_jump_base()->IsSafeToJump());
const Script& script = Script::Handle(isolate, cls.script());
@@ -789,7 +788,6 @@
void Parser::ParseFunction(ParsedFunction* parsed_function) {
Isolate* isolate = Isolate::Current();
- VMTagScope tagScope(isolate, VMTag::kCompileTagId);
TimerScope timer(FLAG_compiler_stats, &CompilerStats::parser_timer);
CompilerStats::num_functions_compiled++;
ASSERT(isolate->long_jump_base()->IsSafeToJump());
@@ -8777,14 +8775,15 @@
// Resolve class.
if (!type->HasResolvedTypeClass()) {
const UnresolvedClass& unresolved_class =
- UnresolvedClass::Handle(type->unresolved_class());
+ UnresolvedClass::Handle(isolate(), type->unresolved_class());
const String& unresolved_class_name =
- String::Handle(unresolved_class.ident());
- Class& resolved_type_class = Class::Handle();
+ String::Handle(isolate(), unresolved_class.ident());
+ Class& resolved_type_class = Class::Handle(isolate());
if (unresolved_class.library_prefix() == LibraryPrefix::null()) {
if (!scope_class.IsNull()) {
// First check if the type is a type parameter of the given scope class.
const TypeParameter& type_parameter = TypeParameter::Handle(
+ isolate(),
scope_class.LookupTypeParameter(unresolved_class_name));
if (!type_parameter.IsNull()) {
// A type parameter is considered to be a malformed type when
@@ -8792,23 +8791,23 @@
if (ParsingStaticMember()) {
ASSERT(scope_class.raw() == current_class().raw());
*type = ClassFinalizer::NewFinalizedMalformedType(
- Error::Handle(), // No previous error.
+ Error::Handle(isolate()), // No previous error.
script_,
type->token_pos(),
"type parameter '%s' cannot be referenced "
"from static member",
- String::Handle(type_parameter.name()).ToCString());
+ String::Handle(isolate(), type_parameter.name()).ToCString());
return;
}
// A type parameter cannot be parameterized, so make the type
// malformed if type arguments have previously been parsed.
- if (!TypeArguments::Handle(type->arguments()).IsNull()) {
+ if (!TypeArguments::Handle(isolate(), type->arguments()).IsNull()) {
*type = ClassFinalizer::NewFinalizedMalformedType(
- Error::Handle(), // No previous error.
+ Error::Handle(isolate()), // No previous error.
script_,
type_parameter.token_pos(),
"type parameter '%s' cannot be parameterized",
- String::Handle(type_parameter.name()).ToCString());
+ String::Handle(isolate(), type_parameter.name()).ToCString());
return;
}
*type = type_parameter.raw();
@@ -8824,7 +8823,7 @@
}
} else {
LibraryPrefix& lib_prefix =
- LibraryPrefix::Handle(unresolved_class.library_prefix());
+ LibraryPrefix::Handle(isolate(), unresolved_class.library_prefix());
// Resolve class name in the scope of the library prefix.
resolved_type_class =
ResolveClassInPrefixScope(lib_prefix, unresolved_class_name);
@@ -8836,21 +8835,24 @@
parameterized_type.set_type_class(resolved_type_class);
} else if (finalization >= ClassFinalizer::kCanonicalize) {
ClassFinalizer::FinalizeMalformedType(
- Error::Handle(), // No previous error.
+ Error::Handle(isolate()), // No previous error.
script_,
parameterized_type,
"type '%s' is not loaded",
- String::Handle(parameterized_type.UserVisibleName()).ToCString());
+ String::Handle(isolate(),
+ parameterized_type.UserVisibleName()).ToCString());
return;
}
}
// Resolve type arguments, if any.
- const TypeArguments& arguments = TypeArguments::Handle(type->arguments());
- TypeArguments::Handle(type->arguments());
+ const TypeArguments& arguments = TypeArguments::Handle(isolate(),
+ type->arguments());
+ TypeArguments::Handle(isolate(), type->arguments());
if (!arguments.IsNull()) {
const intptr_t num_arguments = arguments.Length();
for (intptr_t i = 0; i < num_arguments; i++) {
- AbstractType& type_argument = AbstractType::Handle(arguments.TypeAt(i));
+ AbstractType& type_argument = AbstractType::Handle(isolate(),
+ arguments.TypeAt(i));
ResolveTypeFromClass(scope_class, finalization, &type_argument);
arguments.SetTypeAt(i, type_argument);
}
diff --git a/runtime/vm/raw_object.h b/runtime/vm/raw_object.h
index 5d394f6..ebd2727 100644
--- a/runtime/vm/raw_object.h
+++ b/runtime/vm/raw_object.h
@@ -259,7 +259,7 @@
return SizeBits::update(SizeToTagValue(size), tag);
}
- private:
+ private:
// The actual unscaled bit field used within the tag field.
class SizeBits : public BitField<intptr_t, kSizeTagPos, kSizeTagSize> {};
@@ -1259,6 +1259,7 @@
int64_t value_;
+ friend class Api;
friend class SnapshotReader;
};
@@ -1289,6 +1290,7 @@
double value_;
+ friend class Api;
friend class SnapshotReader;
};
diff --git a/runtime/vm/service.cc b/runtime/vm/service.cc
index 41c3ff5..0733461 100644
--- a/runtime/vm/service.cc
+++ b/runtime/vm/service.cc
@@ -851,6 +851,33 @@
}
+static bool ContainsNonInstance(const Object& obj) {
+ if (obj.IsArray()) {
+ const Array& array = Array::Cast(obj);
+ Object& element = Object::Handle();
+ for (intptr_t i = 0; i < array.Length(); ++i) {
+ element = array.At(i);
+ if (!element.IsInstance()) {
+ return true;
+ }
+ }
+ return false;
+ } else if (obj.IsGrowableObjectArray()) {
+ const GrowableObjectArray& array = GrowableObjectArray::Cast(obj);
+ Object& element = Object::Handle();
+ for (intptr_t i = 0; i < array.Length(); ++i) {
+ element = array.At(i);
+ if (!element.IsInstance()) {
+ return true;
+ }
+ }
+ return false;
+ } else {
+ return !obj.IsInstance();
+ }
+}
+
+
static bool HandleInstanceCommands(Isolate* isolate,
const Object& obj,
JSONStream* js,
@@ -876,6 +903,10 @@
js->num_arguments());
return true;
}
+ if (ContainsNonInstance(obj)) {
+ PrintError(js, "attempt to evaluate against internal VM object\n");
+ return true;
+ }
const char* expr = js->LookupOption("expr");
if (expr == NULL) {
PrintError(js, "eval expects an 'expr' option\n",
diff --git a/runtime/vm/service_test.cc b/runtime/vm/service_test.cc
index 812ce09..ae67790 100644
--- a/runtime/vm/service_test.cc
+++ b/runtime/vm/service_test.cc
@@ -562,6 +562,20 @@
ExpectSubstringF(handler.msg(),
"\"id\":\"objects\\/int-%" Pd "\"",
arr.raw()->Size() + arr.At(0)->Size());
+
+ // eval against list containing an internal object.
+ Object& internal_object = Object::Handle();
+ internal_object = LiteralToken::New();
+ arr.SetAt(0, internal_object);
+ service_msg = Eval(lib,
+ "[port, ['objects', '$validId', 'eval'], "
+ "['expr'], ['toString()']]");
+ Service::HandleIsolateMessage(isolate, service_msg);
+ handler.HandleNextMessage();
+ ExpectSubstringF(handler.msg(), "\"type\":\"Error\"");
+ ExpectSubstringF(
+ handler.msg(),
+ "\"message\":\"attempt to evaluate against internal VM object\\n\"");
}
diff --git a/runtime/vm/simulator_arm64.cc b/runtime/vm/simulator_arm64.cc
index d2006a2..0085686 100644
--- a/runtime/vm/simulator_arm64.cc
+++ b/runtime/vm/simulator_arm64.cc
@@ -93,7 +93,9 @@
Simulator* sim_;
bool GetValue(char* desc, int64_t* value);
+ bool GetSValue(char* desc, int32_t* value);
bool GetDValue(char* desc, int64_t* value);
+ bool GetQValue(char* desc, simd_value_t* value);
// TODO(zra): Breakpoints.
};
@@ -185,10 +187,30 @@
}
+bool SimulatorDebugger::GetSValue(char* desc, int32_t* value) {
+ VRegister vreg = LookupVRegisterByName(desc);
+ if (vreg != kNoVRegister) {
+ *value = sim_->get_vregisters(vreg, 0);
+ return true;
+ }
+ if (desc[0] == '*') {
+ int64_t addr;
+ if (GetValue(desc + 1, &addr)) {
+ if (Simulator::IsIllegalAddress(addr)) {
+ return false;
+ }
+ *value = *(reinterpret_cast<int32_t*>(addr));
+ return true;
+ }
+ }
+ return false;
+}
+
+
bool SimulatorDebugger::GetDValue(char* desc, int64_t* value) {
VRegister vreg = LookupVRegisterByName(desc);
if (vreg != kNoVRegister) {
- *value = sim_->get_vregisterd(vreg);
+ *value = sim_->get_vregisterd(vreg, 0);
return true;
}
if (desc[0] == '*') {
@@ -205,6 +227,26 @@
}
+bool SimulatorDebugger::GetQValue(char* desc, simd_value_t* value) {
+ VRegister vreg = LookupVRegisterByName(desc);
+ if (vreg != kNoVRegister) {
+ sim_->get_vregister(vreg, value);
+ return true;
+ }
+ if (desc[0] == '*') {
+ int64_t addr;
+ if (GetValue(desc + 1, &addr)) {
+ if (Simulator::IsIllegalAddress(addr)) {
+ return false;
+ }
+ *value = *(reinterpret_cast<simd_value_t*>(addr));
+ return true;
+ }
+ }
+ return false;
+}
+
+
void SimulatorDebugger::Debug() {
intptr_t last_pc = -1;
bool done = false;
@@ -259,7 +301,9 @@
"gdb -- transfer control to gdb\n"
"h/help -- print this help string\n"
"p/print <reg or value or *addr> -- print integer value\n"
- "pd/printdouble <dreg or *addr> -- print double value\n"
+ "pf/printfloat <vreg or *addr> --print float value\n"
+ "pd/printdouble <vreg or *addr> -- print double value\n"
+ "pq/printquad <vreg or *addr> -- print vector register\n"
"po/printobject <*reg or *addr> -- print object\n"
"si/stepi -- single step an instruction\n"
"q/quit -- Quit the debugger and exit the program\n");
@@ -284,6 +328,20 @@
} else {
OS::Print("print <reg or value or *addr>\n");
}
+ } else if ((strcmp(cmd, "pf") == 0) ||
+ (strcmp(cmd, "printfloat") == 0)) {
+ if (args == 2) {
+ int32_t value;
+ if (GetSValue(arg1, &value)) {
+ float svalue = bit_cast<float, int32_t>(value);
+ OS::Print("%s: %d 0x%x %.8g\n",
+ arg1, value, value, svalue);
+ } else {
+ OS::Print("%s unrecognized\n", arg1);
+ }
+ } else {
+ OS::Print("printfloat <vreg or *addr>\n");
+ }
} else if ((strcmp(cmd, "pd") == 0) ||
(strcmp(cmd, "printdouble") == 0)) {
if (args == 2) {
@@ -296,7 +354,36 @@
OS::Print("%s unrecognized\n", arg1);
}
} else {
- OS::Print("printdouble <dreg or *addr>\n");
+ OS::Print("printdouble <vreg or *addr>\n");
+ }
+ } else if ((strcmp(cmd, "pq") == 0) ||
+ (strcmp(cmd, "printquad") == 0)) {
+ if (args == 2) {
+ simd_value_t quad_value;
+ if (GetQValue(arg1, &quad_value)) {
+ const int64_t d0 = quad_value.bits.i64[0];
+ const int64_t d1 = quad_value.bits.i64[1];
+ const double dval0 = bit_cast<double, int64_t>(d0);
+ const double dval1 = bit_cast<double, int64_t>(d1);
+ const int32_t s0 = quad_value.bits.i32[0];
+ const int32_t s1 = quad_value.bits.i32[1];
+ const int32_t s2 = quad_value.bits.i32[2];
+ const int32_t s3 = quad_value.bits.i32[3];
+ const float sval0 = bit_cast<float, int32_t>(s0);
+ const float sval1 = bit_cast<float, int32_t>(s1);
+ const float sval2 = bit_cast<float, int32_t>(s2);
+ const float sval3 = bit_cast<float, int32_t>(s3);
+ OS::Print("%s: %"Pu64" 0x%"Px64" %.8g\n", arg1, d0, d0, dval0);
+ OS::Print("%s: %"Pu64" 0x%"Px64" %.8g\n", arg1, d1, d1, dval1);
+ OS::Print("%s: %d 0x%x %.8g\n", arg1, s0, s0, sval0);
+ OS::Print("%s: %d 0x%x %.8g\n", arg1, s1, s1, sval1);
+ OS::Print("%s: %d 0x%x %.8g\n", arg1, s2, s2, sval2);
+ OS::Print("%s: %d 0x%x %.8g\n", arg1, s3, s3, sval3);
+ } else {
+ OS::Print("%s unrecognized\n", arg1);
+ }
+ } else {
+ OS::Print("printquad <vreg or *addr>\n");
}
} else if ((strcmp(cmd, "po") == 0) ||
(strcmp(cmd, "printobject") == 0)) {
@@ -471,8 +558,8 @@
v_flag_ = false;
for (int i = 0; i < kNumberOfVRegisters; i++) {
- vregisters_[i].lo = 0;
- vregisters_[i].hi = 0;
+ vregisters_[i].bits.i64[0] = 0;
+ vregisters_[i].bits.i64[1] = 0;
}
// The sp is initialized to point to the bottom (high address) of the
@@ -616,30 +703,45 @@
}
-int64_t Simulator::get_vregisterd(VRegister reg) const {
+int32_t Simulator::get_vregisters(VRegister reg, int idx) const {
ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
- return vregisters_[reg].lo;
+ ASSERT((idx >= 0) && (idx <= 3));
+ return vregisters_[reg].bits.i32[idx];
}
-void Simulator::set_vregisterd(VRegister reg, int64_t value) {
+void Simulator::set_vregisters(VRegister reg, int idx, int32_t value) {
ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
- vregisters_[reg].lo = value;
- vregisters_[reg].hi = 0;
+ ASSERT((idx >= 0) && (idx <= 3));
+ vregisters_[reg].bits.i32[idx] = value;
+}
+
+
+int64_t Simulator::get_vregisterd(VRegister reg, int idx) const {
+ ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
+ ASSERT((idx == 0) || (idx == 1));
+ return vregisters_[reg].bits.i64[idx];
+}
+
+
+void Simulator::set_vregisterd(VRegister reg, int idx, int64_t value) {
+ ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
+ ASSERT((idx == 0) || (idx == 1));
+ vregisters_[reg].bits.i64[idx] = value;
}
void Simulator::get_vregister(VRegister reg, simd_value_t* value) const {
ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
- value->lo = vregisters_[reg].lo;
- value->hi = vregisters_[reg].hi;
+ value->bits.i64[0] = vregisters_[reg].bits.i64[0];
+ value->bits.i64[1] = vregisters_[reg].bits.i64[1];
}
void Simulator::set_vregister(VRegister reg, const simd_value_t& value) {
ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
- vregisters_[reg].lo = value.lo;
- vregisters_[reg].hi = value.hi;
+ vregisters_[reg].bits.i64[0] = value.bits.i64[0];
+ vregisters_[reg].bits.i64[1] = value.bits.i64[1];
}
@@ -1217,16 +1319,17 @@
(redirection->argument_count() <= 8));
SimulatorLeafFloatRuntimeCall target =
reinterpret_cast<SimulatorLeafFloatRuntimeCall>(external);
- const double d0 = bit_cast<double, int64_t>(get_vregisterd(V0));
- const double d1 = bit_cast<double, int64_t>(get_vregisterd(V1));
- const double d2 = bit_cast<double, int64_t>(get_vregisterd(V2));
- const double d3 = bit_cast<double, int64_t>(get_vregisterd(V3));
- const double d4 = bit_cast<double, int64_t>(get_vregisterd(V4));
- const double d5 = bit_cast<double, int64_t>(get_vregisterd(V5));
- const double d6 = bit_cast<double, int64_t>(get_vregisterd(V6));
- const double d7 = bit_cast<double, int64_t>(get_vregisterd(V7));
+ const double d0 = bit_cast<double, int64_t>(get_vregisterd(V0, 0));
+ const double d1 = bit_cast<double, int64_t>(get_vregisterd(V1, 0));
+ const double d2 = bit_cast<double, int64_t>(get_vregisterd(V2, 0));
+ const double d3 = bit_cast<double, int64_t>(get_vregisterd(V3, 0));
+ const double d4 = bit_cast<double, int64_t>(get_vregisterd(V4, 0));
+ const double d5 = bit_cast<double, int64_t>(get_vregisterd(V5, 0));
+ const double d6 = bit_cast<double, int64_t>(get_vregisterd(V6, 0));
+ const double d7 = bit_cast<double, int64_t>(get_vregisterd(V7, 0));
const double res = target(d0, d1, d2, d3, d4, d5, d6, d7);
- set_vregisterd(V0, bit_cast<int64_t, double>(res));
+ set_vregisterd(V0, 0, bit_cast<int64_t, double>(res));
+ set_vregisterd(V0, 1, 0);
} else if (redirection->call_kind() == kBootstrapNativeCall) {
NativeArguments* arguments;
arguments = reinterpret_cast<NativeArguments*>(get_register(R0));
@@ -1477,7 +1580,7 @@
if (instr->Bit(26) == 1) {
if (instr->Bit(22) == 0) {
// Format(instr, "fstr'fsz 'vt, 'memop");
- const int64_t vt_val = get_vregisterd(vt);
+ const int64_t vt_val = get_vregisterd(vt, 0);
switch (size) {
case 2:
WriteW(address, vt_val & kWRegMask, instr);
@@ -1488,8 +1591,8 @@
case 4: {
simd_value_t val;
get_vregister(vt, &val);
- WriteX(address, val.lo, instr);
- WriteX(address + kWordSize, val.hi, instr);
+ WriteX(address, val.bits.i64[0], instr);
+ WriteX(address + kWordSize, val.bits.i64[1], instr);
break;
}
default:
@@ -1500,15 +1603,17 @@
// Format(instr, "fldr'fsz 'vt, 'memop");
switch (size) {
case 2:
- set_vregisterd(vt, static_cast<int64_t>(ReadWU(address, instr)));
+ set_vregisterd(vt, 0, static_cast<int64_t>(ReadWU(address, instr)));
+ set_vregisterd(vt, 1, 0);
break;
case 3:
- set_vregisterd(vt, ReadX(address, instr));
+ set_vregisterd(vt, 0, ReadX(address, instr));
+ set_vregisterd(vt, 1, 0);
break;
case 4: {
simd_value_t val;
- val.lo = ReadX(address, instr);
- val.hi = ReadX(address + kWordSize, instr);
+ val.bits.i64[0] = ReadX(address, instr);
+ val.bits.i64[1] = ReadX(address + kWordSize, instr);
set_vregister(vt, val);
break;
}
@@ -2067,8 +2172,145 @@
}
+void Simulator::DecodeSIMDCopy(Instr* instr) {
+ const int32_t Q = instr->Bit(30);
+ const int32_t op = instr->Bit(29);
+ const int32_t imm4 = instr->Bits(11, 4);
+ const int32_t imm5 = instr->Bits(16, 5);
+
+ int32_t idx4 = -1;
+ int32_t idx5 = -1;
+ int32_t element_bytes;
+ if (imm5 & 0x1) {
+ idx4 = imm4;
+ idx5 = imm5 >> 1;
+ element_bytes = 1;
+ } else if (imm5 & 0x2) {
+ idx4 = imm4 >> 1;
+ idx5 = imm5 >> 2;
+ element_bytes = 2;
+ } else if (imm5 & 0x4) {
+ idx4 = imm4 >> 2;
+ idx5 = imm5 >> 3;
+ element_bytes = 4;
+ } else if (imm5 & 0x8) {
+ idx4 = imm4 >> 3;
+ idx5 = imm5 >> 4;
+ element_bytes = 8;
+ } else {
+ UnimplementedInstruction(instr);
+ return;
+ }
+ ASSERT((idx4 != -1) && (idx5 != -1));
+
+ const VRegister vd = instr->VdField();
+ const VRegister vn = instr->VnField();
+ if ((Q == 1) && (op == 0) && (imm4 == 0)) {
+ // Format(instr, "vdup'csz 'vd, 'vn'idx5");
+ if (element_bytes == 4) {
+ for (int i = 0; i < 4; i++) {
+ set_vregisters(vd, i, get_vregisters(vn, idx5));
+ }
+ } else if (element_bytes == 8) {
+ for (int i = 0; i < 2; i++) {
+ set_vregisterd(vd, i, get_vregisterd(vn, idx5));
+ }
+ } else {
+ UnimplementedInstruction(instr);
+ return;
+ }
+ } else if ((Q == 1) && (op == 1)) {
+ // Format(instr, "vins'csz 'vd'idx5, 'vn'idx4");
+ if (element_bytes == 4) {
+ set_vregisters(vd, idx5, get_vregisters(vn, idx4));
+ } else if (element_bytes == 8) {
+ set_vregisterd(vd, idx5, get_vregisterd(vn, idx4));
+ } else {
+ UnimplementedInstruction(instr);
+ }
+ } else {
+ UnimplementedInstruction(instr);
+ }
+}
+
+
+void Simulator::DecodeSIMDThreeSame(Instr* instr) {
+ const int Q = instr->Bit(30);
+ const int U = instr->Bit(29);
+ const int opcode = instr->Bits(11, 5);
+
+ if (Q == 0) {
+ UnimplementedInstruction(instr);
+ return;
+ }
+
+ const VRegister vd = instr->VdField();
+ const VRegister vn = instr->VnField();
+ const VRegister vm = instr->VmField();
+ if (instr->Bit(22) == 0) {
+ // f32 case.
+ for (int idx = 0; idx < 4; idx++) {
+ const float vn_val = bit_cast<float, int32_t>(get_vregisters(vn, idx));
+ const float vm_val = bit_cast<float, int32_t>(get_vregisters(vm, idx));
+ float res = 0.0;
+ if ((U == 0) && (opcode == 0x1a)) {
+ if (instr->Bit(23) == 0) {
+ // Format(instr, "vadd'vsz 'vd, 'vn, 'vm");
+ res = vn_val + vm_val;
+ } else {
+ // Format(instr, "vsub'vsz 'vd, 'vn, 'vm");
+ res = vn_val - vm_val;
+ }
+ } else if ((U == 1) && (opcode == 0x1b)) {
+ // Format(instr, "vmul'vsz 'vd, 'vn, 'vm");
+ res = vn_val * vm_val;
+ } else if ((U == 1) && (opcode == 0x1f)) {
+ // Format(instr, "vdiv'vsz 'vd, 'vn, 'vm");
+ res = vn_val / vm_val;
+ } else {
+ UnimplementedInstruction(instr);
+ return;
+ }
+ set_vregisters(vd, idx, bit_cast<int32_t, float>(res));
+ }
+ } else {
+ // f64 case.
+ for (int idx = 0; idx < 2; idx++) {
+ const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn, idx));
+ const double vm_val = bit_cast<double, int64_t>(get_vregisterd(vm, idx));
+ double res = 0.0;
+ if ((U == 0) && (opcode == 0x1a)) {
+ if (instr->Bit(23) == 0) {
+ // Format(instr, "vadd'vsz 'vd, 'vn, 'vm");
+ res = vn_val + vm_val;
+ } else {
+ // Format(instr, "vsub'vsz 'vd, 'vn, 'vm");
+ res = vn_val - vm_val;
+ }
+ } else if ((U == 1) && (opcode == 0x1b)) {
+ // Format(instr, "vmul'vsz 'vd, 'vn, 'vm");
+ res = vn_val * vm_val;
+ } else if ((U == 1) && (opcode == 0x1f)) {
+ // Format(instr, "vdiv'vsz 'vd, 'vn, 'vm");
+ res = vn_val / vm_val;
+ } else {
+ UnimplementedInstruction(instr);
+ return;
+ }
+ set_vregisterd(vd, idx, bit_cast<int64_t, double>(res));
+ }
+ }
+}
+
+
void Simulator::DecodeDPSimd1(Instr* instr) {
- UnimplementedInstruction(instr);
+ if (instr->IsSIMDCopyOp()) {
+ DecodeSIMDCopy(instr);
+ } else if (instr->IsSIMDThreeSameOp()) {
+ DecodeSIMDThreeSame(instr);
+ } else {
+ UnimplementedInstruction(instr);
+ }
}
@@ -2083,7 +2325,8 @@
// Format(instr, "fmovd 'vd, #'immd");
const VRegister vd = instr->VdField();
const int64_t immd = Instr::VFPExpandImm(instr->Imm8Field());
- set_vregisterd(vd, immd);
+ set_vregisterd(vd, 0, immd);
+ set_vregisterd(vd, 1, 0);
} else {
// Single.
UnimplementedInstruction(instr);
@@ -2106,18 +2349,20 @@
// Format(instr, "scvtfd 'vd, 'vn");
const int64_t rn_val = get_register(rn, instr->RnMode());
const double vn_dbl = static_cast<double>(rn_val);
- set_vregisterd(vd, bit_cast<int64_t, double>(vn_dbl));
+ set_vregisterd(vd, 0, bit_cast<int64_t, double>(vn_dbl));
+ set_vregisterd(vd, 1, 0);
} else if (instr->Bits(16, 5) == 6) {
// Format(instr, "fmovrd 'rd, 'vn");
- const int64_t vn_val = get_vregisterd(vn);
+ const int64_t vn_val = get_vregisterd(vn, 0);
set_register(rd, vn_val, R31IsZR);
} else if (instr->Bits(16, 5) == 7) {
// Format(instr, "fmovdr 'vd, 'rn");
const int64_t rn_val = get_register(rn, R31IsZR);
- set_vregisterd(vd, rn_val);
+ set_vregisterd(vd, 0, rn_val);
+ set_vregisterd(vd, 1, 0);
} else if (instr->Bits(16, 5) == 24) {
// Format(instr, "fcvtzds 'rd, 'vn");
- const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn));
+ const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn, 0));
set_register(rd, static_cast<int64_t>(vn_val), instr->RdMode());
} else {
UnimplementedInstruction(instr);
@@ -2129,7 +2374,7 @@
const int opc = instr->Bits(15, 6);
const VRegister vd = instr->VdField();
const VRegister vn = instr->VnField();
- const int64_t vn_val = get_vregisterd(vn);
+ const int64_t vn_val = get_vregisterd(vn, 0);
const int32_t vn_val32 = vn_val & kWRegMask;
const double vn_dbl = bit_cast<double, int64_t>(vn_val);
const float vn_flt = bit_cast<float, int32_t>(vn_val32);
@@ -2145,7 +2390,7 @@
switch (opc) {
case 0:
// Format("fmovdd 'vd, 'vn");
- res_val = get_vregisterd(vn);
+ res_val = get_vregisterd(vn, 0);
break;
case 1:
// Format("fabsd 'vd, 'vn");
@@ -2175,7 +2420,8 @@
break;
}
- set_vregisterd(vd, res_val);
+ set_vregisterd(vd, 0, res_val);
+ set_vregisterd(vd, 1, 0);
}
@@ -2187,8 +2433,8 @@
const VRegister vd = instr->VdField();
const VRegister vn = instr->VnField();
const VRegister vm = instr->VmField();
- const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn));
- const double vm_val = bit_cast<double, int64_t>(get_vregisterd(vm));
+ const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn, 0));
+ const double vm_val = bit_cast<double, int64_t>(get_vregisterd(vm, 0));
const int opc = instr->Bits(12, 4);
double result;
@@ -2214,19 +2460,20 @@
return;
}
- set_vregisterd(vd, bit_cast<int64_t, double>(result));
+ set_vregisterd(vd, 0, bit_cast<int64_t, double>(result));
+ set_vregisterd(vd, 1, 0);
}
void Simulator::DecodeFPCompare(Instr* instr) {
const VRegister vn = instr->VnField();
const VRegister vm = instr->VmField();
- const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn));
+ const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn, 0));
double vm_val;
if ((instr->Bit(22) == 1) && (instr->Bits(3, 2) == 0)) {
// Format(instr, "fcmpd 'vn, 'vm");
- vm_val = bit_cast<double, int64_t>(get_vregisterd(vm));
+ vm_val = bit_cast<double, int64_t>(get_vregisterd(vm, 0));
} else if ((instr->Bit(22) == 1) && (instr->Bits(3, 2) == 1)) {
if (instr->VmField() == V0) {
// Format(instr, "fcmpd 'vn, #0.0");
@@ -2366,10 +2613,14 @@
// Setup parameters.
if (fp_args) {
- set_vregisterd(V0, parameter0);
- set_vregisterd(V1, parameter1);
- set_vregisterd(V2, parameter2);
- set_vregisterd(V3, parameter3);
+ set_vregisterd(V0, 0, parameter0);
+ set_vregisterd(V0, 1, 0);
+ set_vregisterd(V1, 0, parameter1);
+ set_vregisterd(V1, 1, 0);
+ set_vregisterd(V2, 0, parameter2);
+ set_vregisterd(V2, 1, 0);
+ set_vregisterd(V3, 0, parameter3);
+ set_vregisterd(V3, 1, 0);
} else {
set_register(R0, parameter0);
set_register(R1, parameter1);
@@ -2408,8 +2659,9 @@
int64_t preserved_dvals[kAbiPreservedFpuRegCount];
for (int i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; i++) {
const VRegister r = static_cast<VRegister>(i);
- preserved_dvals[i - kAbiFirstPreservedFpuReg] = get_vregisterd(r);
- set_vregisterd(r, callee_saved_value);
+ preserved_dvals[i - kAbiFirstPreservedFpuReg] = get_vregisterd(r, 0);
+ set_vregisterd(r, 0, callee_saved_value);
+ set_vregisterd(r, 1, 0);
}
// Start the simulation.
@@ -2425,15 +2677,16 @@
for (int i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; i++) {
const VRegister r = static_cast<VRegister>(i);
- ASSERT(callee_saved_value == get_vregisterd(r));
- set_vregisterd(r, preserved_dvals[i - kAbiFirstPreservedFpuReg]);
+ ASSERT(callee_saved_value == get_vregisterd(r, 0));
+ set_vregisterd(r, 0, preserved_dvals[i - kAbiFirstPreservedFpuReg]);
+ set_vregisterd(r, 1, 0);
}
// Restore the SP register and return R0.
set_register(R31, sp_before_call, R31IsSP);
int64_t return_value;
if (fp_return) {
- return_value = get_vregisterd(V0);
+ return_value = get_vregisterd(V0, 0);
} else {
return_value = get_register(R0);
}
diff --git a/runtime/vm/simulator_arm64.h b/runtime/vm/simulator_arm64.h
index 4480406..6db0360 100644
--- a/runtime/vm/simulator_arm64.h
+++ b/runtime/vm/simulator_arm64.h
@@ -25,8 +25,10 @@
class SimulatorSetjmpBuffer;
typedef struct {
- int64_t lo;
- int64_t hi;
+ union {
+ int64_t i64[2];
+ int32_t i32[4];
+ } bits;
} simd_value_t;
class Simulator {
@@ -51,10 +53,12 @@
void set_wregister(Register reg, int32_t value, R31Type r31t = R31IsSP);
int32_t get_wregister(Register reg, R31Type r31t = R31IsSP) const;
- // Get and set a V register in double ('d') mode. Setting clears the high
- // 64 bits of the V register. Getting ignores the high 64 bits.
- int64_t get_vregisterd(VRegister reg) const;
- void set_vregisterd(VRegister reg, int64_t value);
+ int32_t get_vregisters(VRegister reg, int idx) const;
+ void set_vregisters(VRegister reg, int idx, int32_t value);
+
+ int64_t get_vregisterd(VRegister reg, int idx) const;
+ void set_vregisterd(VRegister reg, int idx, int64_t value);
+
void get_vregister(VRegister reg, simd_value_t* value) const;
void set_vregister(VRegister reg, const simd_value_t& value);
diff --git a/runtime/vm/stub_code_arm64.cc b/runtime/vm/stub_code_arm64.cc
index ca624df..9670eee 100644
--- a/runtime/vm/stub_code_arm64.cc
+++ b/runtime/vm/stub_code_arm64.cc
@@ -661,7 +661,7 @@
// R8: Points to new space object.
__ StoreToOffset(R7, R8, Scavenger::top_offset(), kNoPP);
__ add(R0, R0, Operand(kHeapObjectTag));
- __ UpdateAllocationStatsWithSize(kArrayCid, R3, R8, kNoPP);
+ __ UpdateAllocationStatsWithSize(kArrayCid, R3, kNoPP);
// R0: new object start as a tagged pointer.
// R1: array element type.
@@ -938,7 +938,7 @@
// R3: next object start.
__ str(R3, Address(R5));
__ add(R0, R0, Operand(kHeapObjectTag));
- __ UpdateAllocationStatsWithSize(context_class.id(), R2, R5, kNoPP);
+ __ UpdateAllocationStatsWithSize(context_class.id(), R2, kNoPP);
// Calculate the size tag.
// R0: new object.
@@ -1120,7 +1120,7 @@
__ b(&slow_case, CS); // Unsigned higher or equal.
}
__ str(R3, Address(R5));
- __ UpdateAllocationStats(cls.id(), R5, kNoPP);
+ __ UpdateAllocationStats(cls.id(), kNoPP);
// R2: new object start.
// R3: next object start.
diff --git a/runtime/vm/tags.h b/runtime/vm/tags.h
index 5d5394c..d8644b9 100644
--- a/runtime/vm/tags.h
+++ b/runtime/vm/tags.h
@@ -16,7 +16,10 @@
#define VM_TAG_LIST(V) \
V(Idle) \
V(VM) /* Catch all */ \
- V(Compile) \
+ V(CompileOptimized) \
+ V(CompileUnoptimized) \
+ V(CompileTopLevel) \
+ V(CompileScanner) \
V(Script) \
V(GCNewSpace) \
V(GCOldSpace) \
diff --git a/runtime/vm/unit_test.cc b/runtime/vm/unit_test.cc
index 93b5ed8..1830584 100644
--- a/runtime/vm/unit_test.cc
+++ b/runtime/vm/unit_test.cc
@@ -241,7 +241,8 @@
Isolate* isolate = Isolate::Current();
ASSERT(isolate != NULL);
ASSERT(ClassFinalizer::AllClassesFinalized());
- const Error& error = Error::Handle(Compiler::CompileFunction(function));
+ const Error& error = Error::Handle(Compiler::CompileFunction(isolate,
+ function));
return error.IsNull();
}
diff --git a/sdk/lib/_blink/dartium/_blink_dartium.dart b/sdk/lib/_blink/dartium/_blink_dartium.dart
index 0cac4d5..292ca9a 100644
--- a/sdk/lib/_blink/dartium/_blink_dartium.dart
+++ b/sdk/lib/_blink/dartium/_blink_dartium.dart
@@ -46,7 +46,7 @@
Native_ANGLEInstancedArrays_vertexAttribDivisorANGLE_Callback(mthis, index, divisor) native "ANGLEInstancedArrays_vertexAttribDivisorANGLE_Callback_RESOLVER_STRING_2_unsigned long_long";
-Native_Algorithm_name_Getter(mthis) native "Algorithm_name_Getter";
+Native_Algorithm_name_Getter(mthis) native "KeyAlgorithm_name_Getter";
Native_EventTarget_addEventListener_Callback(mthis, type, listener, useCapture) native "EventTarget_addEventListener_Callback_RESOLVER_STRING_3_DOMString_EventListener_boolean";
@@ -863,7 +863,7 @@
return;
}
-Native_CanvasRenderingContext2D__clip_1_Callback(mthis, winding) native "CanvasRenderingContext2D_clip_Callback_RESOLVER_STRING_1_CanvasWindingRule";
+Native_CanvasRenderingContext2D__clip_1_Callback(mthis, winding) native "CanvasRenderingContext2D_clip_Callback_RESOLVER_STRING_1_DOMString";
Native_CanvasRenderingContext2D__clip_2_Callback(mthis) native "CanvasRenderingContext2D_clip_Callback_RESOLVER_STRING_0_";
@@ -960,8 +960,6 @@
Native_CanvasRenderingContext2D__drawImage_12_Callback(mthis, canvas_OR_image_OR_imageBitmap_OR_video, sx_OR_x, sy_OR_y, sw_OR_width, height_OR_sh, dx, dy, dw, dh) native "CanvasRenderingContext2D_drawImage_Callback_RESOLVER_STRING_9_ImageBitmap_float_float_float_float_float_float_float_float";
-Native_CanvasRenderingContext2D_drawSystemFocusRing_Callback(mthis, element) native "CanvasRenderingContext2D_drawSystemFocusRing_Callback_RESOLVER_STRING_1_Element";
-
Native_CanvasRenderingContext2D_ellipse_Callback(mthis, x, y, radiusX, radiusY, rotation, startAngle, endAngle, anticlockwise) native "CanvasRenderingContext2D_ellipse_Callback_RESOLVER_STRING_8_float_float_float_float_float_float_float_boolean";
// Generated overload resolver
@@ -974,7 +972,7 @@
return;
}
-Native_CanvasRenderingContext2D__fill_1_Callback(mthis, winding) native "CanvasRenderingContext2D_fill_Callback_RESOLVER_STRING_1_CanvasWindingRule";
+Native_CanvasRenderingContext2D__fill_1_Callback(mthis, winding) native "CanvasRenderingContext2D_fill_Callback_RESOLVER_STRING_1_DOMString";
Native_CanvasRenderingContext2D__fill_2_Callback(mthis) native "CanvasRenderingContext2D_fill_Callback_RESOLVER_STRING_0_";
@@ -1008,7 +1006,7 @@
return Native_CanvasRenderingContext2D__isPointInPath_2_Callback(mthis, x, y);
}
-Native_CanvasRenderingContext2D__isPointInPath_1_Callback(mthis, x, y, winding) native "CanvasRenderingContext2D_isPointInPath_Callback_RESOLVER_STRING_3_float_float_CanvasWindingRule";
+Native_CanvasRenderingContext2D__isPointInPath_1_Callback(mthis, x, y, winding) native "CanvasRenderingContext2D_isPointInPath_Callback_RESOLVER_STRING_3_float_float_DOMString";
Native_CanvasRenderingContext2D__isPointInPath_2_Callback(mthis, x, y) native "CanvasRenderingContext2D_isPointInPath_Callback_RESOLVER_STRING_2_float_float";
@@ -2118,7 +2116,7 @@
return;
}
-Native_FontFaceSet__forEach_1_Callback(mthis, callback, thisArg) native "FontFaceSet_forEach_Callback_RESOLVER_STRING_2_FontFaceSetForEachCallback_any";
+Native_FontFaceSet__forEach_1_Callback(mthis, callback, thisArg) native "FontFaceSet_forEach_Callback_RESOLVER_STRING_2_FontFaceSetForEachCallback_ScriptValue";
Native_FontFaceSet__forEach_2_Callback(mthis, callback) native "FontFaceSet_forEach_Callback_RESOLVER_STRING_1_FontFaceSetForEachCallback";
@@ -2206,49 +2204,49 @@
Native_HTMLElement_click_Callback(mthis) native "HTMLElement_click_Callback_RESOLVER_STRING_0_";
-Native_URLUtils_hash_Getter(mthis) native "URLUtils_hash_Getter";
+Native_URLUtils_hash_Getter(mthis) native "URL_hash_Getter";
-Native_URLUtils_hash_Setter(mthis, value) native "URLUtils_hash_Setter";
+Native_URLUtils_hash_Setter(mthis, value) native "URL_hash_Setter";
-Native_URLUtils_host_Getter(mthis) native "URLUtils_host_Getter";
+Native_URLUtils_host_Getter(mthis) native "URL_host_Getter";
-Native_URLUtils_host_Setter(mthis, value) native "URLUtils_host_Setter";
+Native_URLUtils_host_Setter(mthis, value) native "URL_host_Setter";
-Native_URLUtils_hostname_Getter(mthis) native "URLUtils_hostname_Getter";
+Native_URLUtils_hostname_Getter(mthis) native "URL_hostname_Getter";
-Native_URLUtils_hostname_Setter(mthis, value) native "URLUtils_hostname_Setter";
+Native_URLUtils_hostname_Setter(mthis, value) native "URL_hostname_Setter";
-Native_URLUtils_href_Getter(mthis) native "URLUtils_href_Getter";
+Native_URLUtils_href_Getter(mthis) native "URL_href_Getter";
-Native_URLUtils_href_Setter(mthis, value) native "URLUtils_href_Setter";
+Native_URLUtils_href_Setter(mthis, value) native "URL_href_Setter";
-Native_URLUtils_origin_Getter(mthis) native "URLUtils_origin_Getter";
+Native_URLUtils_origin_Getter(mthis) native "URL_origin_Getter";
-Native_URLUtils_password_Getter(mthis) native "URLUtils_password_Getter";
+Native_URLUtils_password_Getter(mthis) native "URL_password_Getter";
-Native_URLUtils_password_Setter(mthis, value) native "URLUtils_password_Setter";
+Native_URLUtils_password_Setter(mthis, value) native "URL_password_Setter";
-Native_URLUtils_pathname_Getter(mthis) native "URLUtils_pathname_Getter";
+Native_URLUtils_pathname_Getter(mthis) native "URL_pathname_Getter";
-Native_URLUtils_pathname_Setter(mthis, value) native "URLUtils_pathname_Setter";
+Native_URLUtils_pathname_Setter(mthis, value) native "URL_pathname_Setter";
-Native_URLUtils_port_Getter(mthis) native "URLUtils_port_Getter";
+Native_URLUtils_port_Getter(mthis) native "URL_port_Getter";
-Native_URLUtils_port_Setter(mthis, value) native "URLUtils_port_Setter";
+Native_URLUtils_port_Setter(mthis, value) native "URL_port_Setter";
-Native_URLUtils_protocol_Getter(mthis) native "URLUtils_protocol_Getter";
+Native_URLUtils_protocol_Getter(mthis) native "URL_protocol_Getter";
-Native_URLUtils_protocol_Setter(mthis, value) native "URLUtils_protocol_Setter";
+Native_URLUtils_protocol_Setter(mthis, value) native "URL_protocol_Setter";
-Native_URLUtils_search_Getter(mthis) native "URLUtils_search_Getter";
+Native_URLUtils_search_Getter(mthis) native "URL_search_Getter";
-Native_URLUtils_search_Setter(mthis, value) native "URLUtils_search_Setter";
+Native_URLUtils_search_Setter(mthis, value) native "URL_search_Setter";
-Native_URLUtils_username_Getter(mthis) native "URLUtils_username_Getter";
+Native_URLUtils_username_Getter(mthis) native "URL_username_Getter";
-Native_URLUtils_username_Setter(mthis, value) native "URLUtils_username_Setter";
+Native_URLUtils_username_Setter(mthis, value) native "URL_username_Setter";
-Native_URLUtils_toString_Callback(mthis) native "URLUtils_toString_Callback_RESOLVER_STRING_0_";
+Native_URLUtils_toString_Callback(mthis) native "URL_toString_Callback_RESOLVER_STRING_0_";
Native_HTMLAnchorElement_download_Getter(mthis) native "HTMLAnchorElement_download_Getter";
@@ -3719,13 +3717,13 @@
Native_IDBCursor_advance_Callback(mthis, count) native "IDBCursor_advance_Callback_RESOLVER_STRING_1_unsigned long";
-Native_IDBCursor_continuePrimaryKey_Callback(mthis, key, primaryKey) native "IDBCursor_continuePrimaryKey_Callback_RESOLVER_STRING_2_any_any";
+Native_IDBCursor_continuePrimaryKey_Callback(mthis, key, primaryKey) native "IDBCursor_continuePrimaryKey_Callback_RESOLVER_STRING_2_ScriptValue_ScriptValue";
Native_IDBCursor_delete_Callback(mthis) native "IDBCursor_delete_Callback_RESOLVER_STRING_0_";
-Native_IDBCursor_next_Callback(mthis, key) native "IDBCursor_continue_Callback_RESOLVER_STRING_1_any";
+Native_IDBCursor_next_Callback(mthis, key) native "IDBCursor_continue_Callback_RESOLVER_STRING_1_ScriptValue";
-Native_IDBCursor_update_Callback(mthis, value) native "IDBCursor_update_Callback_RESOLVER_STRING_1_any";
+Native_IDBCursor_update_Callback(mthis, value) native "IDBCursor_update_Callback_RESOLVER_STRING_1_ScriptValue";
Native_IDBCursorWithValue_value_Getter(mthis) native "IDBCursorWithValue_value_Getter";
@@ -3767,7 +3765,7 @@
Native_IDBDatabase_transactionStores_Callback(mthis, storeNames, mode) native "IDBDatabase_transaction_Callback_RESOLVER_STRING_2_DOMStringList_DOMString";
-Native_IDBFactory_cmp_Callback(mthis, first, second) native "IDBFactory_cmp_Callback_RESOLVER_STRING_2_any_any";
+Native_IDBFactory_cmp_Callback(mthis, first, second) native "IDBFactory_cmp_Callback_RESOLVER_STRING_2_ScriptValue_ScriptValue";
Native_IDBFactory_deleteDatabase_Callback(mthis, name) native "IDBFactory_deleteDatabase_Callback_RESOLVER_STRING_1_DOMString";
@@ -3795,15 +3793,15 @@
Native_IDBIndex_unique_Getter(mthis) native "IDBIndex_unique_Getter";
-Native_IDBIndex_count_Callback(mthis, key) native "IDBIndex_count_Callback_RESOLVER_STRING_1_any";
+Native_IDBIndex_count_Callback(mthis, key) native "IDBIndex_count_Callback_RESOLVER_STRING_1_ScriptValue";
-Native_IDBIndex_get_Callback(mthis, key) native "IDBIndex_get_Callback_RESOLVER_STRING_1_any";
+Native_IDBIndex_get_Callback(mthis, key) native "IDBIndex_get_Callback_RESOLVER_STRING_1_ScriptValue";
-Native_IDBIndex_getKey_Callback(mthis, key) native "IDBIndex_getKey_Callback_RESOLVER_STRING_1_any";
+Native_IDBIndex_getKey_Callback(mthis, key) native "IDBIndex_getKey_Callback_RESOLVER_STRING_1_ScriptValue";
-Native_IDBIndex_openCursor_Callback(mthis, key, direction) native "IDBIndex_openCursor_Callback_RESOLVER_STRING_2_any_DOMString";
+Native_IDBIndex_openCursor_Callback(mthis, key, direction) native "IDBIndex_openCursor_Callback_RESOLVER_STRING_2_ScriptValue_DOMString";
-Native_IDBIndex_openKeyCursor_Callback(mthis, key, direction) native "IDBIndex_openKeyCursor_Callback_RESOLVER_STRING_2_any_DOMString";
+Native_IDBIndex_openKeyCursor_Callback(mthis, key, direction) native "IDBIndex_openKeyCursor_Callback_RESOLVER_STRING_2_ScriptValue_DOMString";
Native_IDBKeyRange_lower_Getter(mthis) native "IDBKeyRange_lower_Getter";
@@ -3813,13 +3811,13 @@
Native_IDBKeyRange_upperOpen_Getter(mthis) native "IDBKeyRange_upperOpen_Getter";
-Native_IDBKeyRange_bound__Callback(lower, upper, lowerOpen, upperOpen) native "IDBKeyRange_bound_Callback_RESOLVER_STRING_4_any_any_boolean_boolean";
+Native_IDBKeyRange_bound__Callback(lower, upper, lowerOpen, upperOpen) native "IDBKeyRange_bound_Callback_RESOLVER_STRING_4_ScriptValue_ScriptValue_boolean_boolean";
-Native_IDBKeyRange_lowerBound__Callback(bound, open) native "IDBKeyRange_lowerBound_Callback_RESOLVER_STRING_2_any_boolean";
+Native_IDBKeyRange_lowerBound__Callback(bound, open) native "IDBKeyRange_lowerBound_Callback_RESOLVER_STRING_2_ScriptValue_boolean";
-Native_IDBKeyRange_only__Callback(value) native "IDBKeyRange_only_Callback_RESOLVER_STRING_1_any";
+Native_IDBKeyRange_only__Callback(value) native "IDBKeyRange_only_Callback_RESOLVER_STRING_1_ScriptValue";
-Native_IDBKeyRange_upperBound__Callback(bound, open) native "IDBKeyRange_upperBound_Callback_RESOLVER_STRING_2_any_boolean";
+Native_IDBKeyRange_upperBound__Callback(bound, open) native "IDBKeyRange_upperBound_Callback_RESOLVER_STRING_2_ScriptValue_boolean";
Native_IDBObjectStore_autoIncrement_Getter(mthis) native "IDBObjectStore_autoIncrement_Getter";
@@ -3831,11 +3829,11 @@
Native_IDBObjectStore_transaction_Getter(mthis) native "IDBObjectStore_transaction_Getter";
-Native_IDBObjectStore_add_Callback(mthis, value, key) native "IDBObjectStore_add_Callback_RESOLVER_STRING_2_any_any";
+Native_IDBObjectStore_add_Callback(mthis, value, key) native "IDBObjectStore_add_Callback_RESOLVER_STRING_2_ScriptValue_ScriptValue";
Native_IDBObjectStore_clear_Callback(mthis) native "IDBObjectStore_clear_Callback_RESOLVER_STRING_0_";
-Native_IDBObjectStore_count_Callback(mthis, key) native "IDBObjectStore_count_Callback_RESOLVER_STRING_1_any";
+Native_IDBObjectStore_count_Callback(mthis, key) native "IDBObjectStore_count_Callback_RESOLVER_STRING_1_ScriptValue";
// Generated overload resolver
Native_IDBObjectStore__createIndex(mthis, name, keyPath, options) {
@@ -3852,19 +3850,19 @@
Native_IDBObjectStore__createIndex_2_Callback(mthis, name, keyPath, options) native "IDBObjectStore_createIndex_Callback_RESOLVER_STRING_3_DOMString_DOMString_Dictionary";
-Native_IDBObjectStore_delete_Callback(mthis, key) native "IDBObjectStore_delete_Callback_RESOLVER_STRING_1_any";
+Native_IDBObjectStore_delete_Callback(mthis, key) native "IDBObjectStore_delete_Callback_RESOLVER_STRING_1_ScriptValue";
Native_IDBObjectStore_deleteIndex_Callback(mthis, name) native "IDBObjectStore_deleteIndex_Callback_RESOLVER_STRING_1_DOMString";
-Native_IDBObjectStore_get_Callback(mthis, key) native "IDBObjectStore_get_Callback_RESOLVER_STRING_1_any";
+Native_IDBObjectStore_get_Callback(mthis, key) native "IDBObjectStore_get_Callback_RESOLVER_STRING_1_ScriptValue";
Native_IDBObjectStore_index_Callback(mthis, name) native "IDBObjectStore_index_Callback_RESOLVER_STRING_1_DOMString";
-Native_IDBObjectStore_openCursor_Callback(mthis, key, direction) native "IDBObjectStore_openCursor_Callback_RESOLVER_STRING_2_any_DOMString";
+Native_IDBObjectStore_openCursor_Callback(mthis, key, direction) native "IDBObjectStore_openCursor_Callback_RESOLVER_STRING_2_ScriptValue_DOMString";
-Native_IDBObjectStore_openKeyCursor_Callback(mthis, range, direction) native "IDBObjectStore_openKeyCursor_Callback_RESOLVER_STRING_2_any_DOMString";
+Native_IDBObjectStore_openKeyCursor_Callback(mthis, range, direction) native "IDBObjectStore_openKeyCursor_Callback_RESOLVER_STRING_2_ScriptValue_DOMString";
-Native_IDBObjectStore_put_Callback(mthis, value, key) native "IDBObjectStore_put_Callback_RESOLVER_STRING_2_any_any";
+Native_IDBObjectStore_put_Callback(mthis, value, key) native "IDBObjectStore_put_Callback_RESOLVER_STRING_2_ScriptValue_ScriptValue";
Native_IDBRequest_error_Getter(mthis) native "IDBRequest_error_Getter";
@@ -3914,7 +3912,7 @@
Native_InputMethodContext_confirmComposition_Callback(mthis) native "InputMethodContext_confirmComposition_Callback_RESOLVER_STRING_0_";
-Native_InstallPhaseEvent_waitUntil_Callback(mthis, value) native "InstallPhaseEvent_waitUntil_Callback_RESOLVER_STRING_1_any";
+Native_InstallPhaseEvent_waitUntil_Callback(mthis, value) native "InstallPhaseEvent_waitUntil_Callback_RESOLVER_STRING_1_ScriptValue";
Native_InstallEvent_replace_Callback(mthis) native "InstallEvent_replace_Callback_RESOLVER_STRING_0_";
@@ -4173,7 +4171,7 @@
return;
}
-Native_MediaSource__endOfStream_1_Callback(mthis, error) native "MediaSource_endOfStream_Callback_RESOLVER_STRING_1_EndOfStreamError";
+Native_MediaSource__endOfStream_1_Callback(mthis, error) native "MediaSource_endOfStream_Callback_RESOLVER_STRING_1_DOMString";
Native_MediaSource__endOfStream_2_Callback(mthis) native "MediaSource_endOfStream_Callback_RESOLVER_STRING_0_";
@@ -4613,27 +4611,27 @@
throw new ArgumentError("Incorrect number or type of arguments");
}
-Native_Path__create_1constructorCallback() native "Path_constructorCallback_RESOLVER_STRING_0_";
+Native_Path__create_1constructorCallback() native "Path2D_constructorCallback_RESOLVER_STRING_0_";
-Native_Path__create_2constructorCallback(path_OR_text) native "Path_constructorCallback_RESOLVER_STRING_1_Path";
+Native_Path__create_2constructorCallback(path_OR_text) native "Path2D_constructorCallback_RESOLVER_STRING_1_Path2D";
-Native_Path__create_3constructorCallback(path_OR_text) native "Path_constructorCallback_RESOLVER_STRING_1_DOMString";
+Native_Path__create_3constructorCallback(path_OR_text) native "Path2D_constructorCallback_RESOLVER_STRING_1_DOMString";
-Native_Path_arc_Callback(mthis, x, y, radius, startAngle, endAngle, anticlockwise) native "Path_arc_Callback_RESOLVER_STRING_6_float_float_float_float_float_boolean";
+Native_Path_arc_Callback(mthis, x, y, radius, startAngle, endAngle, anticlockwise) native "Path2D_arc_Callback_RESOLVER_STRING_6_float_float_float_float_float_boolean";
-Native_Path_arcTo_Callback(mthis, x1, y1, x2, y2, radius) native "Path_arcTo_Callback_RESOLVER_STRING_5_float_float_float_float_float";
+Native_Path_arcTo_Callback(mthis, x1, y1, x2, y2, radius) native "Path2D_arcTo_Callback_RESOLVER_STRING_5_float_float_float_float_float";
-Native_Path_bezierCurveTo_Callback(mthis, cp1x, cp1y, cp2x, cp2y, x, y) native "Path_bezierCurveTo_Callback_RESOLVER_STRING_6_float_float_float_float_float_float";
+Native_Path_bezierCurveTo_Callback(mthis, cp1x, cp1y, cp2x, cp2y, x, y) native "Path2D_bezierCurveTo_Callback_RESOLVER_STRING_6_float_float_float_float_float_float";
-Native_Path_closePath_Callback(mthis) native "Path_closePath_Callback_RESOLVER_STRING_0_";
+Native_Path_closePath_Callback(mthis) native "Path2D_closePath_Callback_RESOLVER_STRING_0_";
-Native_Path_lineTo_Callback(mthis, x, y) native "Path_lineTo_Callback_RESOLVER_STRING_2_float_float";
+Native_Path_lineTo_Callback(mthis, x, y) native "Path2D_lineTo_Callback_RESOLVER_STRING_2_float_float";
-Native_Path_moveTo_Callback(mthis, x, y) native "Path_moveTo_Callback_RESOLVER_STRING_2_float_float";
+Native_Path_moveTo_Callback(mthis, x, y) native "Path2D_moveTo_Callback_RESOLVER_STRING_2_float_float";
-Native_Path_quadraticCurveTo_Callback(mthis, cpx, cpy, x, y) native "Path_quadraticCurveTo_Callback_RESOLVER_STRING_4_float_float_float_float";
+Native_Path_quadraticCurveTo_Callback(mthis, cpx, cpy, x, y) native "Path2D_quadraticCurveTo_Callback_RESOLVER_STRING_4_float_float_float_float";
-Native_Path_rect_Callback(mthis, x, y, width, height) native "Path_rect_Callback_RESOLVER_STRING_4_float_float_float_float";
+Native_Path_rect_Callback(mthis, x, y, width, height) native "Path2D_rect_Callback_RESOLVER_STRING_4_float_float_float_float";
Native_Performance_memory_Getter(mthis) native "Performance_memory_Getter";
@@ -5079,7 +5077,7 @@
Native_SQLResultSetRowList_length_Getter(mthis) native "SQLResultSetRowList_length_Getter";
-Native_SQLResultSetRowList_NativeIndexed_Getter(mthis, index) native "SQLResultSetRowList_item_Callback_RESOLVER_STRING_1_unsigned long";
+Native_SQLResultSetRowList_NativeIndexed_Getter(mthis, index) native "SQLResultSetRowList_item_Callback";
Native_SQLResultSetRowList_item_Callback(mthis, index) native "SQLResultSetRowList_item_Callback";
@@ -6535,26 +6533,13 @@
Native_Screen_width_Getter(mthis) native "Screen_width_Getter";
- // Generated overload resolver
-Native_Screen_lockOrientation(mthis, orientation_OR_orientations) {
- if ((orientation_OR_orientations is String || orientation_OR_orientations == null)) {
- return Native_Screen__lockOrientation_1_Callback(mthis, orientation_OR_orientations);
- }
- if ((orientation_OR_orientations is List<String> || orientation_OR_orientations == null)) {
- return Native_Screen__lockOrientation_2_Callback(mthis, orientation_OR_orientations);
- }
- throw new ArgumentError("Incorrect number or type of arguments");
- }
-
-Native_Screen__lockOrientation_1_Callback(mthis, orientation_OR_orientations) native "Screen_lockOrientation_Callback_RESOLVER_STRING_1_DOMString";
-
-Native_Screen__lockOrientation_2_Callback(mthis, orientation_OR_orientations) native "Screen_lockOrientation_Callback_RESOLVER_STRING_1_sequence<DOMString>";
+Native_Screen_lockOrientation_Callback(mthis, orientation) native "Screen_lockOrientation_Callback_RESOLVER_STRING_1_DOMString";
Native_Screen_unlockOrientation_Callback(mthis) native "Screen_unlockOrientation_Callback_RESOLVER_STRING_0_";
Native_ScriptProcessorNode_bufferSize_Getter(mthis) native "ScriptProcessorNode_bufferSize_Getter";
-Native_ScriptProcessorNode__setEventListener_Callback(mthis, eventListener) native "ScriptProcessorNode__setEventListener_Callback";
+Native_ScriptProcessorNode__setEventListener_Callback(mthis, eventListener) native "ScriptProcessorNode_setEventListener_Callback";
Native_SecurityPolicyViolationEvent_blockedURI_Getter(mthis) native "SecurityPolicyViolationEvent_blockedURI_Getter";
@@ -7270,23 +7255,23 @@
Native_URL_toString_Callback(mthis) native "URL_toString_Callback_RESOLVER_STRING_0_";
-Native_URLUtilsReadOnly_hash_Getter(mthis) native "URLUtilsReadOnly_hash_Getter";
+Native_URLUtilsReadOnly_hash_Getter(mthis) native "WorkerLocation_hash_Getter";
-Native_URLUtilsReadOnly_host_Getter(mthis) native "URLUtilsReadOnly_host_Getter";
+Native_URLUtilsReadOnly_host_Getter(mthis) native "WorkerLocation_host_Getter";
-Native_URLUtilsReadOnly_hostname_Getter(mthis) native "URLUtilsReadOnly_hostname_Getter";
+Native_URLUtilsReadOnly_hostname_Getter(mthis) native "WorkerLocation_hostname_Getter";
-Native_URLUtilsReadOnly_href_Getter(mthis) native "URLUtilsReadOnly_href_Getter";
+Native_URLUtilsReadOnly_href_Getter(mthis) native "WorkerLocation_href_Getter";
-Native_URLUtilsReadOnly_pathname_Getter(mthis) native "URLUtilsReadOnly_pathname_Getter";
+Native_URLUtilsReadOnly_pathname_Getter(mthis) native "WorkerLocation_pathname_Getter";
-Native_URLUtilsReadOnly_port_Getter(mthis) native "URLUtilsReadOnly_port_Getter";
+Native_URLUtilsReadOnly_port_Getter(mthis) native "WorkerLocation_port_Getter";
-Native_URLUtilsReadOnly_protocol_Getter(mthis) native "URLUtilsReadOnly_protocol_Getter";
+Native_URLUtilsReadOnly_protocol_Getter(mthis) native "WorkerLocation_protocol_Getter";
-Native_URLUtilsReadOnly_search_Getter(mthis) native "URLUtilsReadOnly_search_Getter";
+Native_URLUtilsReadOnly_search_Getter(mthis) native "WorkerLocation_search_Getter";
-Native_URLUtilsReadOnly_toString_Callback(mthis) native "URLUtilsReadOnly_toString_Callback_RESOLVER_STRING_0_";
+Native_URLUtilsReadOnly_toString_Callback(mthis) native "WorkerLocation_toString_Callback_RESOLVER_STRING_0_";
// Generated overload resolver
Native_VTTCue_VttCue(startTime, endTime, text) {
@@ -7450,7 +7435,7 @@
Native_WebGLDebugShaders_getTranslatedShaderSource_Callback(mthis, shader) native "WebGLDebugShaders_getTranslatedShaderSource_Callback_RESOLVER_STRING_1_WebGLShader";
-Native_WebGLDrawBuffers_drawBuffersWEBGL_Callback(mthis, buffers) native "WebGLDrawBuffers_drawBuffersWEBGL_Callback_RESOLVER_STRING_1_sequence<GLenum>";
+Native_WebGLDrawBuffers_drawBuffersWEBGL_Callback(mthis, buffers) native "WebGLDrawBuffers_drawBuffersWEBGL_Callback_RESOLVER_STRING_1_sequence<unsigned long>";
Native_WebGLLoseContext_loseContext_Callback(mthis) native "WebGLLoseContext_loseContext_Callback_RESOLVER_STRING_0_";
@@ -8116,7 +8101,7 @@
Native_Window____getter___1_Callback(mthis, index_OR_name) native "Window___getter___Callback_RESOLVER_STRING_1_unsigned long";
-Native_Window____getter___2_Callback(mthis, index_OR_name) native "Window___getter___Callback_RESOLVER_STRING_1_DOMString";
+Native_Window____getter___2_Callback(mthis, index_OR_name) native "Window___getter___Callback";
Native_Window_alert_Callback(mthis, message) native "Window_alert_Callback_RESOLVER_STRING_1_DOMString";
diff --git a/sdk/lib/_internal/compiler/implementation/closure.dart b/sdk/lib/_internal/compiler/implementation/closure.dart
index 73a2197..a7ae252 100644
--- a/sdk/lib/_internal/compiler/implementation/closure.dart
+++ b/sdk/lib/_internal/compiler/implementation/closure.dart
@@ -10,8 +10,7 @@
import "scanner/scannerlib.dart" show Token;
import "tree/tree.dart";
import "util/util.dart";
-import "elements/modelx.dart" show ElementX, SynthesizedCallMethodElementX,
- ClassElementX;
+import "elements/modelx.dart" show ElementX, FunctionElementX, ClassElementX;
import "elements/visitor.dart" show ElementVisitor;
class ClosureNamer {
@@ -220,6 +219,23 @@
accept(ElementVisitor visitor) => visitor.visitThisElement(this);
}
+/// Call method of a closure class.
+class SynthesizedCallMethodElementX extends FunctionElementX {
+ final FunctionElement expression;
+
+ SynthesizedCallMethodElementX(String name,
+ FunctionElementX other,
+ Element enclosing)
+ : expression = other,
+ super(name, other.kind, other.modifiers, enclosing, false) {
+ functionSignatureCache = other.functionSignature;
+ }
+
+ FunctionExpression get node => expression.node;
+
+ FunctionExpression parseNode(DiagnosticListener listener) => node;
+}
+
// The box-element for a scope, and the captured variables that need to be
// stored in the box.
class ClosureScope {
diff --git a/sdk/lib/_internal/compiler/implementation/compile_time_constants.dart b/sdk/lib/_internal/compiler/implementation/compile_time_constants.dart
index c091eb2..4bdad14 100644
--- a/sdk/lib/_internal/compiler/implementation/compile_time_constants.dart
+++ b/sdk/lib/_internal/compiler/implementation/compile_time_constants.dart
@@ -787,7 +787,7 @@
class ConstructorEvaluator extends CompileTimeConstantEvaluator {
final InterfaceType constructedType;
- final FunctionElement constructor;
+ final ConstructorElement constructor;
final Map<Element, Constant> definitions;
final Map<Element, Constant> fieldValues;
@@ -884,7 +884,7 @@
Function compileArgument = (element) => definitions[element];
Function compileConstant = handler.compileConstant;
- FunctionElement target = constructor.targetConstructor.implementation;
+ FunctionElement target = constructor.definingConstructor.implementation;
Selector.addForwardingElementArgumentsToList(constructor,
compiledArguments,
target,
diff --git a/sdk/lib/_internal/compiler/implementation/dart2jslib.dart b/sdk/lib/_internal/compiler/implementation/dart2jslib.dart
index 137bd26..1a6f5f4 100644
--- a/sdk/lib/_internal/compiler/implementation/dart2jslib.dart
+++ b/sdk/lib/_internal/compiler/implementation/dart2jslib.dart
@@ -19,8 +19,7 @@
PrefixElementX,
VoidElementX,
AnalyzableElement,
- DeferredLoaderGetterElementX,
- SynthesizedCallMethodElementX;
+ DeferredLoaderGetterElementX;
import 'helpers/helpers.dart';
import 'js_backend/js_backend.dart' as js_backend;
import 'native_handler.dart' as native;
diff --git a/sdk/lib/_internal/compiler/implementation/dart_backend/dart_codegen.dart b/sdk/lib/_internal/compiler/implementation/dart_backend/dart_codegen.dart
index 18f094d..bd243e5 100644
--- a/sdk/lib/_internal/compiler/implementation/dart_backend/dart_codegen.dart
+++ b/sdk/lib/_internal/compiler/implementation/dart_backend/dart_codegen.dart
@@ -170,6 +170,10 @@
}
}
+ void visitContinue(tree.Continue stmt) {
+ statementBuffer.add(new Continue(stmt.target.name));
+ }
+
void visitIf(tree.If stmt) {
Expression condition = visitExpression(stmt.condition);
List<Statement> savedBuffer = statementBuffer;
@@ -182,6 +186,18 @@
statementBuffer = savedBuffer;
}
+ void visitWhile(tree.While stmt) {
+ Expression condition = new Literal(new dart2js.BoolConstant(true));
+ List<Statement> savedBuffer = statementBuffer;
+ statementBuffer = <Statement>[];
+ visitStatement(stmt.body);
+ savedBuffer.add(
+ new LabeledStatement(
+ stmt.label.name,
+ new While(condition, new Block(statementBuffer))));
+ statementBuffer = savedBuffer;
+ }
+
Expression visitConstant(tree.Constant exp) {
return emitConstant(exp.value);
}
diff --git a/sdk/lib/_internal/compiler/implementation/dart_backend/dart_tree.dart b/sdk/lib/_internal/compiler/implementation/dart_backend/dart_tree.dart
index 99fcad4..4d93221 100644
--- a/sdk/lib/_internal/compiler/implementation/dart_backend/dart_tree.dart
+++ b/sdk/lib/_internal/compiler/implementation/dart_backend/dart_tree.dart
@@ -10,10 +10,7 @@
ClassElement;
import '../universe/universe.dart';
import '../ir/ir_nodes.dart' as ir;
-import '../tree/tree.dart' as ast;
-import '../scanner/scannerlib.dart';
import '../dart_types.dart' show DartType, GenericType;
-import '../helpers/helpers.dart';
import '../universe/universe.dart' show Selector;
// The Tree language is the target of translation out of the CPS-based IR.
@@ -41,7 +38,6 @@
* The base class of [Expression]s.
*/
abstract class Expression extends Node {
- bool get isPure;
accept(Visitor v);
/// Temporary variable used by [StatementRewriter].
@@ -102,8 +98,6 @@
Variable(this.element);
- final bool isPure = true;
-
accept(Visitor visitor) => visitor.visitVariable(this);
}
@@ -127,8 +121,6 @@
InvokeStatic(this.target, this.selector, this.arguments);
- final bool isPure = false;
-
accept(Visitor visitor) => visitor.visitInvokeStatic(this);
}
@@ -147,8 +139,6 @@
assert(receiver != null);
}
- final bool isPure = false;
-
accept(Visitor visitor) => visitor.visitInvokeMethod(this);
}
@@ -165,8 +155,6 @@
ClassElement get targetClass => target.enclosingElement;
- final bool isPure = false;
-
accept(Visitor visitor) => visitor.visitInvokeConstructor(this);
}
@@ -176,8 +164,6 @@
ConcatenateStrings(this.arguments);
- final bool isPure = false; // invokes toString
-
accept(Visitor visitor) => visitor.visitConcatenateStrings(this);
}
@@ -189,8 +175,6 @@
Constant(this.value);
- final bool isPure = true;
-
accept(Visitor visitor) => visitor.visitConstant(this);
}
@@ -202,14 +186,6 @@
Conditional(this.condition, this.thenExpression, this.elseExpression);
- // TODO(asgerf): Repeatedly computing isPure is potentially expensive,
- // but caching isPure in a field is dangerous because a subexpression could
- // become impure during a transformation (e.g. assignment propagation).
- // Improve the situation somehow.
- bool get isPure => condition.isPure &&
- thenExpression.isPure &&
- elseExpression.isPure;
-
accept(Visitor visitor) => visitor.visitConditional(this);
}
@@ -226,8 +202,6 @@
String get operator => isAnd ? '&&' : '||';
- bool get isPure => left.isPure && right.isPure;
-
accept(Visitor visitor) => visitor.visitLogicalOperator(this);
}
@@ -237,8 +211,6 @@
Not(this.operand);
- bool get isPure => operand.isPure;
-
accept(Visitor visitor) => visitor.visitNot(this);
}
@@ -319,6 +291,21 @@
}
/**
+ * A continue to an enclosing [While] loop. The continue targets the
+ * loop's body.
+ */
+class Continue extends Statement {
+ Label target;
+
+ Statement get next => null;
+ void set next(Statement s) => throw 'UNREACHABLE';
+
+ Continue(this.target);
+
+ accept(Visitor visitor) => visitor.visitContinue(this);
+}
+
+/**
* A conditional branch based on the true value of an [Expression].
*/
class If extends Statement {
@@ -334,6 +321,22 @@
accept(Visitor visitor) => visitor.visitIf(this);
}
+/**
+ * A labeled while(true) loop.
+ */
+class While extends Statement {
+ final Label label;
+ Statement body;
+
+ While(this.label, this.body);
+
+ Statement get next => null;
+ void set next(Statement s) => throw 'UNREACHABLE';
+
+ accept(Visitor visitor) => visitor.visitWhile(this);
+}
+
+
class ExpressionStatement extends Statement {
Statement next;
Expression expression;
@@ -367,7 +370,9 @@
S visitAssign(Assign node);
S visitReturn(Return node);
S visitBreak(Break node);
+ S visitContinue(Continue node);
S visitIf(If node);
+ S visitWhile(While node);
S visitExpressionStatement(ExpressionStatement node);
}
@@ -491,7 +496,7 @@
Statement visitLetCont(ir.LetCont node) {
Label label;
- if (!node.continuation.hasAtMostOneUse) {
+ if (node.continuation.hasMultipleUses) {
label = new Label();
labels[node.continuation] = label;
}
@@ -499,7 +504,15 @@
if (p.hasAtLeastOneUse) variables[p] = new Variable(null);
});
Statement body = visit(node.body);
- if (label == null) return body;
+ // The continuation's body is not always translated directly here because
+ // it may have been already translated:
+ // * For singly-used continuations, the continuation's body is
+ // translated at the site of the continuation invocation.
+ // * For recursive continuations, there is a single non-recursive
+ // invocation. The continuation's body is translated at the site
+ // of the non-recursive continuation invocation.
+ // See visitInvokeContinuation for the implementation.
+ if (label == null || node.continuation.isRecursive) return body;
return new LabeledStatement(label, body, visit(node.continuation.body));
}
@@ -575,9 +588,28 @@
} else {
List<Expression> arguments = translateArguments(node.arguments);
return buildParameterAssignments(cont.parameters, arguments,
- () => cont.hasExactlyOneUse
- ? visit(cont.body)
- : new Break(labels[cont]));
+ () {
+ // Translate invocations of recursive and non-recursive
+ // continuations differently.
+ // * Non-recursive continuations
+ // - If there is one use, translate the continuation body
+ // inline at the invocation site.
+ // - If there are multiple uses, translate to Break.
+ // * Recursive continuations
+ // - There is a single non-recursive invocation. Translate
+ // the continuation body inline as a labeled loop at the
+ // invocation site.
+ // - Translate the recursive invocations to Continue.
+ if (cont.isRecursive) {
+ return node.isRecursive
+ ? new Continue(labels[cont])
+ : new While(labels[cont], visit(cont.body));
+ } else {
+ return cont.hasExactlyOneUse
+ ? visit(cont.body)
+ : new Break(labels[cont]);
+ }
+ });
}
}
@@ -707,8 +739,7 @@
*/
class StatementRewriter extends Visitor<Statement, Expression> {
// The binding environment. The rightmost element of the list is the nearest
- // enclosing binding.
- // We use null to mark an impure expressions that does not bind a variable.
+ // available enclosing binding.
List<Assign> environment;
/// Substitution map for labels. Any break to a label L should be substituted
@@ -738,35 +769,12 @@
// Propagate a variable's definition to its use site if:
// 1. It has a single use, to avoid code growth and potential duplication
// of side effects, AND
- // 2a. It is pure (i.e., does not have side effects that prevent it from
- // being moved), OR
- // 2b. There are only pure expressions between the definition and use.
-
- // TODO(kmillikin): It's not always beneficial to propagate pure
- // definitions---it can prevent propagation of their inputs. Implement
- // a heuristic to avoid this.
-
- // TODO(kmillikin): Replace linear search with something faster in
- // practice.
- bool seenImpure = false;
- for (int i = environment.length - 1; i >= 0; --i) {
- if (environment[i] == null) {
- seenImpure = true;
- continue;
- }
- if (environment[i].variable == node) {
- if ((!seenImpure || environment[i].definition.isPure)
- && environment[i].hasExactlyOneUse) {
- // Use the definition if it is pure or if it is the first impure
- // definition (i.e., propagating past only pure expressions).
- return visitExpression(environment.removeAt(i).definition);
- }
- break;
- } else if (!environment[i].definition.isPure) {
- // Once the first impure definition is seen, impure definitions should
- // no longer be propagated. Continue searching for a pure definition.
- seenImpure = true;
- }
+ // 2. It was the most recent expression evaluated so that we do not
+ // reorder expressions with side effects.
+ if (!environment.isEmpty &&
+ environment.last.variable == node &&
+ environment.last.hasExactlyOneUse) {
+ return visitExpression(environment.removeLast().definition);
}
// If the definition could not be propagated, leave the variable use.
return node;
@@ -821,10 +829,13 @@
Expression visitConditional(Conditional node) {
node.condition = visitExpression(node.condition);
- environment.add(null); // impure expressions may not propagate across branch
+ List<Assign> savedEnvironment = environment;
+ environment = <Assign>[];
node.thenExpression = visitExpression(node.thenExpression);
+ assert(environment.isEmpty);
node.elseExpression = visitExpression(node.elseExpression);
- environment.removeLast();
+ assert(environment.isEmpty);
+ environment = savedEnvironment;
return node;
}
@@ -861,6 +872,10 @@
return node;
}
+ Statement visitContinue(Continue node) {
+ return node;
+ }
+
Statement visitLabeledStatement(LabeledStatement node) {
if (node.next is Break) {
// Eliminate label if next is just a break statement
@@ -891,10 +906,18 @@
Statement visitIf(If node) {
node.condition = visitExpression(node.condition);
- environment.add(null); // impure expressions may not propagate across branch
+ // Do not propagate assignments into branches. Doing so will lead to code
+ // duplication.
+ // TODO(kmillikin): Rethink this. Propagating some assignments (e.g.,
+ // constants or variables) is benign. If they can occur here, they should
+ // be handled well.
+ List<Assign> savedEnvironment = environment;
+ environment = <Assign>[];
node.thenStatement = visitStatement(node.thenStatement);
+ assert(environment.isEmpty);
node.elseStatement = visitStatement(node.elseStatement);
- environment.removeLast();
+ assert(environment.isEmpty);
+ environment = savedEnvironment;
tryCollapseIf(node);
@@ -913,19 +936,34 @@
return node;
}
+ Statement visitWhile(While node) {
+ // Do not propagate assignments into loops. Doing so is not safe for
+ // variables modified in the loop (the initial value will be propagated).
+ List<Assign> savedEnvironment = environment;
+ environment = <Assign>[];
+ node.body = visitStatement(node.body);
+ assert(environment.isEmpty);
+ environment = savedEnvironment;
+ return node;
+ }
+
Expression visitConstant(Constant node) {
return node;
}
Statement visitExpressionStatement(ExpressionStatement node) {
node.expression = visitExpression(node.expression);
- if (!node.expression.isPure) {
- environment.add(null); // insert impurity marker (TODO: refactor)
- }
+ // Do not allow propagation of assignments past an expression evaluated
+ // for its side effects because it risks reordering side effects.
+ // TODO(kmillikin): Rethink this. Some propagation is benign, e.g.,
+ // constants, variables, or other pure values that are not destroyed by
+ // the expression statement. If they can occur here they should be
+ // handled well.
+ List<Assign> savedEnvironment = environment;
+ environment = <Assign>[];
node.next = visitStatement(node.next);
- if (!node.expression.isPure) {
- environment.removeLast();
- }
+ assert(environment.isEmpty);
+ environment = savedEnvironment;
return node;
}
@@ -1067,10 +1105,12 @@
outerIf.thenStatement = innerThen;
--innerElse.target.breakCount;
- // Try to inline the remaining break
- environment.add(null); // Do not propagate impure definitions
+ // Try to inline the remaining break. Do not propagate assignments.
+ List<Assign> savedEnvironment = environment;
+ environment = <Assign>[];
outerIf.elseStatement = visitStatement(outerElse);
- environment.removeLast();
+ assert(environment.isEmpty);
+ environment = savedEnvironment;
return outerIf.elseStatement is If && innerThen is Break;
}
@@ -1174,6 +1214,10 @@
return node;
}
+ Statement visitContinue(Continue node) {
+ return node;
+ }
+
bool isFallthroughBreak(Statement node) {
return node is Break && node.target.binding.next == fallthrough;
}
@@ -1218,6 +1262,11 @@
return node;
}
+ Statement visitWhile(While node) {
+ node.body = visitStatement(node.body);
+ return node;
+ }
+
Statement visitExpressionStatement(ExpressionStatement node) {
// TODO(asgerf): in non-checked mode we can remove Not from the expression.
node.expression = visitExpression(node.expression);
diff --git a/sdk/lib/_internal/compiler/implementation/dart_backend/dart_tree_printer.dart b/sdk/lib/_internal/compiler/implementation/dart_backend/dart_tree_printer.dart
index 2c191ec..4e57587 100644
--- a/sdk/lib/_internal/compiler/implementation/dart_backend/dart_tree_printer.dart
+++ b/sdk/lib/_internal/compiler/implementation/dart_backend/dart_tree_printer.dart
@@ -9,7 +9,6 @@
import '../scanner/scannerlib.dart';
import '../util/util.dart';
import '../dart2jslib.dart' as dart2js;
-import '../util/characters.dart' as characters;
import '../elements/elements.dart' as elements;
import '../dart_types.dart' as types;
diff --git a/sdk/lib/_internal/compiler/implementation/dart_backend/tree_tracer.dart b/sdk/lib/_internal/compiler/implementation/dart_backend/tree_tracer.dart
index c636063..fbeea76 100644
--- a/sdk/lib/_internal/compiler/implementation/dart_backend/tree_tracer.dart
+++ b/sdk/lib/_internal/compiler/implementation/dart_backend/tree_tracer.dart
@@ -27,9 +27,10 @@
// the list.
final List<Block> blocks = [new Block()..index = 0];
- // Map tree [Label]s (break targets) and [Statement]s (if targets) to
- // blocks.
+ // Map tree [Label]s (break or continue targets) and [Statement]s
+ // (if targets) to blocks.
final Map<Label, Block> breakTargets = <Label, Block>{};
+ final Map<Label, Block> continueTargets = <Label, Block>{};
final Map<Statement, Block> ifTargets = <Statement, Block>{};
void _addStatement(Statement statement) {
@@ -77,6 +78,11 @@
blocks.last.addEdgeTo(breakTargets[node.target]);
}
+ visitContinue(Continue node) {
+ _addStatement(node);
+ blocks.last.addEdgeTo(continueTargets[node.target]);
+ }
+
visitIf(If node) {
_addStatement(node);
Block thenTarget = new Block();
@@ -91,6 +97,15 @@
visitStatement(node.elseStatement);
}
+ visitWhile(While node) {
+ Block continueTarget = new Block();
+ continueTargets[node.label] = continueTarget;
+ blocks.last.addEdgeTo(continueTarget);
+ _addBlock(continueTarget);
+ _addStatement(node);
+ visitStatement(node.body);
+ }
+
visitExpressionStatement(ExpressionStatement node) {
_addStatement(node);
visitStatement(node.next);
@@ -204,6 +219,11 @@
printStatement(null, "break ${collector.breakTargets[node.target].name}");
}
+ visitContinue(Continue node) {
+ printStatement(null,
+ "continue ${collector.continueTargets[node.target].name}");
+ }
+
visitIf(If node) {
String condition = expr(node.condition);
String thenTarget = collector.ifTargets[node.thenStatement].name;
@@ -211,6 +231,10 @@
printStatement(null, "if $condition then $thenTarget else $elseTarget");
}
+ visitWhile(While node) {
+ printStatement(null, "while true do");
+ }
+
visitExpressionStatement(ExpressionStatement node) {
visitExpression(node.expression);
}
@@ -316,7 +340,9 @@
String visitAssign(Assign node) => visitStatement(node);
String visitReturn(Return node) => visitStatement(node);
String visitBreak(Break node) => visitStatement(node);
+ String visitContinue(Continue node) => visitStatement(node);
String visitIf(If node) => visitStatement(node);
+ String visitWhile(While node) => visitStatement(node);
String visitExpressionStatement(ExpressionStatement node) {
return visitStatement(node);
}
diff --git a/sdk/lib/_internal/compiler/implementation/elements/elements.dart b/sdk/lib/_internal/compiler/implementation/elements/elements.dart
index 256d0b5..327ccff 100644
--- a/sdk/lib/_internal/compiler/implementation/elements/elements.dart
+++ b/sdk/lib/_internal/compiler/implementation/elements/elements.dart
@@ -384,11 +384,6 @@
Scope buildScope();
- /// If the element is a forwarding constructor, [targetConstructor] holds
- /// the generative constructor that the forwarding constructor points to
- /// (possibly via other forwarding constructors).
- FunctionElement get targetConstructor;
-
void diagnose(Element context, DiagnosticListener listener);
TreeElements get treeElements;
@@ -986,6 +981,20 @@
/// Compute the type of the effective target of this constructor for an
/// instantiation site with type [:newType:].
InterfaceType computeEffectiveTargetType(InterfaceType newType);
+
+ /// If this is a synthesized constructor [definingConstructor] points to
+ /// the generative constructor from which this constructor was created.
+ /// Otherwise [definingConstructor] is `null`.
+ ///
+ /// Consider for instance this hierarchy:
+ ///
+ /// class C { C.c(a, {b}); }
+ /// class D {}
+ /// class E = C with D;
+ ///
+ /// Class `E` has a synthesized constructor, `E.c`, whose defining constructor
+ /// is `C.c`.
+ ConstructorElement get definingConstructor;
}
abstract class ConstructorBodyElement extends FunctionElement {
diff --git a/sdk/lib/_internal/compiler/implementation/elements/modelx.dart b/sdk/lib/_internal/compiler/implementation/elements/modelx.dart
index acc035b..03224a3 100644
--- a/sdk/lib/_internal/compiler/implementation/elements/modelx.dart
+++ b/sdk/lib/_internal/compiler/implementation/elements/modelx.dart
@@ -260,8 +260,6 @@
bool get isAbstract => modifiers.isAbstract;
bool isForeign(Compiler compiler) => library == compiler.foreignLibrary;
- FunctionElement get targetConstructor => null;
-
void diagnose(Element context, DiagnosticListener listener) {}
TreeElements get treeElements => enclosingElement.treeElements;
@@ -318,6 +316,10 @@
computeEffectiveTargetType(InterfaceType newType) => unsupported();
+ get definingConstructor => this;
+
+ FunctionElement asFunctionElement() => this;
+
String get message => '${messageKind.message(messageArguments)}';
String toString() => '<$name: $message>';
@@ -1589,23 +1591,8 @@
'$this.'));
return effectiveTargetType.substByContext(newType);
}
-}
-class SynthesizedCallMethodElementX extends FunctionElementX {
- final FunctionElement expression;
-
- SynthesizedCallMethodElementX(String name,
- FunctionElementX other,
- Element enclosing)
- : expression = other,
- super.tooMuchOverloading(name, other.kind,
- other.modifiers, enclosing,
- other.functionSignature,
- false);
-
- FunctionExpression get node => expression.node;
-
- FunctionExpression parseNode(DiagnosticListener listener) => node;
+ ConstructorElement get definingConstructor => null;
}
class DeferredLoaderGetterElementX extends FunctionElementX {
@@ -1686,11 +1673,11 @@
* constructors for mixin applications.
*/
class SynthesizedConstructorElementX extends ConstructorElementX {
- final ConstructorElement superMember;
+ final ConstructorElement definingConstructor;
final bool isDefaultConstructor;
SynthesizedConstructorElementX(String name,
- this.superMember,
+ this.definingConstructor,
Element enclosing,
this.isDefaultConstructor)
: super(name,
@@ -1709,8 +1696,6 @@
bool get isSynthesized => true;
- FunctionElement get targetConstructor => superMember;
-
FunctionSignature computeSignature(compiler) {
if (functionSignatureCache != null) return functionSignatureCache;
if (isDefaultConstructor) {
@@ -1719,18 +1704,16 @@
const <Element>[],
new FunctionType(this, enclosingClass.thisType));
}
- if (superMember.isErroneous) {
+ if (definingConstructor.isErroneous) {
return functionSignatureCache =
compiler.objectClass.localLookup('').computeSignature(compiler);
}
// TODO(johnniwinther): Ensure that the function signature (and with it the
// function type) substitutes type variables correctly.
- return functionSignatureCache = superMember.computeSignature(compiler);
+ return functionSignatureCache =
+ definingConstructor.computeSignature(compiler);
}
- get declaration => this;
- get implementation => this;
-
accept(ElementVisitor visitor) {
return visitor.visitFunctionElement(this);
}
diff --git a/sdk/lib/_internal/compiler/implementation/inferrer/simple_types_inferrer.dart b/sdk/lib/_internal/compiler/implementation/inferrer/simple_types_inferrer.dart
index 0ea976d..efa3e868 100644
--- a/sdk/lib/_internal/compiler/implementation/inferrer/simple_types_inferrer.dart
+++ b/sdk/lib/_internal/compiler/implementation/inferrer/simple_types_inferrer.dart
@@ -507,7 +507,8 @@
ClassElement cls = analyzedElement.enclosingClass;
if (analyzedElement.isSynthesized) {
node = analyzedElement;
- synthesizeForwardingCall(node, analyzedElement.targetConstructor);
+ ConstructorElement constructor = analyzedElement;
+ synthesizeForwardingCall(node, constructor.definingConstructor);
} else {
visitingInitializers = true;
visit(node.initializers);
diff --git a/sdk/lib/_internal/compiler/implementation/ir/ir_builder.dart b/sdk/lib/_internal/compiler/implementation/ir/ir_builder.dart
index c373e7d..1d5e07e 100644
--- a/sdk/lib/_internal/compiler/implementation/ir/ir_builder.dart
+++ b/sdk/lib/_internal/compiler/implementation/ir/ir_builder.dart
@@ -305,6 +305,10 @@
return null;
}
+ /// Given delimited builders for the arms of a branch, return a list of
+ /// fresh join-point continuation parameters for the join continuation.
+ /// Fill in [leftArguments] and [rightArguments] with the left and right
+ /// continuation invocation arguments.
List<ir.Parameter> createJoinParameters(IrBuilder leftBuilder,
List<ir.Primitive> leftArguments,
IrBuilder rightBuilder,
@@ -327,7 +331,7 @@
// different values reaching the join point and needs to be passed as an
// argument to the join point continuation.
for (int i = 0; i < assignedVars.length; ++i) {
- // The last assignments if any reaching the end of the two subterms.
+ // The last assignments, if any, reaching the end of the two subterms.
ir.Definition leftAssignment =
leftBuilder.isOpen ? leftBuilder.assignedVars[i] : null;
ir.Definition rightAssignment =
@@ -339,8 +343,7 @@
// left and right subterms we will still have a join continuation with
// possibly arguments passed to it. Such singly-used continuations
// are eliminated by the shrinking conversions.
- ir.Parameter parameter = new ir.Parameter(null);
- parameters.add(parameter);
+ parameters.add(new ir.Parameter(null));
leftArguments.add(leftAssignment == null
? leftBuilder.freeVars[i]
: leftAssignment);
@@ -352,6 +355,53 @@
return parameters;
}
+ /// Allocate loop join continuation parameters and fill in arguments.
+ ///
+ /// Given delimited builders for a test at the top (while, for, or for-in)
+ /// loop's condition and for the loop body, return a list of fresh
+ /// join-point continuation parameters for the loop join. Fill in
+ /// [entryArguments] with the arguments to the non-recursive continuation
+ /// invocation and [loopArguments] with the arguments to the recursive
+ /// continuation invocation.
+ ///
+ /// The [bodyBuilder] is assumed to be open, otherwise there is no join
+ /// necessary.
+ List<ir.Parameter> createLoopJoinParametersAndFillArguments(
+ List<ir.Primitive> entryArguments,
+ IrBuilder condBuilder,
+ IrBuilder bodyBuilder,
+ List<ir.Primitive> loopArguments) {
+ assert(bodyBuilder.isOpen);
+ // The loop condition and body are delimited --- assignedVars are still
+ // those reaching the entry to the loop.
+ assert(assignedVars.length == condBuilder.freeVars.length);
+ assert(assignedVars.length == bodyBuilder.freeVars.length);
+ assert(assignedVars.length <= condBuilder.assignedVars.length);
+ assert(assignedVars.length <= bodyBuilder.assignedVars.length);
+
+ List<ir.Parameter> parameters = <ir.Parameter>[];
+ // When the free variables in the loop body are computed later, the
+ // parameters are assumed to appear in the same order as they appear in
+ // the assignedVars list.
+ for (int i = 0; i < assignedVars.length; ++i) {
+ // Was there an assignment in the body?
+ ir.Definition reachingAssignment = bodyBuilder.assignedVars[i];
+ // If not, was there an assignment in the condition?
+ if (reachingAssignment == null) {
+ reachingAssignment = condBuilder.assignedVars[i];
+ }
+ // If not, no value needs to be passed to the join point.
+ if (reachingAssignment == null) continue;
+
+ parameters.add(new ir.Parameter(null));
+ ir.Definition entryAssignment = assignedVars[i];
+ entryArguments.add(
+ entryAssignment == null ? freeVars[i] : entryAssignment);
+ loopArguments.add(reachingAssignment);
+ }
+ return parameters;
+ }
+
void captureFreeVariables(IrBuilder leftBuilder,
IrBuilder rightBuilder,
List<ir.Parameter> parameters) {
@@ -375,6 +425,45 @@
}
}
+ /// Capture free variables in a test at the top loop.
+ ///
+ /// Capture the free variables in the condition and the body of a test at
+ /// the top loop (e.g., while, for, or for-in). Also updates the
+ /// builder's assigned variables to be those reaching the loop successor
+ /// statement.
+ void captureFreeLoopVariables(IrBuilder condBuilder,
+ IrBuilder bodyBuilder,
+ List<ir.Parameter> parameters) {
+ // Capturing loop-body variables differs from capturing variables for
+ // the predecessors of a non-recursive join-point continuation. The
+ // join point continuation parameters are in scope for the condition
+ // and body in the case of a loop.
+ int parameterIndex = 0;
+ // The parameters are assumed to be in the same order as the corresponding
+ // variables appear in the assignedVars list.
+ for (int i = 0; i < assignedVars.length; ++i) {
+ // Add recursive join continuation parameters as assignments for the
+ // join body, if there is a join continuation (parameters != null).
+ // This is done first because free occurrences in the loop should be
+ // captured by the join continuation parameters.
+ if (parameters != null &&
+ (condBuilder.assignedVars[i] != null ||
+ bodyBuilder.assignedVars[i] != null)) {
+ assignedVars[i] = parameters[parameterIndex++];
+ }
+ ir.Definition reachingDefinition =
+ assignedVars[i] == null ? freeVars[i] : assignedVars[i];
+ // Free variables in the body can be captured by assignments in the
+ // condition.
+ if (condBuilder.assignedVars[i] == null) {
+ reachingDefinition.substituteFor(bodyBuilder.freeVars[i]);
+ } else {
+ condBuilder.assignedVars[i].substituteFor(bodyBuilder.freeVars[i]);
+ }
+ reachingDefinition.substituteFor(condBuilder.freeVars[i]);
+ }
+ }
+
ir.Primitive visitIf(ast.If node) {
assert(isOpen);
ir.Primitive condition = visit(node.condition);
@@ -437,6 +526,65 @@
return null;
}
+ ir.Primitive visitWhile(ast.While node) {
+ assert(isOpen);
+ // While loops use three named continuations: the entry to the body,
+ // the loop exit (break), and the loop back edge (continue).
+ // The CPS translation [[while (condition) body; successor]] is:
+ //
+ // let cont break() = [[successor]] in
+ // let cont continue(x, ...) =
+ // let cont body() = [[body]]; continue(v, ...) in
+ // let prim cond = [[condition]] in
+ // branch cond (body, break) in
+ // continue(v, ...)
+
+ // The condition and body are delimited.
+ IrBuilder condBuilder = new IrBuilder.delimited(this);
+ IrBuilder bodyBuilder = new IrBuilder.delimited(this);
+ ir.Primitive condition = condBuilder.visit(node.condition);
+ bodyBuilder.visit(node.body);
+
+ // Create body entry and loop exit continuations and a join-point
+ // continuation if control flow reaches the end of the body.
+ ir.Continuation bodyContinuation = new ir.Continuation([]);
+ ir.Continuation breakContinuation = new ir.Continuation([]);
+ condBuilder.add(new ir.Branch(new ir.IsTrue(condition),
+ bodyContinuation,
+ breakContinuation));
+ ir.Continuation continueContinuation;
+ List<ir.Parameter> parameters;
+ List<ir.Primitive> entryArguments = <ir.Primitive>[]; // The forward edge.
+ if (bodyBuilder.isOpen) {
+ List<ir.Primitive> loopArguments = <ir.Primitive>[]; // The back edge.
+ parameters =
+ createLoopJoinParametersAndFillArguments(entryArguments, condBuilder,
+ bodyBuilder, loopArguments);
+ continueContinuation = new ir.Continuation(parameters);
+ continueContinuation.body =
+ new ir.LetCont(bodyContinuation, condBuilder.root);
+ bodyBuilder.add(
+ new ir.InvokeContinuation(continueContinuation, loopArguments,
+ recursive:true));
+ }
+ bodyContinuation.body = bodyBuilder.root;
+
+ // Capture free variable occurrences in the loop body.
+ captureFreeLoopVariables(condBuilder, bodyBuilder, parameters);
+
+ if (continueContinuation != null) {
+ add(new ir.LetCont(breakContinuation,
+ new ir.LetCont(continueContinuation,
+ new ir.InvokeContinuation(continueContinuation,
+ entryArguments))));
+ } else {
+ add(new ir.LetCont(breakContinuation,
+ new ir.LetCont(bodyContinuation,
+ condBuilder.root)));
+ }
+ return null;
+ }
+
ir.Primitive visitVariableDefinitions(ast.VariableDefinitions node) {
assert(isOpen);
for (ast.Node definition in node.definitions.nodes) {
@@ -640,7 +788,7 @@
int index = variableIndex[element];
ir.Primitive value = assignedVars[index];
return value == null ? freeVars[index] : value;
- } else if (Elements.isInstanceField(element)) {
+ } else if (element == null || Elements.isInstanceField(element)) {
ir.Primitive receiver = visit(node.receiver);
ir.Parameter v = new ir.Parameter(null);
ir.Continuation k = new ir.Continuation([v]);
diff --git a/sdk/lib/_internal/compiler/implementation/ir/ir_nodes.dart b/sdk/lib/_internal/compiler/implementation/ir/ir_nodes.dart
index 4e99e50..a9cabeb 100644
--- a/sdk/lib/_internal/compiler/implementation/ir/ir_nodes.dart
+++ b/sdk/lib/_internal/compiler/implementation/ir/ir_nodes.dart
@@ -32,6 +32,7 @@
bool get hasAtMostOneUse => firstRef == null || firstRef.nextRef == null;
bool get hasExactlyOneUse => firstRef != null && firstRef.nextRef == null;
bool get hasAtLeastOneUse => firstRef != null;
+ bool get hasMultipleUses => !hasAtMostOneUse;
void substituteFor(Definition other) {
if (other.firstRef == null) return;
@@ -203,9 +204,18 @@
final Reference continuation;
final List<Reference> arguments;
- InvokeContinuation(Continuation cont, List<Definition> args)
+ // An invocation of a continuation is recursive if it occurs in the body of
+ // the continuation itself.
+ bool isRecursive;
+
+ InvokeContinuation(Continuation cont, List<Definition> args,
+ {recursive: false})
: continuation = new Reference(cont),
- arguments = args.map((t) => new Reference(t)).toList(growable: false);
+ arguments = args.map((t) => new Reference(t)).toList(growable: false),
+ isRecursive = recursive {
+ if (recursive) cont.isRecursive = true;
+ }
+
accept(Visitor visitor) => visitor.visitInvokeContinuation(this);
}
@@ -257,6 +267,9 @@
final List<Parameter> parameters;
Expression body = null;
+ // A continuation is recursive if it has any recursive invocations.
+ bool isRecursive = false;
+
Continuation(this.parameters);
Continuation.retrn() : parameters = null;
@@ -329,7 +342,7 @@
return name;
})
.join(' ');
- return '(FunctionDefinition ($parameters) ${visit(node.body)})';
+ return '(FunctionDefinition ($parameters return) ${visit(node.body)})';
}
String visitLetPrim(LetPrim node) {
@@ -352,14 +365,15 @@
.join('');
String contBody = visit(node.continuation.body);
String body = visit(node.body);
- return '(LetCont ($cont$parameters) $contBody) $body';
+ String op = node.continuation.isRecursive ? 'LetCont*' : 'LetCont';
+ return '($op ($cont$parameters) $contBody) $body';
}
String formatArguments(Invoke node) {
int positionalArgumentCount = node.selector.positionalArgumentCount;
List<String> args = new List<String>();
args.addAll(node.arguments.getRange(0, positionalArgumentCount)
- .map((v) => names[v.definition.toString()]));
+ .map((v) => names[v.definition]));
for (int i = 0; i < node.selector.namedArgumentCount; ++i) {
String name = node.selector.namedArguments[i];
Definition arg = node.arguments[positionalArgumentCount + i].definition;
@@ -372,7 +386,7 @@
String name = node.target.name;
String cont = names[node.continuation.definition];
String args = formatArguments(node);
- return '(InvokeStatic $name $cont $args)';
+ return '(InvokeStatic $name $args $cont)';
}
String visitInvokeMethod(InvokeMethod node) {
@@ -380,7 +394,7 @@
String rcv = names[node.receiver.definition];
String cont = names[node.continuation.definition];
String args = formatArguments(node);
- return '(InvokeMethod $rcv $name $cont $args)';
+ return '(InvokeMethod $rcv $name $args $cont)';
}
String visitInvokeConstructor(InvokeConstructor node) {
@@ -392,19 +406,21 @@
}
String cont = names[node.continuation.definition];
String args = formatArguments(node);
- return '(InvokeConstructor $callName $cont $args)';
+ return '(InvokeConstructor $callName $args $cont)';
}
String visitConcatenateStrings(ConcatenateStrings node) {
String cont = names[node.continuation.definition];
String args = node.arguments.map((v) => names[v.definition]).join(' ');
- return '(ConcatenateStrings $cont $args)';
+ return '(ConcatenateStrings $args $cont)';
}
String visitInvokeContinuation(InvokeContinuation node) {
String cont = names[node.continuation.definition];
String args = node.arguments.map((v) => names[v.definition]).join(' ');
- return '(InvokeContinuation $cont $args)';
+ String op =
+ node.isRecursive ? 'InvokeContinuation*' : 'InvokeContinuation';
+ return '($op $cont $args)';
}
String visitBranch(Branch node) {
diff --git a/sdk/lib/_internal/compiler/implementation/resolution/members.dart b/sdk/lib/_internal/compiler/implementation/resolution/members.dart
index b95c031..bf1cf22 100644
--- a/sdk/lib/_internal/compiler/implementation/resolution/members.dart
+++ b/sdk/lib/_internal/compiler/implementation/resolution/members.dart
@@ -451,8 +451,9 @@
}
if (element.isSynthesized) {
if (isConstructor) {
+ ConstructorElement constructor = element.asFunctionElement();
TreeElements elements = _ensureTreeElements(element);
- Element target = element.targetConstructor;
+ ConstructorElement target = constructor.definingConstructor;
// Ensure the signature of the synthesized element is
// resolved. This is the only place where the resolver is
// seeing this element.
@@ -4107,7 +4108,7 @@
constructor.computeSignature(compiler).parameterCount == 0;
}
- FunctionElement createForwardingConstructor(FunctionElement target,
+ FunctionElement createForwardingConstructor(ConstructorElement target,
ClassElement enclosing) {
return new SynthesizedConstructorElementX(
target.name, target, enclosing, false);
diff --git a/sdk/lib/_internal/compiler/implementation/ssa/builder.dart b/sdk/lib/_internal/compiler/implementation/ssa/builder.dart
index fcef110..6a57a39 100644
--- a/sdk/lib/_internal/compiler/implementation/ssa/builder.dart
+++ b/sdk/lib/_internal/compiler/implementation/ssa/builder.dart
@@ -1738,7 +1738,7 @@
* Invariant: The [constructor] and elements in [constructors] must all be
* implementation elements.
*/
- void buildInitializers(FunctionElement constructor,
+ void buildInitializers(ConstructorElement constructor,
List<FunctionElement> constructors,
Map<Element, HInstruction> fieldValues) {
assert(invariant(constructor, constructor.isImplementation));
@@ -1748,7 +1748,7 @@
return localsHandler.readLocal(element);
}
- Element target = constructor.targetConstructor.implementation;
+ Element target = constructor.definingConstructor.implementation;
Selector.addForwardingElementArgumentsToList(
constructor,
arguments,
diff --git a/sdk/lib/_internal/compiler/implementation/tree/unparser.dart b/sdk/lib/_internal/compiler/implementation/tree/unparser.dart
index 632741c..5917948 100644
--- a/sdk/lib/_internal/compiler/implementation/tree/unparser.dart
+++ b/sdk/lib/_internal/compiler/implementation/tree/unparser.dart
@@ -391,7 +391,7 @@
}
visitWhile(While node) {
- addToken(node.whileKeyword);
+ add(node.whileKeyword.value);
visit(node.condition);
visit(node.body);
}
diff --git a/sdk/lib/_internal/compiler/implementation/world.dart b/sdk/lib/_internal/compiler/implementation/world.dart
index 30f05a5..4b260f6 100644
--- a/sdk/lib/_internal/compiler/implementation/world.dart
+++ b/sdk/lib/_internal/compiler/implementation/world.dart
@@ -293,7 +293,7 @@
// expressions. In such a case, we have to look at the original
// function expressions's element.
// TODO(herhut): Generate classes for function expressions earlier.
- if (element is SynthesizedCallMethodElementX) {
+ if (element is closureMapping.SynthesizedCallMethodElementX) {
return getMightBePassedToApply(element.expression);
}
return functionsThatMightBePassedToApply.contains(element);
diff --git a/sdk/lib/_internal/pub/lib/src/barback/excluding_transformer.dart b/sdk/lib/_internal/pub/lib/src/barback/excluding_transformer.dart
index 0d9a965..9182ec6 100644
--- a/sdk/lib/_internal/pub/lib/src/barback/excluding_transformer.dart
+++ b/sdk/lib/_internal/pub/lib/src/barback/excluding_transformer.dart
@@ -19,7 +19,15 @@
Set<String> excludes) {
if (includes == null && excludes == null) return inner;
- return new ExcludingTransformer._(inner, includes, excludes);
+ if (inner is LazyTransformer) {
+ return new _LazyExcludingTransformer(
+ inner as LazyTransformer, includes, excludes);
+ } else if (inner is DeclaringTransformer) {
+ return new _DeclaringExcludingTransformer(
+ inner as DeclaringTransformer, includes, excludes);
+ } else {
+ return new ExcludingTransformer._(inner, includes, excludes);
+ }
}
final Transformer _inner;
@@ -56,3 +64,20 @@
String toString() => _inner.toString();
}
+
+class _DeclaringExcludingTransformer extends ExcludingTransformer
+ implements DeclaringTransformer {
+ _DeclaringExcludingTransformer(DeclaringTransformer inner,
+ Set<String> includes, Set<String> excludes)
+ : super._(inner as Transformer, includes, excludes);
+
+ Future declareOutputs(DeclaringTransform transform) =>
+ (_inner as DeclaringTransformer).declareOutputs(transform);
+}
+
+class _LazyExcludingTransformer extends _DeclaringExcludingTransformer
+ implements LazyTransformer {
+ _LazyExcludingTransformer(DeclaringTransformer inner,
+ Set<String> includes, Set<String> excludes)
+ : super(inner, includes, excludes);
+}
diff --git a/sdk/lib/_internal/pub/test/serve/supports_user_defined_declaring_transformers.dart b/sdk/lib/_internal/pub/test/serve/supports_user_defined_declaring_transformers_test.dart
similarity index 67%
rename from sdk/lib/_internal/pub/test/serve/supports_user_defined_declaring_transformers.dart
rename to sdk/lib/_internal/pub/test/serve/supports_user_defined_declaring_transformers_test.dart
index d066b2d..20d622b 100644
--- a/sdk/lib/_internal/pub/test/serve/supports_user_defined_declaring_transformers.dart
+++ b/sdk/lib/_internal/pub/test/serve/supports_user_defined_declaring_transformers_test.dart
@@ -8,31 +8,6 @@
import '../test_pub.dart';
import 'utils.dart';
-const LAZY_TRANSFORMER = """
-import 'dart:async';
-
-import 'package:barback/barback.dart';
-
-class LazyRewriteTransformer extends Transformer implements LazyTransformer {
- LazyRewriteTransformer.asPlugin();
-
- String get allowedExtensions => '.in';
-
- Future apply(Transform transform) {
- transform.logger.info('Rewriting \${transform.primaryInput.id}.');
- return transform.primaryInput.readAsString().then((contents) {
- var id = transform.primaryInput.id.changeExtension(".mid");
- transform.addOutput(new Asset.fromString(id, "\$contents.mid"));
- });
- }
-
- Future declareOutputs(DeclaringTransform transform) {
- transform.declareOutput(transform.primaryId.changeExtension(".mid"));
- return new Future.value();
- }
-}
-""";
-
const DECLARING_TRANSFORMER = """
import 'dart:async';
@@ -42,7 +17,7 @@
implements DeclaringTransformer {
DeclaringRewriteTransformer.asPlugin();
- String get allowedExtensions => '.mid';
+ String get allowedExtensions => '.out';
Future apply(Transform transform) {
transform.logger.info('Rewriting \${transform.primaryInput.id}.');
@@ -75,7 +50,7 @@
d.file("declaring.dart", DECLARING_TRANSFORMER)
])]),
d.dir("web", [
- d.file("foo.in", "foo")
+ d.file("foo.txt", "foo")
])
]).create();
@@ -85,12 +60,12 @@
// The build should complete without either transformer logging anything.
server.stdout.expect('Build completed successfully');
- requestShouldSucceed("foo.final", "foo.mid.final");
+ requestShouldSucceed("foo.final", "foo.out.final");
server.stdout.expect(emitsLines(
'[Info from LazyRewrite]:\n'
- 'Rewriting myapp|web/foo.in.\n'
+ 'Rewriting myapp|web/foo.txt.\n'
'[Info from DeclaringRewrite]:\n'
- 'Rewriting myapp|web/foo.mid.'));
+ 'Rewriting myapp|web/foo.out.'));
endPubServe();
});
}
diff --git a/sdk/lib/_internal/pub/test/serve/supports_user_defined_lazy_transformers_test.dart b/sdk/lib/_internal/pub/test/serve/supports_user_defined_lazy_transformers_test.dart
index fd88315..6a64b5d 100644
--- a/sdk/lib/_internal/pub/test/serve/supports_user_defined_lazy_transformers_test.dart
+++ b/sdk/lib/_internal/pub/test/serve/supports_user_defined_lazy_transformers_test.dart
@@ -8,31 +8,6 @@
import '../test_pub.dart';
import 'utils.dart';
-const TRANSFORMER = """
-import 'dart:async';
-
-import 'package:barback/barback.dart';
-
-class LazyRewriteTransformer extends Transformer implements LazyTransformer {
- LazyRewriteTransformer.asPlugin();
-
- String get allowedExtensions => '.txt';
-
- Future apply(Transform transform) {
- transform.logger.info('Rewriting \${transform.primaryInput.id}.');
- return transform.primaryInput.readAsString().then((contents) {
- var id = transform.primaryInput.id.changeExtension(".out");
- transform.addOutput(new Asset.fromString(id, "\$contents.out"));
- });
- }
-
- Future declareOutputs(DeclaringTransform transform) {
- transform.declareOutput(transform.primaryId.changeExtension(".out"));
- return new Future.value();
- }
-}
-""";
-
main() {
initConfig();
integration("supports a user-defined lazy transformer", () {
@@ -42,7 +17,7 @@
"transformers": ["myapp/src/transformer"]
}),
d.dir("lib", [d.dir("src", [
- d.file("transformer.dart", TRANSFORMER)
+ d.file("transformer.dart", LAZY_TRANSFORMER)
])]),
d.dir("web", [
d.file("foo.txt", "foo")
diff --git a/sdk/lib/_internal/pub/test/serve/utils.dart b/sdk/lib/_internal/pub/test/serve/utils.dart
index 0b74fc8..a1637f1 100644
--- a/sdk/lib/_internal/pub/test/serve/utils.dart
+++ b/sdk/lib/_internal/pub/test/serve/utils.dart
@@ -57,6 +57,32 @@
}
""";
+/// The code for a lazy version of [REWRITE_TRANSFORMER].
+const LAZY_TRANSFORMER = """
+import 'dart:async';
+
+import 'package:barback/barback.dart';
+
+class LazyRewriteTransformer extends Transformer implements LazyTransformer {
+ LazyRewriteTransformer.asPlugin();
+
+ String get allowedExtensions => '.txt';
+
+ Future apply(Transform transform) {
+ transform.logger.info('Rewriting \${transform.primaryInput.id}.');
+ return transform.primaryInput.readAsString().then((contents) {
+ var id = transform.primaryInput.id.changeExtension(".out");
+ transform.addOutput(new Asset.fromString(id, "\$contents.out"));
+ });
+ }
+
+ Future declareOutputs(DeclaringTransform transform) {
+ transform.declareOutput(transform.primaryId.changeExtension(".out"));
+ return new Future.value();
+ }
+}
+""";
+
/// The web socket error code for a directory not being served.
const NOT_SERVED = 1;
diff --git a/sdk/lib/_internal/pub/test/transformer/exclusion/works_on_dart2js_test.dart b/sdk/lib/_internal/pub/test/transformer/exclusion/works_on_dart2js_test.dart
index aedfbf9..13e821f 100644
--- a/sdk/lib/_internal/pub/test/transformer/exclusion/works_on_dart2js_test.dart
+++ b/sdk/lib/_internal/pub/test/transformer/exclusion/works_on_dart2js_test.dart
@@ -4,6 +4,7 @@
library pub_tests;
+import 'package:scheduled_test/scheduled_stream.dart';
import 'package:scheduled_test/scheduled_test.dart';
import '../../descriptor.dart' as d;
@@ -34,9 +35,17 @@
createLockFile('myapp', pkg: ['barback']);
- pubServe();
+ var server = pubServe();
+ // Dart2js should remain lazy.
+ server.stdout.expect("Build completed successfully");
+
requestShould404("a.dart.js");
requestShouldSucceed("b.dart.js", isNot(isEmpty));
+ server.stdout.expect(consumeThrough(emitsLines(
+ "[Info from Dart2JS]:\n"
+ "Compiling myapp|web/b.dart...")));
+ server.stdout.expect(consumeThrough("Build completed successfully"));
+
requestShould404("c.dart.js");
endPubServe();
});
diff --git a/sdk/lib/_internal/pub/test/transformer/exclusion/works_on_lazy_transformer_test.dart b/sdk/lib/_internal/pub/test/transformer/exclusion/works_on_lazy_transformer_test.dart
new file mode 100644
index 0000000..deef7a0
--- /dev/null
+++ b/sdk/lib/_internal/pub/test/transformer/exclusion/works_on_lazy_transformer_test.dart
@@ -0,0 +1,53 @@
+// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+library pub_tests;
+
+import 'package:scheduled_test/scheduled_stream.dart';
+import 'package:scheduled_test/scheduled_test.dart';
+
+import '../../descriptor.dart' as d;
+import '../../test_pub.dart';
+import '../../serve/utils.dart';
+
+main() {
+ initConfig();
+ integration("works on a lazy transformer", () {
+ d.dir(appPath, [
+ d.pubspec({
+ "name": "myapp",
+ "transformers": [
+ {
+ "myapp": {
+ "\$include": ["web/a.txt", "web/b.txt"],
+ "\$exclude": "web/a.txt"
+ }
+ }
+ ]
+ }),
+ d.dir("lib", [d.file("transformer.dart", LAZY_TRANSFORMER)]),
+ d.dir("web", [
+ d.file("a.txt", "a"),
+ d.file("b.txt", "b"),
+ d.file("c.txt", "c")
+ ])
+ ]).create();
+
+ createLockFile('myapp', pkg: ['barback']);
+
+ var server = pubServe();
+ // The transformer should remain lazy.
+ server.stdout.expect("Build completed successfully");
+
+ requestShould404("a.out");
+ requestShouldSucceed("b.out", isNot(isEmpty));
+ server.stdout.expect(consumeThrough(emitsLines(
+ "[Info from LazyRewrite]:\n"
+ "Rewriting myapp|web/b.txt.")));
+ server.stdout.expect(consumeThrough("Build completed successfully"));
+
+ requestShould404("c.out");
+ endPubServe();
+ });
+}
diff --git a/sdk/lib/html/dart2js/html_dart2js.dart b/sdk/lib/html/dart2js/html_dart2js.dart
index 392b2b9..38591f3 100644
--- a/sdk/lib/html/dart2js/html_dart2js.dart
+++ b/sdk/lib/html/dart2js/html_dart2js.dart
@@ -1595,11 +1595,6 @@
@Experimental() // untriaged
bool drawCustomFocusRing(Element element) native;
- @DomName('CanvasRenderingContext2D.drawSystemFocusRing')
- @DocsEditable()
- @Experimental() // untriaged
- void drawSystemFocusRing(Element element) native;
-
@DomName('CanvasRenderingContext2D.ellipse')
@DocsEditable()
@Experimental() // untriaged
@@ -22845,26 +22840,7 @@
@DomName('Screen.lockOrientation')
@DocsEditable()
@Experimental() // untriaged
- bool lockOrientation(orientation_OR_orientations) {
- if ((orientation_OR_orientations is String || orientation_OR_orientations == null)) {
- return _lockOrientation_1(orientation_OR_orientations);
- }
- if ((orientation_OR_orientations is List<String> || orientation_OR_orientations == null)) {
- List orientations_1 = convertDartToNative_StringArray(orientation_OR_orientations);
- return _lockOrientation_2(orientations_1);
- }
- throw new ArgumentError("Incorrect number or type of arguments");
- }
- @JSName('lockOrientation')
- @DomName('Screen.lockOrientation')
- @DocsEditable()
- @Experimental() // untriaged
- bool _lockOrientation_1(String orientation) native;
- @JSName('lockOrientation')
- @DomName('Screen.lockOrientation')
- @DocsEditable()
- @Experimental() // untriaged
- bool _lockOrientation_2(List orientations) native;
+ bool lockOrientation(String orientation) native;
@DomName('Screen.unlockOrientation')
@DocsEditable()
diff --git a/sdk/lib/html/dartium/html_dartium.dart b/sdk/lib/html/dartium/html_dartium.dart
index 45a23a1..06d047d 100644
--- a/sdk/lib/html/dartium/html_dartium.dart
+++ b/sdk/lib/html/dartium/html_dartium.dart
@@ -2229,11 +2229,6 @@
void _drawImage(canvas_OR_image_OR_imageBitmap_OR_video, num sx_OR_x, num sy_OR_y, [num sw_OR_width, num height_OR_sh, num dx, num dy, num dw, num dh]) => _blink.Native_CanvasRenderingContext2D__drawImage(this, canvas_OR_image_OR_imageBitmap_OR_video, sx_OR_x, sy_OR_y, sw_OR_width, height_OR_sh, dx, dy, dw, dh);
- @DomName('CanvasRenderingContext2D.drawSystemFocusRing')
- @DocsEditable()
- @Experimental() // untriaged
- void drawSystemFocusRing(Element element) => _blink.Native_CanvasRenderingContext2D_drawSystemFocusRing_Callback(this, element);
-
@DomName('CanvasRenderingContext2D.ellipse')
@DocsEditable()
@Experimental() // untriaged
@@ -24234,7 +24229,10 @@
@DocsEditable()
int get width => _blink.Native_Screen_width_Getter(this);
- bool lockOrientation(orientation_OR_orientations) => _blink.Native_Screen_lockOrientation(this, orientation_OR_orientations);
+ @DomName('Screen.lockOrientation')
+ @DocsEditable()
+ @Experimental() // untriaged
+ bool lockOrientation(String orientation) => _blink.Native_Screen_lockOrientation_Callback(this, orientation);
@DomName('Screen.unlockOrientation')
@DocsEditable()
diff --git a/sdk/lib/io/http_impl.dart b/sdk/lib/io/http_impl.dart
index 70b81b1..e9b610e 100644
--- a/sdk/lib/io/http_impl.dart
+++ b/sdk/lib/io/http_impl.dart
@@ -2035,7 +2035,15 @@
// HTTP server waiting for socket connections.
-class _HttpServer extends Stream<HttpRequest> implements HttpServer {
+class _HttpServer
+ extends Stream<HttpRequest> with _ServiceObject
+ implements HttpServer {
+ // Use default Map so we keep order.
+ static Map<int, _HttpServer> _servers = new Map<int, _HttpServer>();
+
+ final String _serviceTypePath = 'io/http/servers';
+ final String _serviceTypeName = 'HttpServer';
+
String serverHeader;
Duration _idleTimeout;
@@ -2067,12 +2075,14 @@
_controller = new StreamController<HttpRequest>(sync: true,
onCancel: close);
idleTimeout = const Duration(seconds: 120);
+ _servers[_serviceId] = this;
}
_HttpServer.listenOn(this._serverSocket) : _closeServer = false {
_controller = new StreamController<HttpRequest>(sync: true,
onCancel: close);
idleTimeout = const Duration(seconds: 120);
+ _servers[_serviceId] = this;
}
Duration get idleTimeout => _idleTimeout;
@@ -2139,17 +2149,18 @@
for (var c in _idleConnections.toList()) {
c.destroy();
}
- _maybeCloseSessionManager();
+ _maybePerformCleanup();
return result;
}
- void _maybeCloseSessionManager() {
+ void _maybePerformCleanup() {
if (closed &&
_idleConnections.isEmpty &&
_activeConnections.isEmpty &&
_sessionManagerInstance != null) {
_sessionManagerInstance.close();
_sessionManagerInstance = null;
+ _servers.remove(_serviceId);
}
}
@@ -2176,7 +2187,7 @@
void _connectionClosed(_HttpConnection connection) {
// Remove itself from either idle or active connections.
connection.unlink();
- _maybeCloseSessionManager();
+ _maybePerformCleanup();
}
void _markIdle(_HttpConnection connection) {
@@ -2215,6 +2226,24 @@
return result;
}
+ Map _toJSON(bool ref) {
+ var r = {
+ 'id': _servicePath,
+ 'type': _serviceType(ref),
+ 'name': '${address.host}:$port',
+ 'user_name': '${address.host}:$port',
+ };
+ if (ref) {
+ return r;
+ }
+ r['port'] = port;
+ r['address'] = address.host;
+ r['active'] = _activeConnections.length;
+ r['idle'] = _idleConnections.length;
+ r['closed'] = closed;
+ return r;
+ }
+
_HttpSessionManager _sessionManagerInstance;
// Indicated if the http server has been closed.
diff --git a/sdk/lib/io/io.dart b/sdk/lib/io/io.dart
index 70e7ff5..4cd9e3c 100644
--- a/sdk/lib/io/io.dart
+++ b/sdk/lib/io/io.dart
@@ -230,6 +230,7 @@
part 'platform.dart';
part 'platform_impl.dart';
part 'process.dart';
+part 'service_object.dart';
part 'socket.dart';
part 'stdio.dart';
part 'string_transformer.dart';
diff --git a/sdk/lib/io/iolib_sources.gypi b/sdk/lib/io/iolib_sources.gypi
index 67e98ee..5b82147 100644
--- a/sdk/lib/io/iolib_sources.gypi
+++ b/sdk/lib/io/iolib_sources.gypi
@@ -26,6 +26,7 @@
'platform.dart',
'platform_impl.dart',
'process.dart',
+ 'service_object.dart',
'socket.dart',
'stdio.dart',
'string_transformer.dart',
diff --git a/sdk/lib/io/service_object.dart b/sdk/lib/io/service_object.dart
new file mode 100644
index 0000000..43bd651
--- /dev/null
+++ b/sdk/lib/io/service_object.dart
@@ -0,0 +1,29 @@
+// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+part of dart.io;
+
+int _nextServiceId = 1;
+
+// TODO(ajohnsen): Use another way of getting a unique id.
+abstract class _ServiceObject {
+ int __serviceId = 0;
+ int get _serviceId {
+ if (__serviceId == 0) __serviceId = _nextServiceId++;
+ return __serviceId;
+ }
+
+ Map _toJSON(bool ref);
+
+ String get _servicePath => "$_serviceTypePath/$_serviceId";
+
+ String get _serviceTypePath;
+
+ String get _serviceTypeName;
+
+ String _serviceType(bool ref) {
+ if (ref) return "@$_serviceTypeName";
+ return _serviceTypeName;
+ }
+}
diff --git a/tests/co19/co19-dartium.status b/tests/co19/co19-dartium.status
index 08cfe6b..9f06144 100644
--- a/tests/co19/co19-dartium.status
+++ b/tests/co19/co19-dartium.status
@@ -211,6 +211,8 @@
LayoutTests/fast/dom/HTMLDialogElement/inert-node-is-unselectable_t01: RuntimeError # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/HTMLDialogElement/multiple-centered-dialogs_t01: RuntimeError # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/HTMLDialogElement/non-anchored-dialog-positioning_t01: RuntimeError # co19-roll r722: Please triage this failure.
+LayoutTests/fast/dom/HTMLOutputElement/dom-settable-token-list_t01: RuntimeError # Issue 18931
+LayoutTests/fast/dom/HTMLObjectElement/set-type-to-null-crash_t01: RuntimeError # Issue 18931
LayoutTests/fast/dom/HTMLDialogElement/show-modal-focusing-steps_t01: RuntimeError # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/HTMLDialogElement/submit-dialog-close-event_t01: RuntimeError # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/HTMLDialogElement/synthetic-click-inert_t01: RuntimeError # co19-roll r722: Please triage this failure.
@@ -218,7 +220,6 @@
LayoutTests/fast/dom/HTMLDialogElement/top-layer-position-static_t01: RuntimeError # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/HTMLElement/set-inner-outer-optimization_t01: RuntimeError # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/HTMLElement/spellcheck_t01: RuntimeError # co19-roll r722: Please triage this failure.
-LayoutTests/fast/dom/HTMLFormElement/adopt-assertion_t01: Timeout # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/HTMLFormElement/move-option-between-documents_t01: RuntimeError # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/HTMLImageElement/image-alt-text_t01: RuntimeError # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/HTMLImageElement/parse-src_t01: RuntimeError # co19-roll r722: Please triage this failure.
@@ -258,7 +259,6 @@
WebPlatformTest/shadow-dom/elements-and-dom-objects/extensions-to-element-interface/attributes/test-004_t01: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/shadow-dom/elements-and-dom-objects/extensions-to-element-interface/attributes/test-004_t02: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/shadow-dom/elements-and-dom-objects/extensions-to-element-interface/methods/elements-001_t01: RuntimeError # co19-roll r722: Please triage this failure.
-WebPlatformTest/shadow-dom/elements-and-dom-objects/extensions-to-event-interface/event-path-001_t01: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/shadow-dom/elements-and-dom-objects/the-content-html-element/test-006_t01: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/shadow-dom/elements-and-dom-objects/the-shadow-html-element/test-001_t01: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/shadow-dom/elements-and-dom-objects/the-shadow-html-element/test-005_t01: RuntimeError # co19-roll r722: Please triage this failure.
@@ -284,6 +284,7 @@
WebPlatformTest/shadow-dom/shadow-trees/distributed-pseudo-element/test-002_t01: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/shadow-dom/shadow-trees/lower-boundary-encapsulation/test-004_t01: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/shadow-dom/shadow-trees/satisfying-matching-criteria/test-007_t01: RuntimeError # co19-roll r722: Please triage this failure.
+WebPlatformTest/shadow-dom/shadow-trees/upper-boundary-encapsulation/ownerdocument-001_t01: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/shadow-dom/shadow-trees/upper-boundary-encapsulation/ownerdocument-002_t01: RuntimeError # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/HTMLDocument/active-element-gets-unforcusable_t01: Timeout, Pass # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/HTMLDocument/set-focus-on-valid-element_t01: Timeout, Pass # co19-roll r722: Please triage this failure.
@@ -299,9 +300,12 @@
LayoutTests/fast/dom/HTMLTemplateElement/custom-element-wrapper-gc_t01: RuntimeError # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/HTMLTemplateElement/innerHTML_t01: RuntimeError # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/HTMLTemplateElement/ownerDocumentXHTML_t01: RuntimeError # co19-roll r722: Please triage this failure.
+LayoutTests/fast/dom/MutationObserver/database-callback-delivery_t01: RuntimeError # Issue 18931
+LayoutTests/fast/dom/MutationObserver/observe-attributes_t01: RuntimeError # Issue 18931
LayoutTests/fast/dom/MutationObserver/observe-childList_t01: RuntimeError # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/MutationObserver/weak-callback-gc-crash_t01: RuntimeError # co19-roll r722: Please triage this failure.
LayoutTests/fast/dom/Node/initial-values_t01: RuntimeError # co19-roll r722: Please triage this failure.
+LayoutTests/fast/dom/NodeIterator/NodeIterator-basic_t01: RuntimeError # Issue 18931
LayoutTests/fast/dom/Range/range-created-during-remove-children_t01: RuntimeError, Pass # co19-roll r722: Please triage this failure.
LayoutTests/fast/html/imports/import-element-removed-flag_t01: Timeout # co19-roll r722: Please triage this failure.
LibTest/collection/ListBase/ListBase_class_A01_t01: Pass, Timeout # co19-roll r722: Please triage this failure.
@@ -331,7 +335,8 @@
WebPlatformTest/dom/nodes/Node-appendChild_t02: RuntimeError, Pass # co19-roll r722: Please triage this failure.
WebPlatformTest/dom/nodes/Node-insertBefore_t01: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/dom/nodes/Node-isEqualNode_t01: RuntimeError # co19-roll r722: Please triage this failure.
-WebPlatformTest/dom/nodes/Node-replaceChild_t01: RuntimeError # co19-roll r722: Please triage this failure.
+WebPlatformTest/dom/nodes/Node-replaceChild_t01: RuntimeError # co19-roll r722: Please triage this failure.
+WebPlatformTest/dom/nodes/Node-textContent_t01: RuntimeError # Issue 18931
WebPlatformTest/dom/nodes/attributes/attributes_A04_t01: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/dom/nodes/attributes/attributes_A05_t01: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/dom/nodes/attributes/setAttributeNS_A05_t01: RuntimeError # co19-roll r722: Please triage this failure.
@@ -344,6 +349,16 @@
WebPlatformTest/dom/nodes/attributes/setAttribute_A02_t01: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/dom/nodes/attributes/setAttribute_A02_t02: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/dom/nodes/attributes/setAttribute_A03_t01: RuntimeError # co19-roll r722: Please triage this failure.
+WebPlatformTest/shadow-dom/elements-and-dom-objects/shadowroot-object/shadowroot-attributes/test-005_t01: RuntimeError # Issue 18931
+WebPlatformTest/shadow-dom/elements-and-dom-objects/shadowroot-object/shadowroot-attributes/test-006_t01: RuntimeError # Issue 18931
+WebPlatformTest/shadow-dom/elements-and-dom-objects/the-content-html-element/test-004_t01: RuntimeError # Issue 18931
+WebPlatformTest/shadow-dom/elements-and-dom-objects/the-content-html-element/test-004_t02: RuntimeError # Issue 18931
+WebPlatformTest/shadow-dom/elements-and-dom-objects/the-shadow-html-element/test-003_t01: RuntimeError # Issue 18931
+WebPlatformTest/shadow-dom/elements-and-dom-objects/the-shadow-html-element/test-003_t02: RuntimeError # Issue 18931
+WebPlatformTest/shadow-dom/events/event-dispatch/test-002_t01: RuntimeError # Issue 18931
+WebPlatformTest/shadow-dom/events/retargeting-relatedtarget/test-001_t01: RuntimeError # Issue 18931
+WebPlatformTest/shadow-dom/events/retargeting-relatedtarget/test-002_t01: RuntimeError # Issue 18931
+WebPlatformTest/shadow-dom/shadow-trees/upper-boundary-encapsulation/test-009_t01: RuntimeError # Issue 18931
[ $compiler == none && ($runtime == dartium || $runtime == ContentShellOnAndroid ) && $checked ]
LibTest/core/List/removeAt_A02_t01: Fail # co19-roll r641: Please triage this failure
@@ -363,7 +378,6 @@
WebPlatformTest/html-templates/parsing-html-templates/clearing-the-stack-back-to-a-given-context/clearing-stack-back-to-a-table-body-context_t01: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/html-templates/parsing-html-templates/clearing-the-stack-back-to-a-given-context/clearing-stack-back-to-a-table-context_t01: RuntimeError # co19-roll r722: Please triage this failure.
WebPlatformTest/html-templates/parsing-html-templates/clearing-the-stack-back-to-a-given-context/clearing-stack-back-to-a-table-row-context_t01: RuntimeError # co19-roll r722: Please triage this failure.
-WebPlatformTest/shadow-dom/shadow-trees/upper-boundary-encapsulation/ownerdocument-001_t01: RuntimeError # co19-roll r722: Please triage this failure.
[ $compiler == none && $runtime == ContentShellOnAndroid ]
LibTest/math/log_A01_t01: Pass, Fail # co19 issue 44.
diff --git a/tests/html/html.status b/tests/html/html.status
index 92ca0f7..eb67279 100644
--- a/tests/html/html.status
+++ b/tests/html/html.status
@@ -9,6 +9,12 @@
[ $runtime == drt || $runtime == dartium ]
custom/template_wrappers_test: Pass # Issue 16656 Override others
+[ $compiler == none && ($runtime == drt || $runtime == dartium) ]
+form_data_test/functional: Fail # Issue 18931 (Disabled for Chrome 35 roll)
+indexeddb_5_test: Fail # Issue 18931 (Disabled for Chrome 35 roll)
+custom/attribute_changed_callback_test/unsupported_on_polyfill: Fail # Issue 18931 (Disabled for Chrome 35 roll)
+websql_test/functional: Fail # Issue 18931 (Disabled for Chrome 35 roll)
+
[ $compiler == dart2js && $csp ]
custom/js_custom_test: Fail # Issue 14643
custom/element_upgrade_test: Fail # Issue 17298
@@ -403,14 +409,12 @@
element_types_test/supported_embed: Fail
element_types_test/supported_keygen: Fail
element_types_test/supported_object: Fail
-element_types_test/supported_shadow: Fail
element_types_test/supported_track: Fail
fileapi_test/supported: Fail
indexeddb_1_test/supportsDatabaseNames: Fail
input_element_test/supported_date: Fail
input_element_test/supported_datetime-local: Fail
input_element_test/supported_month: Fail
-input_element_test/supported_number: Fail
input_element_test/supported_time: Fail
input_element_test/supported_week: Fail
media_stream_test/supported_MediaStreamEvent: Fail
diff --git a/tests/language/language_analyzer.status b/tests/language/language_analyzer.status
index d8ff939..196ad26 100644
--- a/tests/language/language_analyzer.status
+++ b/tests/language/language_analyzer.status
@@ -15,8 +15,6 @@
type_check_const_function_typedef2_test/00: MissingCompileTimeError, Ok # Compile-time error in checked mode, because of constants.
-call_closurization_test: StaticWarning # Issue 17476
-
# Please add new failing tests before this line.
# Section below is for invalid tests.
#
diff --git a/tests/standalone/io/http_cookie_date_test.dart b/tests/standalone/io/http_cookie_date_test.dart
index 08288b6..51e3e1f 100644
--- a/tests/standalone/io/http_cookie_date_test.dart
+++ b/tests/standalone/io/http_cookie_date_test.dart
@@ -32,6 +32,7 @@
part "../../../sdk/lib/io/io_sink.dart";
part "../../../sdk/lib/io/platform.dart";
part "../../../sdk/lib/io/platform_impl.dart";
+part "../../../sdk/lib/io/service_object.dart";
part "../../../sdk/lib/io/secure_socket.dart";
part "../../../sdk/lib/io/secure_server_socket.dart";
part "../../../sdk/lib/io/socket.dart";
diff --git a/tests/standalone/io/http_headers_test.dart b/tests/standalone/io/http_headers_test.dart
index 5442ae8..6b4c1f1 100644
--- a/tests/standalone/io/http_headers_test.dart
+++ b/tests/standalone/io/http_headers_test.dart
@@ -32,6 +32,7 @@
part "../../../sdk/lib/io/io_sink.dart";
part "../../../sdk/lib/io/platform.dart";
part "../../../sdk/lib/io/platform_impl.dart";
+part "../../../sdk/lib/io/service_object.dart";
part "../../../sdk/lib/io/secure_socket.dart";
part "../../../sdk/lib/io/secure_server_socket.dart";
part "../../../sdk/lib/io/socket.dart";
diff --git a/tests/standalone/io/http_parser_test.dart b/tests/standalone/io/http_parser_test.dart
index 5321af0..c5da991 100644
--- a/tests/standalone/io/http_parser_test.dart
+++ b/tests/standalone/io/http_parser_test.dart
@@ -32,6 +32,7 @@
part "../../../sdk/lib/io/io_sink.dart";
part "../../../sdk/lib/io/platform.dart";
part "../../../sdk/lib/io/platform_impl.dart";
+part "../../../sdk/lib/io/service_object.dart";
part "../../../sdk/lib/io/secure_socket.dart";
part "../../../sdk/lib/io/secure_server_socket.dart";
part "../../../sdk/lib/io/socket.dart";
diff --git a/tests/standalone/io/web_socket_protocol_processor_test.dart b/tests/standalone/io/web_socket_protocol_processor_test.dart
index b14a37d..179fe0b 100644
--- a/tests/standalone/io/web_socket_protocol_processor_test.dart
+++ b/tests/standalone/io/web_socket_protocol_processor_test.dart
@@ -33,6 +33,7 @@
part "../../../sdk/lib/io/io_sink.dart";
part "../../../sdk/lib/io/platform.dart";
part "../../../sdk/lib/io/platform_impl.dart";
+part "../../../sdk/lib/io/service_object.dart";
part "../../../sdk/lib/io/secure_socket.dart";
part "../../../sdk/lib/io/secure_server_socket.dart";
part "../../../sdk/lib/io/socket.dart";
diff --git a/tools/VERSION b/tools/VERSION
index c879937..6e3a531 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
MAJOR 1
MINOR 5
PATCH 0
-PRERELEASE 0
+PRERELEASE 1
PRERELEASE_PATCH 0
diff --git a/tools/dartium/archive.py b/tools/dartium/archive.py
index b5f1e33..f479595 100755
--- a/tools/dartium/archive.py
+++ b/tools/dartium/archive.py
@@ -130,7 +130,8 @@
if os.path.exists(stageDir):
shutil.rmtree(stageDir)
os.mkdir(stageDir)
- oldFiles = glob.glob(target.split('-')[0] + '*.zip')
+ revision = target.split('-')[-1]
+ oldFiles = glob.glob(target.replace(revision, '*.zip'))
for oldFile in oldFiles:
os.remove(oldFile)
diff --git a/tools/dartium/dartium_bot_utils.py b/tools/dartium/dartium_bot_utils.py
old mode 100644
new mode 100755
diff --git a/tools/dartium/multivm_archive.py b/tools/dartium/multivm_archive.py
old mode 100644
new mode 100755
index 6b3877c..601adc3
--- a/tools/dartium/multivm_archive.py
+++ b/tools/dartium/multivm_archive.py
@@ -22,14 +22,13 @@
SRC_PATH = dartium_bot_utils.srcPath()
def main():
- print SRC_PATH
multivm_deps = os.path.join(os.path.dirname(SRC_PATH), 'multivm.deps')
revision_directory = (multivm_deps if (os.path.isdir(multivm_deps))
else os.path.join(SRC_PATH, 'dart'))
output, _ = subprocess.Popen(['svn', 'info'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
- shell=(platform.system == 'Windows'),
+ shell=(platform.system() == 'Windows'),
cwd=revision_directory).communicate()
revision = re.search('Last Changed Rev: (\d+)', output).group(1)
version = revision + '.0'
diff --git a/tools/dartium/update_deps.py b/tools/dartium/update_deps.py
index c3036eb..381d393 100755
--- a/tools/dartium/update_deps.py
+++ b/tools/dartium/update_deps.py
@@ -40,7 +40,7 @@
# Repositories to auto-update
########################################################################
-BRANCH_CURRENT="dart/1847"
+BRANCH_CURRENT="dart/1916"
BRANCH_NEXT="dart/1916"
BRANCH_MULTIVM="dart/multivm"
diff --git a/tools/dartium/upload_steps.py b/tools/dartium/upload_steps.py
old mode 100644
new mode 100755
diff --git a/tools/dom/idl/dart/dart.idl b/tools/dom/idl/dart/dart.idl
index 7aa1b29..6ef26f6 100644
--- a/tools/dom/idl/dart/dart.idl
+++ b/tools/dom/idl/dart/dart.idl
@@ -35,6 +35,8 @@
[Supplemental]
interface Node {
[Custom] Node cloneNode([Default=Undefined] optional boolean deep);
+ [Suppressed] readonly attribute Element nextElementSibling;
+ [Suppressed] readonly attribute Element previousElementSibling;
};
[Supplemental]
@@ -57,6 +59,18 @@
Element implements ElementTraversal;
*/
+[Supplemental]
+interface Element {
+ readonly attribute Element nextElementSibling;
+ readonly attribute Element previousElementSibling;
+};
+
+[Supplemental]
+interface CharacterData {
+ readonly attribute Element nextElementSibling;
+ readonly attribute Element previousElementSibling;
+};
+
[Callback]
interface TimeoutHandler {
void handleEvent();
@@ -75,6 +89,11 @@
interface CanvasRenderingContext2D {
[DartName=createImageDataFromImageData] ImageData createImageData(ImageData imagedata);
+ // Removed in 1916.
+ [Suppressed] void drawSystemFocusRing(Element element);
+
+ [Suppressed] void assert(boolean condition);
+
[Suppressed] attribute boolean webkitImageSmoothingEnabled;
};
@@ -350,4 +369,10 @@
[Suppressed]
interface Promise {};
+[Supplemental]
+interface Screen {
+ [Suppressed]
+ boolean lockOrientation(sequence<DOMString> orientations);
+};
+
Element implements GlobalEventHandlers;
diff --git a/tools/dom/scripts/systemnative.py b/tools/dom/scripts/systemnative.py
index c3a7d19..88a2655 100644
--- a/tools/dom/scripts/systemnative.py
+++ b/tools/dom/scripts/systemnative.py
@@ -10,7 +10,7 @@
import os
from generator import *
from htmldartgenerator import *
-from idlnode import IDLArgument, IDLAttribute
+from idlnode import IDLArgument, IDLAttribute, IDLEnum
from systemhtml import js_support_checks, GetCallbackInfo, HTML_LIBRARY_NAMES
# This is an ugly hack to get things working on the M35 roll. Once we
@@ -26,6 +26,14 @@
'FormData_constructorCallback_RESOLVER_STRING_1_HTMLFormElement',
'XMLHttpRequest_constructorCallback_RESOLVER_STRING_0_':
'XMLHttpRequest_constructorCallback_RESOLVER_STRING_1_XMLHttpRequestOptions',
+ # This callback name just gets generated slightly differently and we don't
+ # want to bother fixing it.
+ 'ScriptProcessorNode__setEventListener_Callback':
+ 'ScriptProcessorNode_setEventListener_Callback',
+ # We don't know how to get GLenum to show up as the correct type in this
+ # script and don't want to bother fixing it the right way.
+ 'WebGLDrawBuffers_drawBuffersWEBGL_Callback_RESOLVER_STRING_1_sequence<GLenum>' :
+ 'WebGLDrawBuffers_drawBuffersWEBGL_Callback_RESOLVER_STRING_1_sequence<unsigned long>'
}
# TODO(vsm): This logic needs to pulled from the source IDL. These tables are
@@ -156,6 +164,11 @@
'CanvasRenderingContext' : 'CanvasRenderingContext2D',
'Clipboard': 'DataTransfer',
'Player': 'AnimationPlayer',
+ 'Algorithm': 'KeyAlgorithm',
+ 'any': 'ScriptValue',
+ 'URLUtils': 'URL',
+ 'URLUtilsReadOnly': 'WorkerLocation',
+ 'Path': 'Path2D'
}
_cpp_partial_map = {}
@@ -323,7 +336,10 @@
return None
return matched.group(1)
-def TypeIdToBlinkName(interface_id):
+def TypeIdToBlinkName(interface_id, database):
+ if database.HasEnum(interface_id):
+ return "DOMString" # All enums are strings.
+
if interface_id in _blink_1916_rename_map:
interface_id = _blink_1916_rename_map[interface_id]
return interface_id
@@ -350,17 +366,17 @@
fields.append(suffix)
return "_".join(fields)
-def DeriveResolverString(interface_id, operation_id, native_suffix, type_ids, is_custom=False):
+def DeriveResolverString(interface_id, operation_id, native_suffix, type_ids, database, is_custom):
type_string = \
- "_".join(map(TypeIdToBlinkName, type_ids))
+ "_".join(map(lambda type_id : TypeIdToBlinkName(type_id, database), type_ids))
if native_suffix:
operation_id = "%s_%s" % (operation_id, native_suffix)
if is_custom:
components = \
- [TypeIdToBlinkName(interface_id), operation_id]
+ [TypeIdToBlinkName(interface_id, database), operation_id]
else:
components = \
- [TypeIdToBlinkName(interface_id), operation_id,
+ [TypeIdToBlinkName(interface_id, database), operation_id,
"RESOLVER_STRING", str(len(type_ids)), type_string]
return "_".join(components)
@@ -610,7 +626,7 @@
if self._dart_use_blink:
type_ids = [p.type.id for p in arguments[:argument_count]]
constructor_callback_id = \
- DeriveResolverString(self._interface.id, cpp_suffix, None, type_ids, is_custom)
+ DeriveResolverString(self._interface.id, cpp_suffix, None, type_ids, self._database, is_custom)
else:
constructor_callback_id = self._interface.id + '_' + constructor_callback_cpp_name
@@ -1035,13 +1051,14 @@
self._EmitExplicitIndexedGetter(dart_element_type)
else:
if self._dart_use_blink:
+ is_custom = any((op.id == 'item' and 'Custom' in op.ext_attrs) for op in self._interface.operations)
dart_native_name = \
DeriveNativeName(self._interface.id, "NativeIndexed", "Getter")
# First emit a toplevel function to do the native call
# Calls to this are emitted elsewhere,
resolver_string = \
DeriveResolverString(self._interface.id, "item", "Callback",
- ["unsigned long"])
+ ["unsigned long"], self._database, is_custom)
if resolver_string in _cpp_resolver_string_map:
resolver_string = \
_cpp_resolver_string_map[resolver_string]
@@ -1180,7 +1197,7 @@
for argument in operation.arguments[:len(info.param_infos)]]
resolver_string = \
DeriveResolverString(self._interface.id, operation.id,
- native_suffix, type_ids, is_custom)
+ native_suffix, type_ids, self._database, is_custom)
else:
resolver_string = None
cpp_callback_name = self._GenerateNativeBinding(
@@ -1216,7 +1233,7 @@
for argument in operation.arguments[:argument_count]]
resolver_string = \
DeriveResolverString(self._interface.id, operation.id,
- native_suffix, type_ids)
+ native_suffix, type_ids, self._database, is_custom)
else:
base_name = '_%s_%s' % (operation.id, version)
overload_name = base_name
@@ -1678,7 +1695,7 @@
else:
native_binding_id = self._interface.id
if self._dart_use_blink:
- native_binding_id = TypeIdToBlinkName(native_binding_id)
+ native_binding_id = TypeIdToBlinkName(native_binding_id, self._database)
native_binding = \
'%s_%s_%s' % (native_binding_id, idl_name, native_suffix)