Version 2.13.0-148.0.dev
Merge commit '4c0edc67207ef1758c01bbf6f2d4aea99a0f3331' into 'dev'
diff --git a/pkg/front_end/lib/src/fasta/kernel/constant_evaluator.dart b/pkg/front_end/lib/src/fasta/kernel/constant_evaluator.dart
index 45d2b4d..cc156ed 100644
--- a/pkg/front_end/lib/src/fasta/kernel/constant_evaluator.dart
+++ b/pkg/front_end/lib/src/fasta/kernel/constant_evaluator.dart
@@ -3365,6 +3365,37 @@
}
@override
+ ExecutionStatus visitForStatement(ForStatement node) {
+ for (VariableDeclaration variable in node.variables) {
+ final ExecutionStatus status = variable.accept(this);
+ if (status is! ProceedStatus) return status;
+ }
+
+ Constant condition =
+ node.condition != null ? evaluate(node.condition) : null;
+ while (node.condition == null || condition is BoolConstant) {
+ if (condition is BoolConstant && !condition.value) break;
+
+ final ExecutionStatus status = node.body.accept(this);
+ if (status is! ProceedStatus) return status;
+
+ for (Expression update in node.updates) {
+ Constant updateConstant = evaluate(update);
+ if (updateConstant is AbortConstant) {
+ return new AbortStatus(updateConstant);
+ }
+ }
+
+ if (node.condition != null) {
+ condition = evaluate(node.condition);
+ }
+ }
+
+ if (condition is AbortConstant) return new AbortStatus(condition);
+ return const ProceedStatus();
+ }
+
+ @override
ExecutionStatus visitExpressionStatement(ExpressionStatement node) {
Constant value = evaluate(node.expression);
if (value is AbortConstant) return new AbortStatus(value);
diff --git a/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart
new file mode 100644
index 0000000..bfc8001
--- /dev/null
+++ b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart
@@ -0,0 +1,46 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// Tests for-statements in const functions.
+
+// SharedOptions=--enable-experiment=const-functions
+
+import "package:expect/expect.dart";
+
+const var1 = fn(2);
+const var2 = fn(3);
+int fn(int a) {
+ int b = a;
+ for (int i = 0; i < 2; i++) {
+ b += a;
+ }
+ return b;
+}
+
+const var3 = fn1(2);
+const var4 = fn1(3);
+int fn1(int a) {
+ int b = a;
+ for (int i = 0;; i++) {
+ b *= 3;
+ if (b > 10) return b;
+ }
+}
+
+const var5 = fn2();
+int fn2() {
+ for (int i = 0, j = 2;; i += 2, j += 1) {
+ if (i + j > 10) {
+ return i + j;
+ }
+ }
+}
+
+void main() {
+ Expect.equals(var1, 6);
+ Expect.equals(var2, 9);
+ Expect.equals(var3, 18);
+ Expect.equals(var4, 27);
+ Expect.equals(var5, 11);
+}
diff --git a/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.strong.expect b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.strong.expect
new file mode 100644
index 0000000..801153f
--- /dev/null
+++ b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.strong.expect
@@ -0,0 +1,49 @@
+library /*isNonNullableByDefault*/;
+import self as self;
+import "dart:core" as core;
+import "package:expect/expect.dart" as exp;
+
+import "package:expect/expect.dart";
+
+static const field core::int var1 = #C1;
+static const field core::int var2 = #C2;
+static const field core::int var3 = #C3;
+static const field core::int var4 = #C4;
+static const field core::int var5 = #C5;
+static method fn(core::int a) → core::int {
+ core::int b = a;
+ for (core::int i = 0; i.{core::num::<}(2); i = i.{core::num::+}(1)) {
+ b = b.{core::num::+}(a);
+ }
+ return b;
+}
+static method fn1(core::int a) → core::int {
+ core::int b = a;
+ for (core::int i = 0; ; i = i.{core::num::+}(1)) {
+ b = b.{core::num::*}(3);
+ if(b.{core::num::>}(10))
+ return b;
+ }
+}
+static method fn2() → core::int {
+ for (core::int i = 0, core::int j = 2; ; i = i.{core::num::+}(2), j = j.{core::num::+}(1)) {
+ if(i.{core::num::+}(j).{core::num::>}(10)) {
+ return i.{core::num::+}(j);
+ }
+ }
+}
+static method main() → void {
+ exp::Expect::equals(#C1, 6);
+ exp::Expect::equals(#C2, 9);
+ exp::Expect::equals(#C3, 18);
+ exp::Expect::equals(#C4, 27);
+ exp::Expect::equals(#C5, 11);
+}
+
+constants {
+ #C1 = 6
+ #C2 = 9
+ #C3 = 18
+ #C4 = 27
+ #C5 = 11
+}
diff --git a/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.strong.transformed.expect b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.strong.transformed.expect
new file mode 100644
index 0000000..801153f
--- /dev/null
+++ b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.strong.transformed.expect
@@ -0,0 +1,49 @@
+library /*isNonNullableByDefault*/;
+import self as self;
+import "dart:core" as core;
+import "package:expect/expect.dart" as exp;
+
+import "package:expect/expect.dart";
+
+static const field core::int var1 = #C1;
+static const field core::int var2 = #C2;
+static const field core::int var3 = #C3;
+static const field core::int var4 = #C4;
+static const field core::int var5 = #C5;
+static method fn(core::int a) → core::int {
+ core::int b = a;
+ for (core::int i = 0; i.{core::num::<}(2); i = i.{core::num::+}(1)) {
+ b = b.{core::num::+}(a);
+ }
+ return b;
+}
+static method fn1(core::int a) → core::int {
+ core::int b = a;
+ for (core::int i = 0; ; i = i.{core::num::+}(1)) {
+ b = b.{core::num::*}(3);
+ if(b.{core::num::>}(10))
+ return b;
+ }
+}
+static method fn2() → core::int {
+ for (core::int i = 0, core::int j = 2; ; i = i.{core::num::+}(2), j = j.{core::num::+}(1)) {
+ if(i.{core::num::+}(j).{core::num::>}(10)) {
+ return i.{core::num::+}(j);
+ }
+ }
+}
+static method main() → void {
+ exp::Expect::equals(#C1, 6);
+ exp::Expect::equals(#C2, 9);
+ exp::Expect::equals(#C3, 18);
+ exp::Expect::equals(#C4, 27);
+ exp::Expect::equals(#C5, 11);
+}
+
+constants {
+ #C1 = 6
+ #C2 = 9
+ #C3 = 18
+ #C4 = 27
+ #C5 = 11
+}
diff --git a/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.textual_outline.expect b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.textual_outline.expect
new file mode 100644
index 0000000..b8f0c47
--- /dev/null
+++ b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.textual_outline.expect
@@ -0,0 +1,11 @@
+import "package:expect/expect.dart";
+
+const var1 = fn(2);
+const var2 = fn(3);
+int fn(int a) {}
+const var3 = fn1(2);
+const var4 = fn1(3);
+int fn1(int a) {}
+const var5 = fn2();
+int fn2() {}
+void main() {}
diff --git a/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.textual_outline_modelled.expect b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.textual_outline_modelled.expect
new file mode 100644
index 0000000..5431f56
--- /dev/null
+++ b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.textual_outline_modelled.expect
@@ -0,0 +1,11 @@
+import "package:expect/expect.dart";
+
+const var1 = fn(2);
+const var2 = fn(3);
+const var3 = fn1(2);
+const var4 = fn1(3);
+const var5 = fn2();
+int fn(int a) {}
+int fn1(int a) {}
+int fn2() {}
+void main() {}
diff --git a/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.weak.expect b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.weak.expect
new file mode 100644
index 0000000..801153f
--- /dev/null
+++ b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.weak.expect
@@ -0,0 +1,49 @@
+library /*isNonNullableByDefault*/;
+import self as self;
+import "dart:core" as core;
+import "package:expect/expect.dart" as exp;
+
+import "package:expect/expect.dart";
+
+static const field core::int var1 = #C1;
+static const field core::int var2 = #C2;
+static const field core::int var3 = #C3;
+static const field core::int var4 = #C4;
+static const field core::int var5 = #C5;
+static method fn(core::int a) → core::int {
+ core::int b = a;
+ for (core::int i = 0; i.{core::num::<}(2); i = i.{core::num::+}(1)) {
+ b = b.{core::num::+}(a);
+ }
+ return b;
+}
+static method fn1(core::int a) → core::int {
+ core::int b = a;
+ for (core::int i = 0; ; i = i.{core::num::+}(1)) {
+ b = b.{core::num::*}(3);
+ if(b.{core::num::>}(10))
+ return b;
+ }
+}
+static method fn2() → core::int {
+ for (core::int i = 0, core::int j = 2; ; i = i.{core::num::+}(2), j = j.{core::num::+}(1)) {
+ if(i.{core::num::+}(j).{core::num::>}(10)) {
+ return i.{core::num::+}(j);
+ }
+ }
+}
+static method main() → void {
+ exp::Expect::equals(#C1, 6);
+ exp::Expect::equals(#C2, 9);
+ exp::Expect::equals(#C3, 18);
+ exp::Expect::equals(#C4, 27);
+ exp::Expect::equals(#C5, 11);
+}
+
+constants {
+ #C1 = 6
+ #C2 = 9
+ #C3 = 18
+ #C4 = 27
+ #C5 = 11
+}
diff --git a/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.weak.outline.expect b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.weak.outline.expect
new file mode 100644
index 0000000..add1c85
--- /dev/null
+++ b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.weak.outline.expect
@@ -0,0 +1,19 @@
+library /*isNonNullableByDefault*/;
+import self as self;
+import "dart:core" as core;
+
+import "package:expect/expect.dart";
+
+static const field core::int var1 = self::fn(2);
+static const field core::int var2 = self::fn(3);
+static const field core::int var3 = self::fn1(2);
+static const field core::int var4 = self::fn1(3);
+static const field core::int var5 = self::fn2();
+static method fn(core::int a) → core::int
+ ;
+static method fn1(core::int a) → core::int
+ ;
+static method fn2() → core::int
+ ;
+static method main() → void
+ ;
diff --git a/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.weak.transformed.expect b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.weak.transformed.expect
new file mode 100644
index 0000000..801153f
--- /dev/null
+++ b/pkg/front_end/testcases/const_functions/const_functions_for_statements.dart.weak.transformed.expect
@@ -0,0 +1,49 @@
+library /*isNonNullableByDefault*/;
+import self as self;
+import "dart:core" as core;
+import "package:expect/expect.dart" as exp;
+
+import "package:expect/expect.dart";
+
+static const field core::int var1 = #C1;
+static const field core::int var2 = #C2;
+static const field core::int var3 = #C3;
+static const field core::int var4 = #C4;
+static const field core::int var5 = #C5;
+static method fn(core::int a) → core::int {
+ core::int b = a;
+ for (core::int i = 0; i.{core::num::<}(2); i = i.{core::num::+}(1)) {
+ b = b.{core::num::+}(a);
+ }
+ return b;
+}
+static method fn1(core::int a) → core::int {
+ core::int b = a;
+ for (core::int i = 0; ; i = i.{core::num::+}(1)) {
+ b = b.{core::num::*}(3);
+ if(b.{core::num::>}(10))
+ return b;
+ }
+}
+static method fn2() → core::int {
+ for (core::int i = 0, core::int j = 2; ; i = i.{core::num::+}(2), j = j.{core::num::+}(1)) {
+ if(i.{core::num::+}(j).{core::num::>}(10)) {
+ return i.{core::num::+}(j);
+ }
+ }
+}
+static method main() → void {
+ exp::Expect::equals(#C1, 6);
+ exp::Expect::equals(#C2, 9);
+ exp::Expect::equals(#C3, 18);
+ exp::Expect::equals(#C4, 27);
+ exp::Expect::equals(#C5, 11);
+}
+
+constants {
+ #C1 = 6
+ #C2 = 9
+ #C3 = 18
+ #C4 = 27
+ #C5 = 11
+}
diff --git a/runtime/vm/bitfield.h b/runtime/vm/bitfield.h
index 1eaa94d..4a4f45f 100644
--- a/runtime/vm/bitfield.h
+++ b/runtime/vm/bitfield.h
@@ -5,6 +5,8 @@
#ifndef RUNTIME_VM_BITFIELD_H_
#define RUNTIME_VM_BITFIELD_H_
+#include <type_traits>
+
#include "platform/assert.h"
#include "platform/globals.h"
@@ -17,19 +19,22 @@
template <typename S,
typename T,
int position,
- int size = (sizeof(S) * kBitsPerByte) - position>
+ int size = (sizeof(S) * kBitsPerByte) - position,
+ bool sign_extend = false>
class BitField {
public:
typedef T Type;
static_assert((sizeof(S) * kBitsPerByte) >= (position + size),
"BitField does not fit into the type.");
+ static_assert(!sign_extend || std::is_signed<T>::value,
+ "Should only sign extend signed bitfield types");
static const intptr_t kNextBit = position + size;
// Tells whether the provided value fits into the bit field.
static constexpr bool is_valid(T value) {
- return (static_cast<S>(value) & ~((kUwordOne << size) - 1)) == 0;
+ return decode(encode_unchecked(value)) == value;
}
// Returns a S mask of the bit field.
@@ -37,9 +42,7 @@
// Returns a S mask of the bit field which can be applied directly to
// to the raw unshifted bits.
- static constexpr S mask_in_place() {
- return ((kUwordOne << size) - 1) << position;
- }
+ static constexpr S mask_in_place() { return mask() << position; }
// Returns the shift count needed to right-shift the bit field to
// the least-significant bits.
@@ -51,12 +54,22 @@
// Returns an S with the bit field value encoded.
static UNLESS_DEBUG(constexpr) S encode(T value) {
DEBUG_ASSERT(is_valid(value));
- return static_cast<S>(value) << position;
+ return encode_unchecked(value);
}
// Extracts the bit field from the value.
static constexpr T decode(S value) {
- return static_cast<T>((value >> position) & ((kUwordOne << size) - 1));
+ // Ensure we slide down the sign bit if the value in the bit field is signed
+ // and negative. We use 64-bit ints inside the expression since we can have
+ // both cases: sizeof(S) > sizeof(T) or sizeof(S) < sizeof(T).
+ return static_cast<T>(
+ (sign_extend
+ ? (static_cast<int64_t>(static_cast<uint64_t>(value)
+ << (64 - (size + position))) >>
+ (64 - size))
+ : ((static_cast<typename std::make_unsigned<S>::type>(value) >>
+ position) &
+ mask())));
}
// Returns an S with the bit field value encoded based on the
@@ -64,7 +77,14 @@
// will be changed.
static UNLESS_DEBUG(constexpr) S update(T value, S original) {
DEBUG_ASSERT(is_valid(value));
- return (static_cast<S>(value) << position) | (~mask_in_place() & original);
+ return encode_unchecked(value) | (~mask_in_place() & original);
+ }
+
+ private:
+ // Returns an S with the bit field value encoded.
+ static constexpr S encode_unchecked(T value) {
+ return (static_cast<typename std::make_unsigned<S>::type>(value) & mask())
+ << position;
}
};
diff --git a/runtime/vm/bitfield_test.cc b/runtime/vm/bitfield_test.cc
index d8e8182..e90a257 100644
--- a/runtime/vm/bitfield_test.cc
+++ b/runtime/vm/bitfield_test.cc
@@ -23,4 +23,51 @@
EXPECT_EQ(2U, TestBitFields::update(1, 16));
}
+template <typename T>
+static void TestSignExtendedBitField() {
+ class F1 : public BitField<T, intptr_t, 0, 8, /*sign_extend=*/true> {};
+ class F2
+ : public BitField<T, uintptr_t, F1::kNextBit, 8, /*sign_extend=*/false> {
+ };
+ class F3
+ : public BitField<T, intptr_t, F2::kNextBit, 8, /*sign_extend=*/true> {};
+ class F4
+ : public BitField<T, uintptr_t, F3::kNextBit, 8, /*sign_extend=*/false> {
+ };
+
+ const uint32_t value =
+ F1::encode(-1) | F2::encode(1) | F3::encode(-2) | F4::encode(2);
+ EXPECT_EQ(0x02fe01ffU, value);
+ EXPECT_EQ(-1, F1::decode(value));
+ EXPECT_EQ(1U, F2::decode(value));
+ EXPECT_EQ(-2, F3::decode(value));
+ EXPECT_EQ(2U, F4::decode(value));
+}
+
+template <typename T>
+static void TestNotSignExtendedBitField() {
+ class F1 : public BitField<T, intptr_t, 0, 8, /*sign_extend=*/false> {};
+ class F2
+ : public BitField<T, uintptr_t, F1::kNextBit, 8, /*sign_extend=*/false> {
+ };
+ class F3
+ : public BitField<T, intptr_t, F2::kNextBit, 8, /*sign_extend=*/false> {};
+ class F4
+ : public BitField<T, uintptr_t, F3::kNextBit, 8, /*sign_extend=*/false> {
+ };
+
+ const uint32_t value =
+ F1::encode(-1) | F2::encode(1) | F3::encode(-2) | F4::encode(2);
+ EXPECT_EQ(0x02fe01ffU, value);
+  EXPECT_EQ(0xff, F1::decode(value));  // -1 truncated to 8 bits; no sign extension.
+  EXPECT_EQ(1U, F2::decode(value));
+  EXPECT_EQ(0xfe, F3::decode(value));  // -2 truncated to 8 bits; no sign extension.
+  EXPECT_EQ(2U, F4::decode(value));  // Was a duplicated F3 check; F4 was never verified.
+}
+
+VM_UNIT_TEST_CASE(BitFields_SignedField) {
+ TestSignExtendedBitField<uint32_t>();
+ TestSignExtendedBitField<int32_t>();
+}
+
} // namespace dart
diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc
index 42c547a..0655206 100644
--- a/runtime/vm/compiler/backend/il_arm.cc
+++ b/runtime/vm/compiler/backend/il_arm.cc
@@ -31,6 +31,9 @@
namespace dart {
+DECLARE_FLAG(bool, inline_alloc);
+DECLARE_FLAG(bool, use_slow_path);
+
// Generic summary for call instructions that have all arguments pushed
// on the stack and return the result in a fixed location depending on
// the return value (R0, Location::Pair(R0, R1) or Q0).
@@ -3172,13 +3175,15 @@
ASSERT(locs()->in(kLengthPos).reg() == kLengthReg);
compiler::Label slow_path, done;
- if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
- num_elements()->BindsToConstant() &&
- compiler::target::IsSmi(num_elements()->BoundConstant())) {
- const intptr_t length =
- compiler::target::SmiValue(num_elements()->BoundConstant());
- if (Array::IsValidLength(length)) {
- InlineArrayAllocation(compiler, length, &slow_path, &done);
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+ if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
+ num_elements()->BindsToConstant() &&
+ compiler::target::IsSmi(num_elements()->BoundConstant())) {
+ const intptr_t length =
+ compiler::target::SmiValue(num_elements()->BoundConstant());
+ if (Array::IsValidLength(length)) {
+ InlineArrayAllocation(compiler, length, &slow_path, &done);
+ }
}
}
@@ -3568,14 +3573,19 @@
compiler->AddSlowPathCode(slow_path);
intptr_t instance_size = Context::InstanceSize(num_context_variables());
- __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
- result, // instance
- temp0, temp1, temp2);
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+ __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
+ result, // instance
+ temp0, temp1, temp2);
- // Setup up number of context variables field.
- __ LoadImmediate(temp0, num_context_variables());
- __ str(temp0, compiler::FieldAddress(
- result, compiler::target::Context::num_variables_offset()));
+    // Set up the number-of-context-variables field.
+ __ LoadImmediate(temp0, num_context_variables());
+ __ str(temp0,
+ compiler::FieldAddress(
+ result, compiler::target::Context::num_variables_offset()));
+ } else {
+ __ Jump(slow_path->entry_label());
+ }
__ Bind(slow_path->exit_label());
}
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index 48514db..57629ea 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -29,6 +29,9 @@
namespace dart {
+DECLARE_FLAG(bool, inline_alloc);
+DECLARE_FLAG(bool, use_slow_path);
+
// Generic summary for call instructions that have all arguments pushed
// on the stack and return the result in a fixed register R0 (or V0 if
// the return type is double).
@@ -2774,12 +2777,15 @@
ASSERT(locs()->in(kLengthPos).reg() == kLengthReg);
compiler::Label slow_path, done;
- if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
- num_elements()->BindsToConstant() &&
- num_elements()->BoundConstant().IsSmi()) {
- const intptr_t length = Smi::Cast(num_elements()->BoundConstant()).Value();
- if (Array::IsValidLength(length)) {
- InlineArrayAllocation(compiler, length, &slow_path, &done);
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+ if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
+ num_elements()->BindsToConstant() &&
+ num_elements()->BoundConstant().IsSmi()) {
+ const intptr_t length =
+ Smi::Cast(num_elements()->BoundConstant()).Value();
+ if (Array::IsValidLength(length)) {
+ InlineArrayAllocation(compiler, length, &slow_path, &done);
+ }
}
}
@@ -3140,14 +3146,18 @@
compiler->AddSlowPathCode(slow_path);
intptr_t instance_size = Context::InstanceSize(num_context_variables());
- __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
- result, // instance
- temp0, temp1, temp2);
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+ __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
+ result, // instance
+ temp0, temp1, temp2);
- // Setup up number of context variables field.
- __ LoadImmediate(temp0, num_context_variables());
- __ str(temp0,
- compiler::FieldAddress(result, Context::num_variables_offset()));
+    // Set up the number-of-context-variables field.
+ __ LoadImmediate(temp0, num_context_variables());
+ __ str(temp0,
+ compiler::FieldAddress(result, Context::num_variables_offset()));
+ } else {
+ __ Jump(slow_path->entry_label());
+ }
__ Bind(slow_path->exit_label());
}
diff --git a/runtime/vm/compiler/backend/il_ia32.cc b/runtime/vm/compiler/backend/il_ia32.cc
index c32a520..c2aae32 100644
--- a/runtime/vm/compiler/backend/il_ia32.cc
+++ b/runtime/vm/compiler/backend/il_ia32.cc
@@ -29,6 +29,9 @@
namespace dart {
+DECLARE_FLAG(bool, inline_alloc);
+DECLARE_FLAG(bool, use_slow_path);
+
// Generic summary for call instructions that have all arguments pushed
// on the stack and return the result in a fixed register EAX.
LocationSummary* Instruction::MakeCallSummary(Zone* zone,
@@ -2539,11 +2542,14 @@
ASSERT(locs()->in(1).reg() == kLengthReg);
compiler::Label slow_path, done;
- if (compiler->is_optimizing() && num_elements()->BindsToConstant() &&
- num_elements()->BoundConstant().IsSmi()) {
- const intptr_t length = Smi::Cast(num_elements()->BoundConstant()).Value();
- if (Array::IsValidLength(length)) {
- InlineArrayAllocation(compiler, length, &slow_path, &done);
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+ if (compiler->is_optimizing() && num_elements()->BindsToConstant() &&
+ num_elements()->BoundConstant().IsSmi()) {
+ const intptr_t length =
+ Smi::Cast(num_elements()->BoundConstant()).Value();
+ if (Array::IsValidLength(length)) {
+ InlineArrayAllocation(compiler, length, &slow_path, &done);
+ }
}
}
@@ -2888,15 +2894,19 @@
compiler->AddSlowPathCode(slow_path);
intptr_t instance_size = Context::InstanceSize(num_context_variables());
- __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
- compiler::Assembler::kFarJump,
- result, // instance
- temp, // end address
- temp2); // temp
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+ __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
+ compiler::Assembler::kFarJump,
+ result, // instance
+ temp, // end address
+ temp2); // temp
- // Setup up number of context variables field.
- __ movl(compiler::FieldAddress(result, Context::num_variables_offset()),
- compiler::Immediate(num_context_variables()));
+    // Set up the number-of-context-variables field.
+ __ movl(compiler::FieldAddress(result, Context::num_variables_offset()),
+ compiler::Immediate(num_context_variables()));
+ } else {
+ __ Jump(slow_path->entry_label());
+ }
__ Bind(slow_path->exit_label());
}
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index 085daca..b0a0802 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -30,6 +30,9 @@
namespace dart {
+DECLARE_FLAG(bool, inline_alloc);
+DECLARE_FLAG(bool, use_slow_path);
+
// Generic summary for call instructions that have all arguments pushed
// on the stack and return the result in a fixed register RAX (or XMM0 if
// the return type is double).
@@ -2854,12 +2857,15 @@
ASSERT(locs()->in(1).reg() == kLengthReg);
compiler::Label slow_path, done;
- if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
- num_elements()->BindsToConstant() &&
- num_elements()->BoundConstant().IsSmi()) {
- const intptr_t length = Smi::Cast(num_elements()->BoundConstant()).Value();
- if (Array::IsValidLength(length)) {
- InlineArrayAllocation(compiler, length, &slow_path, &done);
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+ if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
+ num_elements()->BindsToConstant() &&
+ num_elements()->BoundConstant().IsSmi()) {
+ const intptr_t length =
+ Smi::Cast(num_elements()->BoundConstant()).Value();
+ if (Array::IsValidLength(length)) {
+ InlineArrayAllocation(compiler, length, &slow_path, &done);
+ }
}
}
@@ -3227,15 +3233,19 @@
compiler->AddSlowPathCode(slow_path);
intptr_t instance_size = Context::InstanceSize(num_context_variables());
- __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
- compiler::Assembler::kFarJump,
- result, // instance
- temp, // end address
- locs()->temp(1).reg());
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+ __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
+ compiler::Assembler::kFarJump,
+ result, // instance
+ temp, // end address
+ locs()->temp(1).reg());
- // Setup up number of context variables field.
- __ movq(compiler::FieldAddress(result, Context::num_variables_offset()),
- compiler::Immediate(num_context_variables()));
+    // Set up the number-of-context-variables field.
+ __ movq(compiler::FieldAddress(result, Context::num_variables_offset()),
+ compiler::Immediate(num_context_variables()));
+ } else {
+ __ Jump(slow_path->entry_label());
+ }
__ Bind(slow_path->exit_label());
}
diff --git a/runtime/vm/compiler/stub_code_compiler_arm.cc b/runtime/vm/compiler/stub_code_compiler_arm.cc
index ff24802..515e680 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm.cc
@@ -1004,7 +1004,7 @@
// R2: array length as Smi (must be preserved).
// The newly allocated object is returned in R0.
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
- if (!FLAG_use_slow_path) {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
// and is computed as:
@@ -1127,7 +1127,7 @@
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
Assembler* assembler) {
// For test purpose call allocation stub without inline allocation attempt.
- if (!FLAG_use_slow_path) {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
__ TryAllocate(compiler::MintClass(), &slow_case,
AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
@@ -1148,7 +1148,7 @@
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
Assembler* assembler) {
// For test purpose call allocation stub without inline allocation attempt.
- if (!FLAG_use_slow_path) {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
__ TryAllocate(compiler::MintClass(), &slow_case,
AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
@@ -1441,7 +1441,7 @@
// Clobbered:
// Potentially any since it can go to runtime.
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
- {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Load num. variable in the existing context.
@@ -3432,91 +3432,95 @@
COMPILE_ASSERT(AllocateTypedDataArrayABI::kLengthReg == R4);
COMPILE_ASSERT(AllocateTypedDataArrayABI::kResultReg == R0);
- Label call_runtime;
- NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R2, cid));
- NOT_IN_PRODUCT(__ MaybeTraceAllocation(R2, &call_runtime));
- __ mov(R2, Operand(AllocateTypedDataArrayABI::kLengthReg));
- /* Check that length is a positive Smi. */
- /* R2: requested array length argument. */
- __ tst(R2, Operand(kSmiTagMask));
- __ b(&call_runtime, NE);
- __ SmiUntag(R2);
- /* Check for length >= 0 && length <= max_len. */
- /* R2: untagged array length. */
- __ CompareImmediate(R2, max_len);
- __ b(&call_runtime, HI);
- __ mov(R2, Operand(R2, LSL, scale_shift));
- const intptr_t fixed_size_plus_alignment_padding =
- target::TypedData::InstanceSize() +
- target::ObjectAlignment::kObjectAlignment - 1;
- __ AddImmediate(R2, fixed_size_plus_alignment_padding);
- __ bic(R2, R2, Operand(target::ObjectAlignment::kObjectAlignment - 1));
- __ ldr(R0, Address(THR, target::Thread::top_offset()));
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+ Label call_runtime;
+ NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R2, cid));
+ NOT_IN_PRODUCT(__ MaybeTraceAllocation(R2, &call_runtime));
+ __ mov(R2, Operand(AllocateTypedDataArrayABI::kLengthReg));
+ /* Check that length is a positive Smi. */
+ /* R2: requested array length argument. */
+ __ tst(R2, Operand(kSmiTagMask));
+ __ b(&call_runtime, NE);
+ __ SmiUntag(R2);
+ /* Check for length >= 0 && length <= max_len. */
+ /* R2: untagged array length. */
+ __ CompareImmediate(R2, max_len);
+ __ b(&call_runtime, HI);
+ __ mov(R2, Operand(R2, LSL, scale_shift));
+ const intptr_t fixed_size_plus_alignment_padding =
+ target::TypedData::InstanceSize() +
+ target::ObjectAlignment::kObjectAlignment - 1;
+ __ AddImmediate(R2, fixed_size_plus_alignment_padding);
+ __ bic(R2, R2, Operand(target::ObjectAlignment::kObjectAlignment - 1));
+ __ ldr(R0, Address(THR, target::Thread::top_offset()));
- /* R2: allocation size. */
- __ adds(R1, R0, Operand(R2));
- __ b(&call_runtime, CS); /* Fail on unsigned overflow. */
+ /* R2: allocation size. */
+ __ adds(R1, R0, Operand(R2));
+ __ b(&call_runtime, CS); /* Fail on unsigned overflow. */
- /* Check if the allocation fits into the remaining space. */
- /* R0: potential new object start. */
- /* R1: potential next object start. */
- /* R2: allocation size. */
- __ ldr(IP, Address(THR, target::Thread::end_offset()));
- __ cmp(R1, Operand(IP));
- __ b(&call_runtime, CS);
+ /* Check if the allocation fits into the remaining space. */
+ /* R0: potential new object start. */
+ /* R1: potential next object start. */
+ /* R2: allocation size. */
+ __ ldr(IP, Address(THR, target::Thread::end_offset()));
+ __ cmp(R1, Operand(IP));
+ __ b(&call_runtime, CS);
- __ str(R1, Address(THR, target::Thread::top_offset()));
- __ AddImmediate(R0, kHeapObjectTag);
- /* Initialize the tags. */
- /* R0: new object start as a tagged pointer. */
- /* R1: new object end address. */
- /* R2: allocation size. */
- {
- __ CompareImmediate(R2, target::UntaggedObject::kSizeTagMaxSizeTag);
+ __ str(R1, Address(THR, target::Thread::top_offset()));
+ __ AddImmediate(R0, kHeapObjectTag);
+ /* Initialize the tags. */
+ /* R0: new object start as a tagged pointer. */
+ /* R1: new object end address. */
+ /* R2: allocation size. */
+ {
+ __ CompareImmediate(R2, target::UntaggedObject::kSizeTagMaxSizeTag);
+ __ mov(R3,
+ Operand(R2, LSL,
+ target::UntaggedObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2),
+ LS);
+ __ mov(R3, Operand(0), HI);
+
+ /* Get the class index and insert it into the tags. */
+ uword tags =
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
+ __ LoadImmediate(TMP, tags);
+ __ orr(R3, R3, Operand(TMP));
+ __ str(R3, FieldAddress(R0, target::Object::tags_offset())); /* Tags. */
+ }
+ /* Set the length field. */
+ /* R0: new object start as a tagged pointer. */
+ /* R1: new object end address. */
+ /* R2: allocation size. */
__ mov(R3,
- Operand(R2, LSL,
- target::UntaggedObject::kTagBitsSizeTagPos -
- target::ObjectAlignment::kObjectAlignmentLog2),
- LS);
- __ mov(R3, Operand(0), HI);
+ Operand(AllocateTypedDataArrayABI::kLengthReg)); /* Array length. */
+ __ StoreIntoObjectNoBarrier(
+ R0, FieldAddress(R0, target::TypedDataBase::length_offset()), R3);
+ /* Initialize all array elements to 0. */
+ /* R0: new object start as a tagged pointer. */
+ /* R1: new object end address. */
+ /* R2: allocation size. */
+ /* R3: iterator which initially points to the start of the variable */
+ /* R8, R9: zero. */
+ /* data area to be initialized. */
+ __ LoadImmediate(R8, 0);
+ __ mov(R9, Operand(R8));
+ __ AddImmediate(R3, R0, target::TypedData::InstanceSize() - 1);
+ __ StoreInternalPointer(
+ R0, FieldAddress(R0, target::TypedDataBase::data_field_offset()), R3);
+ Label init_loop;
+ __ Bind(&init_loop);
+ __ AddImmediate(R3, 2 * target::kWordSize);
+ __ cmp(R3, Operand(R1));
+ __ strd(R8, R9, R3, -2 * target::kWordSize, LS);
+ __ b(&init_loop, CC);
+ __ str(R8, Address(R3, -2 * target::kWordSize), HI);
- /* Get the class index and insert it into the tags. */
- uword tags = target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
- __ LoadImmediate(TMP, tags);
- __ orr(R3, R3, Operand(TMP));
- __ str(R3, FieldAddress(R0, target::Object::tags_offset())); /* Tags. */
+ __ Ret();
+
+ __ Bind(&call_runtime);
}
- /* Set the length field. */
- /* R0: new object start as a tagged pointer. */
- /* R1: new object end address. */
- /* R2: allocation size. */
- __ mov(R3,
- Operand(AllocateTypedDataArrayABI::kLengthReg)); /* Array length. */
- __ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, target::TypedDataBase::length_offset()), R3);
- /* Initialize all array elements to 0. */
- /* R0: new object start as a tagged pointer. */
- /* R1: new object end address. */
- /* R2: allocation size. */
- /* R3: iterator which initially points to the start of the variable */
- /* R8, R9: zero. */
- /* data area to be initialized. */
- __ LoadImmediate(R8, 0);
- __ mov(R9, Operand(R8));
- __ AddImmediate(R3, R0, target::TypedData::InstanceSize() - 1);
- __ StoreInternalPointer(
- R0, FieldAddress(R0, target::TypedDataBase::data_field_offset()), R3);
- Label init_loop;
- __ Bind(&init_loop);
- __ AddImmediate(R3, 2 * target::kWordSize);
- __ cmp(R3, Operand(R1));
- __ strd(R8, R9, R3, -2 * target::kWordSize, LS);
- __ b(&init_loop, CC);
- __ str(R8, Address(R3, -2 * target::kWordSize), HI);
- __ Ret();
-
- __ Bind(&call_runtime);
__ EnterStubFrame();
__ PushObject(Object::null_object()); // Make room for the result.
__ PushImmediate(target::ToRawSmi(cid)); // Cid
diff --git a/runtime/vm/compiler/stub_code_compiler_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
index 4f4d44d..c8df259 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -1124,7 +1124,7 @@
// NOTE: R2 cannot be clobbered here as the caller relies on it being saved.
// The newly allocated object is returned in R0.
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
- if (!FLAG_use_slow_path) {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
// and is computed as:
@@ -1264,7 +1264,7 @@
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
Assembler* assembler) {
// For test purpose call allocation stub without inline allocation attempt.
- if (!FLAG_use_slow_path) {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
__ TryAllocate(compiler::MintClass(), &slow_case,
AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
@@ -1284,7 +1284,7 @@
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
Assembler* assembler) {
// For test purpose call allocation stub without inline allocation attempt.
- if (!FLAG_use_slow_path) {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
__ TryAllocate(compiler::MintClass(), &slow_case,
AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
@@ -1587,7 +1587,7 @@
// Clobbered:
// R1, (R2), R3, R4, (TMP)
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
- {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Load num. variable (int32) in the existing context.
@@ -3574,86 +3574,91 @@
COMPILE_ASSERT(AllocateTypedDataArrayABI::kLengthReg == R4);
COMPILE_ASSERT(AllocateTypedDataArrayABI::kResultReg == R0);
- Label call_runtime;
- NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R2, &call_runtime));
- __ mov(R2, AllocateTypedDataArrayABI::kLengthReg);
- /* Check that length is a positive Smi. */
- /* R2: requested array length argument. */
- __ BranchIfNotSmi(R2, &call_runtime);
- __ SmiUntag(R2);
- /* Check for length >= 0 && length <= max_len. */
- /* R2: untagged array length. */
- __ CompareImmediate(R2, max_len, kObjectBytes);
- __ b(&call_runtime, HI);
- __ LslImmediate(R2, R2, scale_shift);
- const intptr_t fixed_size_plus_alignment_padding =
- target::TypedData::InstanceSize() +
- target::ObjectAlignment::kObjectAlignment - 1;
- __ AddImmediate(R2, fixed_size_plus_alignment_padding);
- __ andi(R2, R2, Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));
- __ ldr(R0, Address(THR, target::Thread::top_offset()));
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+ Label call_runtime;
+ NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R2, &call_runtime));
+ __ mov(R2, AllocateTypedDataArrayABI::kLengthReg);
+ /* Check that length is a positive Smi. */
+ /* R2: requested array length argument. */
+ __ BranchIfNotSmi(R2, &call_runtime);
+ __ SmiUntag(R2);
+ /* Check for length >= 0 && length <= max_len. */
+ /* R2: untagged array length. */
+ __ CompareImmediate(R2, max_len, kObjectBytes);
+ __ b(&call_runtime, HI);
+ __ LslImmediate(R2, R2, scale_shift);
+ const intptr_t fixed_size_plus_alignment_padding =
+ target::TypedData::InstanceSize() +
+ target::ObjectAlignment::kObjectAlignment - 1;
+ __ AddImmediate(R2, fixed_size_plus_alignment_padding);
+ __ andi(R2, R2,
+ Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));
+ __ ldr(R0, Address(THR, target::Thread::top_offset()));
- /* R2: allocation size. */
- __ adds(R1, R0, Operand(R2));
- __ b(&call_runtime, CS); /* Fail on unsigned overflow. */
+ /* R2: allocation size. */
+ __ adds(R1, R0, Operand(R2));
+ __ b(&call_runtime, CS); /* Fail on unsigned overflow. */
- /* Check if the allocation fits into the remaining space. */
- /* R0: potential new object start. */
- /* R1: potential next object start. */
- /* R2: allocation size. */
- __ ldr(R6, Address(THR, target::Thread::end_offset()));
- __ cmp(R1, Operand(R6));
- __ b(&call_runtime, CS);
+ /* Check if the allocation fits into the remaining space. */
+ /* R0: potential new object start. */
+ /* R1: potential next object start. */
+ /* R2: allocation size. */
+ __ ldr(R6, Address(THR, target::Thread::end_offset()));
+ __ cmp(R1, Operand(R6));
+ __ b(&call_runtime, CS);
- /* Successfully allocated the object(s), now update top to point to */
- /* next object start and initialize the object. */
- __ str(R1, Address(THR, target::Thread::top_offset()));
- __ AddImmediate(R0, kHeapObjectTag);
- /* Initialize the tags. */
- /* R0: new object start as a tagged pointer. */
- /* R1: new object end address. */
- /* R2: allocation size. */
- {
- __ CompareImmediate(R2, target::UntaggedObject::kSizeTagMaxSizeTag);
- __ LslImmediate(R2, R2,
- target::UntaggedObject::kTagBitsSizeTagPos -
- target::ObjectAlignment::kObjectAlignmentLog2);
- __ csel(R2, ZR, R2, HI);
+ /* Successfully allocated the object(s), now update top to point to */
+ /* next object start and initialize the object. */
+ __ str(R1, Address(THR, target::Thread::top_offset()));
+ __ AddImmediate(R0, kHeapObjectTag);
+ /* Initialize the tags. */
+ /* R0: new object start as a tagged pointer. */
+ /* R1: new object end address. */
+ /* R2: allocation size. */
+ {
+ __ CompareImmediate(R2, target::UntaggedObject::kSizeTagMaxSizeTag);
+ __ LslImmediate(R2, R2,
+ target::UntaggedObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2);
+ __ csel(R2, ZR, R2, HI);
- /* Get the class index and insert it into the tags. */
- uword tags = target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
- __ LoadImmediate(TMP, tags);
- __ orr(R2, R2, Operand(TMP));
- __ str(R2, FieldAddress(R0, target::Object::tags_offset())); /* Tags. */
+ /* Get the class index and insert it into the tags. */
+ uword tags =
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
+ __ LoadImmediate(TMP, tags);
+ __ orr(R2, R2, Operand(TMP));
+ __ str(R2, FieldAddress(R0, target::Object::tags_offset())); /* Tags. */
+ }
+ /* Set the length field. */
+ /* R0: new object start as a tagged pointer. */
+ /* R1: new object end address. */
+ __ mov(R2, AllocateTypedDataArrayABI::kLengthReg); /* Array length. */
+ __ StoreIntoObjectNoBarrier(
+ R0, FieldAddress(R0, target::TypedDataBase::length_offset()), R2);
+ /* Initialize all array elements to 0. */
+ /* R0: new object start as a tagged pointer. */
+ /* R1: new object end address. */
+ /* R2: iterator which initially points to the start of the variable */
+ /* R3: scratch register. */
+ /* data area to be initialized. */
+ __ mov(R3, ZR);
+ __ AddImmediate(R2, R0, target::TypedData::InstanceSize() - 1);
+ __ StoreInternalPointer(
+ R0, FieldAddress(R0, target::TypedDataBase::data_field_offset()), R2);
+ Label init_loop, done;
+ __ Bind(&init_loop);
+ __ cmp(R2, Operand(R1));
+ __ b(&done, CS);
+ __ str(R3, Address(R2, 0));
+ __ add(R2, R2, Operand(target::kWordSize));
+ __ b(&init_loop);
+ __ Bind(&done);
+
+ __ Ret();
+
+ __ Bind(&call_runtime);
}
- /* Set the length field. */
- /* R0: new object start as a tagged pointer. */
- /* R1: new object end address. */
- __ mov(R2, AllocateTypedDataArrayABI::kLengthReg); /* Array length. */
- __ StoreIntoObjectNoBarrier(
- R0, FieldAddress(R0, target::TypedDataBase::length_offset()), R2);
- /* Initialize all array elements to 0. */
- /* R0: new object start as a tagged pointer. */
- /* R1: new object end address. */
- /* R2: iterator which initially points to the start of the variable */
- /* R3: scratch register. */
- /* data area to be initialized. */
- __ mov(R3, ZR);
- __ AddImmediate(R2, R0, target::TypedData::InstanceSize() - 1);
- __ StoreInternalPointer(
- R0, FieldAddress(R0, target::TypedDataBase::data_field_offset()), R2);
- Label init_loop, done;
- __ Bind(&init_loop);
- __ cmp(R2, Operand(R1));
- __ b(&done, CS);
- __ str(R3, Address(R2, 0));
- __ add(R2, R2, Operand(target::kWordSize));
- __ b(&init_loop);
- __ Bind(&done);
- __ Ret();
-
- __ Bind(&call_runtime);
__ EnterStubFrame();
__ Push(ZR); // Result slot.
__ PushImmediate(target::ToRawSmi(cid)); // Cid
diff --git a/runtime/vm/compiler/stub_code_compiler_ia32.cc b/runtime/vm/compiler/stub_code_compiler_ia32.cc
index 5c780a3..2286706 100644
--- a/runtime/vm/compiler/stub_code_compiler_ia32.cc
+++ b/runtime/vm/compiler/stub_code_compiler_ia32.cc
@@ -804,7 +804,7 @@
// Uses EAX, EBX, ECX, EDI as temporary registers.
// The newly allocated object is returned in EAX.
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
- if (!FLAG_use_slow_path) {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
// and is computed as:
@@ -1223,7 +1223,7 @@
// Clobbered:
// EBX, ECX, EDX
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
- {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Load num. variable in the existing context.
@@ -2923,108 +2923,114 @@
COMPILE_ASSERT(AllocateTypedDataArrayABI::kLengthReg == EAX);
COMPILE_ASSERT(AllocateTypedDataArrayABI::kResultReg == EAX);
- // Save length argument for possible runtime call, as
- // EAX is clobbered.
- Label call_runtime;
- __ pushl(AllocateTypedDataArrayABI::kLengthReg);
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+ // Save length argument for possible runtime call, as
+ // EAX is clobbered.
+ Label call_runtime;
+ __ pushl(AllocateTypedDataArrayABI::kLengthReg);
- NOT_IN_PRODUCT(
- __ MaybeTraceAllocation(cid, ECX, &call_runtime, Assembler::kFarJump));
- __ movl(EDI, AllocateTypedDataArrayABI::kLengthReg);
- /* Check that length is a positive Smi. */
- /* EDI: requested array length argument. */
- __ testl(EDI, Immediate(kSmiTagMask));
- __ j(NOT_ZERO, &call_runtime);
- __ SmiUntag(EDI);
- /* Check for length >= 0 && length <= max_len. */
- /* EDI: untagged array length. */
- __ cmpl(EDI, Immediate(max_len));
- __ j(ABOVE, &call_runtime);
- /* Special case for scaling by 16. */
- if (scale_factor == TIMES_16) {
- /* double length of array. */
- __ addl(EDI, EDI);
- /* only scale by 8. */
- scale_factor = TIMES_8;
- }
+ NOT_IN_PRODUCT(
+ __ MaybeTraceAllocation(cid, ECX, &call_runtime, Assembler::kFarJump));
+ __ movl(EDI, AllocateTypedDataArrayABI::kLengthReg);
+ /* Check that length is a positive Smi. */
+ /* EDI: requested array length argument. */
+ __ testl(EDI, Immediate(kSmiTagMask));
+ __ j(NOT_ZERO, &call_runtime);
+ __ SmiUntag(EDI);
+ /* Check for length >= 0 && length <= max_len. */
+ /* EDI: untagged array length. */
+ __ cmpl(EDI, Immediate(max_len));
+ __ j(ABOVE, &call_runtime);
+ /* Special case for scaling by 16. */
+ if (scale_factor == TIMES_16) {
+ /* double length of array. */
+ __ addl(EDI, EDI);
+ /* only scale by 8. */
+ scale_factor = TIMES_8;
+ }
- const intptr_t fixed_size_plus_alignment_padding =
- target::TypedData::InstanceSize() +
- target::ObjectAlignment::kObjectAlignment - 1;
- __ leal(EDI, Address(EDI, scale_factor, fixed_size_plus_alignment_padding));
- __ andl(EDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
- __ movl(EAX, Address(THR, target::Thread::top_offset()));
- __ movl(EBX, EAX);
- /* EDI: allocation size. */
- __ addl(EBX, EDI);
- __ j(CARRY, &call_runtime);
+ const intptr_t fixed_size_plus_alignment_padding =
+ target::TypedData::InstanceSize() +
+ target::ObjectAlignment::kObjectAlignment - 1;
+ __ leal(EDI, Address(EDI, scale_factor, fixed_size_plus_alignment_padding));
+ __ andl(EDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
+ __ movl(EAX, Address(THR, target::Thread::top_offset()));
+ __ movl(EBX, EAX);
+ /* EDI: allocation size. */
+ __ addl(EBX, EDI);
+ __ j(CARRY, &call_runtime);
- /* Check if the allocation fits into the remaining space. */
- /* EAX: potential new object start. */
- /* EBX: potential next object start. */
- /* EDI: allocation size. */
- __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
- __ j(ABOVE_EQUAL, &call_runtime);
+ /* Check if the allocation fits into the remaining space. */
+ /* EAX: potential new object start. */
+ /* EBX: potential next object start. */
+ /* EDI: allocation size. */
+ __ cmpl(EBX, Address(THR, target::Thread::end_offset()));
+ __ j(ABOVE_EQUAL, &call_runtime);
- /* Successfully allocated the object(s), now update top to point to */
- /* next object start and initialize the object. */
- __ movl(Address(THR, target::Thread::top_offset()), EBX);
- __ addl(EAX, Immediate(kHeapObjectTag));
+ /* Successfully allocated the object(s), now update top to point to */
+ /* next object start and initialize the object. */
+ __ movl(Address(THR, target::Thread::top_offset()), EBX);
+ __ addl(EAX, Immediate(kHeapObjectTag));
- /* Initialize the tags. */
- /* EAX: new object start as a tagged pointer. */
- /* EBX: new object end address. */
- /* EDI: allocation size. */
- {
- Label size_tag_overflow, done;
- __ cmpl(EDI, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
- __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
- __ shll(EDI, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
- target::ObjectAlignment::kObjectAlignmentLog2));
- __ jmp(&done, Assembler::kNearJump);
- __ Bind(&size_tag_overflow);
- __ movl(EDI, Immediate(0));
+ /* Initialize the tags. */
+ /* EAX: new object start as a tagged pointer. */
+ /* EBX: new object end address. */
+ /* EDI: allocation size. */
+ {
+ Label size_tag_overflow, done;
+ __ cmpl(EDI, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
+ __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
+ __ shll(EDI, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2));
+ __ jmp(&done, Assembler::kNearJump);
+ __ Bind(&size_tag_overflow);
+ __ movl(EDI, Immediate(0));
+ __ Bind(&done);
+ /* Get the class index and insert it into the tags. */
+ uword tags =
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
+ __ orl(EDI, Immediate(tags));
+ __ movl(FieldAddress(EAX, target::Object::tags_offset()),
+ EDI); /* Tags. */
+ }
+
+ /* Set the length field. */
+ /* EAX: new object start as a tagged pointer. */
+ /* EBX: new object end address. */
+ __ popl(EDI); /* Array length. */
+ __ StoreIntoObjectNoBarrier(
+ EAX, FieldAddress(EAX, target::TypedDataBase::length_offset()), EDI);
+
+ /* Initialize all array elements to 0. */
+ /* EAX: new object start as a tagged pointer. */
+ /* EBX: new object end address. */
+ /* EDI: iterator which initially points to the start of the variable */
+ /* ECX: scratch register. */
+ /* data area to be initialized. */
+ __ xorl(ECX, ECX); /* Zero. */
+ __ leal(EDI, FieldAddress(EAX, target::TypedData::InstanceSize()));
+ __ StoreInternalPointer(
+ EAX, FieldAddress(EAX, target::TypedDataBase::data_field_offset()),
+ EDI);
+ Label done, init_loop;
+ __ Bind(&init_loop);
+ __ cmpl(EDI, EBX);
+ __ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
+ __ movl(Address(EDI, 0), ECX);
+ __ addl(EDI, Immediate(target::kWordSize));
+ __ jmp(&init_loop, Assembler::kNearJump);
__ Bind(&done);
- /* Get the class index and insert it into the tags. */
- uword tags = target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
- __ orl(EDI, Immediate(tags));
- __ movl(FieldAddress(EAX, target::Object::tags_offset()), EDI); /* Tags. */
+
+ __ ret();
+
+ __ Bind(&call_runtime);
+ __ popl(AllocateTypedDataArrayABI::kLengthReg);
}
- /* Set the length field. */
- /* EAX: new object start as a tagged pointer. */
- /* EBX: new object end address. */
- __ popl(EDI); /* Array length. */
- __ StoreIntoObjectNoBarrier(
- EAX, FieldAddress(EAX, target::TypedDataBase::length_offset()), EDI);
-
- /* Initialize all array elements to 0. */
- /* EAX: new object start as a tagged pointer. */
- /* EBX: new object end address. */
- /* EDI: iterator which initially points to the start of the variable */
- /* ECX: scratch register. */
- /* data area to be initialized. */
- __ xorl(ECX, ECX); /* Zero. */
- __ leal(EDI, FieldAddress(EAX, target::TypedData::InstanceSize()));
- __ StoreInternalPointer(
- EAX, FieldAddress(EAX, target::TypedDataBase::data_field_offset()), EDI);
- Label done, init_loop;
- __ Bind(&init_loop);
- __ cmpl(EDI, EBX);
- __ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
- __ movl(Address(EDI, 0), ECX);
- __ addl(EDI, Immediate(target::kWordSize));
- __ jmp(&init_loop, Assembler::kNearJump);
- __ Bind(&done);
-
- __ ret();
-
- __ Bind(&call_runtime);
- __ popl(EDI); // Array length
__ EnterStubFrame();
__ PushObject(Object::null_object()); // Make room for the result.
__ pushl(Immediate(target::ToRawSmi(cid)));
- __ pushl(EDI); // Array length
+ __ pushl(AllocateTypedDataArrayABI::kLengthReg);
__ CallRuntime(kAllocateTypedDataRuntimeEntry, 2);
__ Drop(2); // Drop arguments.
__ popl(AllocateTypedDataArrayABI::kResultReg);
diff --git a/runtime/vm/compiler/stub_code_compiler_x64.cc b/runtime/vm/compiler/stub_code_compiler_x64.cc
index ade135f..2929038 100644
--- a/runtime/vm/compiler/stub_code_compiler_x64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_x64.cc
@@ -1055,7 +1055,7 @@
// NOTE: R10 cannot be clobbered here as the caller relies on it being saved.
// The newly allocated object is returned in RAX.
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
- if (!FLAG_use_slow_path) {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
// and is computed as:
@@ -1187,7 +1187,7 @@
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
Assembler* assembler) {
// For test purpose call allocation stub without inline allocation attempt.
- if (!FLAG_use_slow_path) {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
__ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
@@ -1207,7 +1207,7 @@
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
Assembler* assembler) {
// For test purpose call allocation stub without inline allocation attempt.
- if (!FLAG_use_slow_path) {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
__ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
AllocateMintABI::kResultReg, AllocateMintABI::kTempReg);
@@ -1530,7 +1530,7 @@
// Clobbered:
// R10, R13
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
- {
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Load num. variable (int32_t) in the existing context.
@@ -3511,108 +3511,114 @@
COMPILE_ASSERT(AllocateTypedDataArrayABI::kLengthReg == RAX);
COMPILE_ASSERT(AllocateTypedDataArrayABI::kResultReg == RAX);
- // Save length argument for possible runtime call, as
- // RAX is clobbered.
- Label call_runtime;
- __ pushq(AllocateTypedDataArrayABI::kLengthReg);
+ if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+ // Save length argument for possible runtime call, as
+ // RAX is clobbered.
+ Label call_runtime;
+ __ pushq(AllocateTypedDataArrayABI::kLengthReg);
- NOT_IN_PRODUCT(
- __ MaybeTraceAllocation(cid, &call_runtime, Assembler::kFarJump));
- __ movq(RDI, AllocateTypedDataArrayABI::kLengthReg);
- /* Check that length is a positive Smi. */
- /* RDI: requested array length argument. */
- __ testq(RDI, Immediate(kSmiTagMask));
- __ j(NOT_ZERO, &call_runtime);
- __ SmiUntag(RDI);
- /* Check for length >= 0 && length <= max_len. */
- /* RDI: untagged array length. */
- __ cmpq(RDI, Immediate(max_len));
- __ j(ABOVE, &call_runtime);
- /* Special case for scaling by 16. */
- if (scale_factor == TIMES_16) {
- /* double length of array. */
- __ addq(RDI, RDI);
- /* only scale by 8. */
- scale_factor = TIMES_8;
- }
- const intptr_t fixed_size_plus_alignment_padding =
- target::TypedData::InstanceSize() +
- target::ObjectAlignment::kObjectAlignment - 1;
- __ leaq(RDI, Address(RDI, scale_factor, fixed_size_plus_alignment_padding));
- __ andq(RDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
- __ movq(RAX, Address(THR, target::Thread::top_offset()));
- __ movq(RCX, RAX);
+ NOT_IN_PRODUCT(
+ __ MaybeTraceAllocation(cid, &call_runtime, Assembler::kFarJump));
+ __ movq(RDI, AllocateTypedDataArrayABI::kLengthReg);
+ /* Check that length is a positive Smi. */
+ /* RDI: requested array length argument. */
+ __ testq(RDI, Immediate(kSmiTagMask));
+ __ j(NOT_ZERO, &call_runtime);
+ __ SmiUntag(RDI);
+ /* Check for length >= 0 && length <= max_len. */
+ /* RDI: untagged array length. */
+ __ cmpq(RDI, Immediate(max_len));
+ __ j(ABOVE, &call_runtime);
+ /* Special case for scaling by 16. */
+ if (scale_factor == TIMES_16) {
+ /* double length of array. */
+ __ addq(RDI, RDI);
+ /* only scale by 8. */
+ scale_factor = TIMES_8;
+ }
+ const intptr_t fixed_size_plus_alignment_padding =
+ target::TypedData::InstanceSize() +
+ target::ObjectAlignment::kObjectAlignment - 1;
+ __ leaq(RDI, Address(RDI, scale_factor, fixed_size_plus_alignment_padding));
+ __ andq(RDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
+ __ movq(RAX, Address(THR, target::Thread::top_offset()));
+ __ movq(RCX, RAX);
- /* RDI: allocation size. */
- __ addq(RCX, RDI);
- __ j(CARRY, &call_runtime);
+ /* RDI: allocation size. */
+ __ addq(RCX, RDI);
+ __ j(CARRY, &call_runtime);
- /* Check if the allocation fits into the remaining space. */
- /* RAX: potential new object start. */
- /* RCX: potential next object start. */
- /* RDI: allocation size. */
- __ cmpq(RCX, Address(THR, target::Thread::end_offset()));
- __ j(ABOVE_EQUAL, &call_runtime);
+ /* Check if the allocation fits into the remaining space. */
+ /* RAX: potential new object start. */
+ /* RCX: potential next object start. */
+ /* RDI: allocation size. */
+ __ cmpq(RCX, Address(THR, target::Thread::end_offset()));
+ __ j(ABOVE_EQUAL, &call_runtime);
- /* Successfully allocated the object(s), now update top to point to */
- /* next object start and initialize the object. */
- __ movq(Address(THR, target::Thread::top_offset()), RCX);
- __ addq(RAX, Immediate(kHeapObjectTag));
- /* Initialize the tags. */
- /* RAX: new object start as a tagged pointer. */
- /* RCX: new object end address. */
- /* RDI: allocation size. */
- /* R13: scratch register. */
- {
- Label size_tag_overflow, done;
- __ cmpq(RDI, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
- __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
- __ shlq(RDI, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
- target::ObjectAlignment::kObjectAlignmentLog2));
- __ jmp(&done, Assembler::kNearJump);
+ /* Successfully allocated the object(s), now update top to point to */
+ /* next object start and initialize the object. */
+ __ movq(Address(THR, target::Thread::top_offset()), RCX);
+ __ addq(RAX, Immediate(kHeapObjectTag));
+ /* Initialize the tags. */
+ /* RAX: new object start as a tagged pointer. */
+ /* RCX: new object end address. */
+ /* RDI: allocation size. */
+ /* R13: scratch register. */
+ {
+ Label size_tag_overflow, done;
+ __ cmpq(RDI, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
+ __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
+ __ shlq(RDI, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
+ target::ObjectAlignment::kObjectAlignmentLog2));
+ __ jmp(&done, Assembler::kNearJump);
- __ Bind(&size_tag_overflow);
- __ LoadImmediate(RDI, Immediate(0));
+ __ Bind(&size_tag_overflow);
+ __ LoadImmediate(RDI, Immediate(0));
+ __ Bind(&done);
+
+ /* Get the class index and insert it into the tags. */
+ uword tags =
+ target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
+ __ orq(RDI, Immediate(tags));
+ __ movq(FieldAddress(RAX, target::Object::tags_offset()),
+ RDI); /* Tags. */
+ }
+ /* Set the length field. */
+ /* RAX: new object start as a tagged pointer. */
+ /* RCX: new object end address. */
+ __ popq(RDI); /* Array length. */
+ __ StoreIntoObjectNoBarrier(
+ RAX, FieldAddress(RAX, target::TypedDataBase::length_offset()), RDI);
+ /* Initialize all array elements to 0. */
+ /* RAX: new object start as a tagged pointer. */
+ /* RCX: new object end address. */
+ /* RDI: iterator which initially points to the start of the variable */
+ /* RBX: scratch register. */
+ /* data area to be initialized. */
+ __ xorq(RBX, RBX); /* Zero. */
+ __ leaq(RDI, FieldAddress(RAX, target::TypedData::InstanceSize()));
+ __ StoreInternalPointer(
+ RAX, FieldAddress(RAX, target::TypedDataBase::data_field_offset()),
+ RDI);
+ Label done, init_loop;
+ __ Bind(&init_loop);
+ __ cmpq(RDI, RCX);
+ __ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
+ __ movq(Address(RDI, 0), RBX);
+ __ addq(RDI, Immediate(target::kWordSize));
+ __ jmp(&init_loop, Assembler::kNearJump);
__ Bind(&done);
- /* Get the class index and insert it into the tags. */
- uword tags = target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
- __ orq(RDI, Immediate(tags));
- __ movq(FieldAddress(RAX, target::Object::tags_offset()), RDI); /* Tags. */
+ __ ret();
+
+ __ Bind(&call_runtime);
+ __ popq(AllocateTypedDataArrayABI::kLengthReg);
}
- /* Set the length field. */
- /* RAX: new object start as a tagged pointer. */
- /* RCX: new object end address. */
- __ popq(RDI); /* Array length. */
- __ StoreIntoObjectNoBarrier(
- RAX, FieldAddress(RAX, target::TypedDataBase::length_offset()), RDI);
- /* Initialize all array elements to 0. */
- /* RAX: new object start as a tagged pointer. */
- /* RCX: new object end address. */
- /* RDI: iterator which initially points to the start of the variable */
- /* RBX: scratch register. */
- /* data area to be initialized. */
- __ xorq(RBX, RBX); /* Zero. */
- __ leaq(RDI, FieldAddress(RAX, target::TypedData::InstanceSize()));
- __ StoreInternalPointer(
- RAX, FieldAddress(RAX, target::TypedDataBase::data_field_offset()), RDI);
- Label done, init_loop;
- __ Bind(&init_loop);
- __ cmpq(RDI, RCX);
- __ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
- __ movq(Address(RDI, 0), RBX);
- __ addq(RDI, Immediate(target::kWordSize));
- __ jmp(&init_loop, Assembler::kNearJump);
- __ Bind(&done);
- __ ret();
-
- __ Bind(&call_runtime);
- __ popq(RDI); // Array length
__ EnterStubFrame();
__ PushObject(Object::null_object()); // Make room for the result.
__ PushImmediate(Immediate(target::ToRawSmi(cid)));
- __ pushq(RDI); // Array length
+ __ pushq(AllocateTypedDataArrayABI::kLengthReg);
__ CallRuntime(kAllocateTypedDataRuntimeEntry, 2);
__ Drop(2); // Drop arguments.
__ popq(AllocateTypedDataArrayABI::kResultReg);
diff --git a/tests/language/const_functions/const_functions_for_statements_test.dart b/tests/language/const_functions/const_functions_for_statements_test.dart
new file mode 100644
index 0000000..4575070
--- /dev/null
+++ b/tests/language/const_functions/const_functions_for_statements_test.dart
@@ -0,0 +1,56 @@
+// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// Tests for statements for const functions.
+
+// SharedOptions=--enable-experiment=const-functions
+
+import "package:expect/expect.dart";
+
+const var1 = fn(2);
+// ^^^^^
+// [analyzer] COMPILE_TIME_ERROR.CONST_INITIALIZED_WITH_NON_CONSTANT_VALUE
+const var2 = fn(3);
+// ^^^^^
+// [analyzer] COMPILE_TIME_ERROR.CONST_INITIALIZED_WITH_NON_CONSTANT_VALUE
+int fn(int a) {
+ int b = a;
+ for (int i = 0; i < 2; i++) {
+ b += a;
+ }
+ return b;
+}
+
+const var3 = fn1(2);
+// ^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.CONST_INITIALIZED_WITH_NON_CONSTANT_VALUE
+const var4 = fn1(3);
+// ^^^^^^
+// [analyzer] COMPILE_TIME_ERROR.CONST_INITIALIZED_WITH_NON_CONSTANT_VALUE
+int fn1(int a) {
+ int b = a;
+ for (int i = 0;; i++) {
+ b *= 3;
+ if (b > 10) return b;
+ }
+}
+
+const var5 = fn2();
+// ^^^^^
+// [analyzer] COMPILE_TIME_ERROR.CONST_INITIALIZED_WITH_NON_CONSTANT_VALUE
+int fn2() {
+ for (int i = 0, j = 2;; i += 2, j += 1) {
+ if (i + j > 10) {
+ return i + j;
+ }
+ }
+}
+
+void main() {
+ Expect.equals(var1, 6);
+ Expect.equals(var2, 9);
+ Expect.equals(var3, 18);
+ Expect.equals(var4, 27);
+ Expect.equals(var5, 11);
+}
diff --git a/tools/VERSION b/tools/VERSION
index a5bbf8d..5a93430 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
MAJOR 2
MINOR 13
PATCH 0
-PRERELEASE 147
+PRERELEASE 148
PRERELEASE_PATCH 0
\ No newline at end of file