[vm, compiler] Allow targeting TSAN or MSAN by passing a flag to gen_snapshot.
Add a check that the snapshot and the runtime agree on whether to use MSAN. We already have this check for TSAN.
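For example, a non-TSAN gen_snapshot can now produce a snapshot for a TSAN
runtime (invocation mirrors the new test; paths illustrative):
  gen_snapshot --snapshot-kind=app-aot-elf --elf=app.elf \
      --target_thread_sanitizer app.dill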
TEST=vm/dart/sanitizer_compatibility_test
Bug: https://github.com/dart-lang/sdk/issues/55637
Bug: https://github.com/dart-lang/sdk/issues/55638
Change-Id: I320e6f55cd59209ce6e58a82ac205a87c8a60a84
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/365487
Reviewed-by: Daco Harkes <dacoharkes@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Ivan Inozemtsev <iinozemtsev@google.com>
Reviewed-by: Siva Annamalai <asiva@google.com>
diff --git a/runtime/BUILD.gn b/runtime/BUILD.gn
index c2920b7..3060b33 100644
--- a/runtime/BUILD.gn
+++ b/runtime/BUILD.gn
@@ -89,6 +89,8 @@
-# the same mode (TSAN or non-TSAN).
+# the same mode (TSAN/MSAN or neither).
if (is_tsan) {
defines += [ "TARGET_USES_THREAD_SANITIZER" ]
+ } else if (is_msan) {
+ defines += [ "TARGET_USES_MEMORY_SANITIZER" ]
}
}
diff --git a/runtime/platform/thread_sanitizer.h b/runtime/platform/thread_sanitizer.h
index ed0843a..465f9a3 100644
--- a/runtime/platform/thread_sanitizer.h
+++ b/runtime/platform/thread_sanitizer.h
@@ -35,20 +35,4 @@
#define DO_IF_NOT_TSAN(CODE) CODE
#endif
-// By default TSAN is enabled if this code is compiled under TSAN.
-//
-// Though in our AOT compiler we don't know whether the target AOT runtime will
-// use TSAN or not, so we'll rely on the build rules telling us that
-// information.
-#if defined(USING_THREAD_SANITIZER) && !defined(DART_PRECOMPILER) && \
- !defined(TARGET_USES_THREAD_SANITIZER)
-#define TARGET_USES_THREAD_SANITIZER
-#endif
-
-#if defined(TARGET_USES_THREAD_SANITIZER)
-constexpr bool kTargetUsesThreadSanitizer = true;
-#else
-constexpr bool kTargetUsesThreadSanitizer = false;
-#endif
-
#endif // RUNTIME_PLATFORM_THREAD_SANITIZER_H_
diff --git a/runtime/tests/vm/dart/sanitizer_compatibility_test.dart b/runtime/tests/vm/dart/sanitizer_compatibility_test.dart
new file mode 100644
index 0000000..fe80493
--- /dev/null
+++ b/runtime/tests/vm/dart/sanitizer_compatibility_test.dart
@@ -0,0 +1,98 @@
+// Copyright (c) 2024, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// Check for a proper error when a snapshot and a runtime don't agree on which
+// sanitizer they are using.
+
+import "dart:io";
+
+import "package:expect/expect.dart";
+
+import "use_flag_test_helper.dart";
+
+String find(String haystack, List<String> needles) {
+ for (String needle in needles) {
+ if (haystack.contains(needle)) {
+ return needle;
+ }
+ }
+ throw "None of ${needles.join(' ')}";
+}
+
+void checkExists(String path) {
+ if (!File(path).existsSync()) {
+ throw "$path does not exist";
+ }
+}
+
+main() async {
+ var sanitizer = find(Platform.executable, ["MSAN", "TSAN"]);
+ var mode = find(Platform.executable, ["Debug", "Release", "Product"]);
+ var arch = find(Platform.executable, ["X64", "ARM64", "RISCV64"]);
+ var out = find(Platform.executable, ["out", "xcodebuild"]);
+ var targetFlag = {
+ "MSAN": "--target_memory_sanitizer",
+ "TSAN": "--target_thread_sanitizer"
+ }[sanitizer]!;
+
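+ // Build outputs follow the $out/$mode$sanitizer$arch naming convention
+ // (e.g. out/ReleaseTSANX64); the plain build omits the sanitizer part.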
+ var nonePlatform = "$out/$mode$arch/vm_platform_strong.dill";
+ var noneGenSnapshot = "$out/$mode$arch/gen_snapshot";
+ var noneJitRuntime = "$out/$mode$arch/dart";
+ var noneAotRuntime = "$out/$mode$arch/dart_precompiled_runtime";
+ var sanitizerGenSnapshot = "$out/$mode$sanitizer$arch/gen_snapshot";
+ var sanitizerAotRuntime =
+ "$out/$mode$sanitizer$arch/dart_precompiled_runtime";
+
+ checkExists(noneGenSnapshot);
+ checkExists(noneJitRuntime);
+ checkExists(noneAotRuntime);
+ checkExists(sanitizerGenSnapshot);
+ checkExists(sanitizerAotRuntime);
+
+ await withTempDir('sanitizer-compatibility-test', (String tempDir) async {
+ var aotDill = "$tempDir/aot.dill";
+ var noneElf = "$tempDir/none.elf";
+ var sanitizerElf = "$tempDir/$sanitizer.elf";
+ var sanitizerElf2 = "$tempDir/${sanitizer}2.elf";
+
+ await run(noneJitRuntime, [
+ "pkg/vm/bin/gen_kernel.dart",
+ "--platform",
+ nonePlatform,
+ "--aot",
+ "-o",
+ aotDill,
+ "tests/language/unsorted/first_test.dart"
+ ]);
+
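+ // Create three snapshots: a plain one, one from the sanitizer build of
+ // gen_snapshot, and one from the plain gen_snapshot cross-targeting the
+ // sanitizer via the new flag.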
+ await run(noneGenSnapshot,
+ ["--snapshot-kind=app-aot-elf", "--elf=$noneElf", aotDill]);
+ await run(sanitizerGenSnapshot,
+ ["--snapshot-kind=app-aot-elf", "--elf=$sanitizerElf", aotDill]);
+ await run(noneGenSnapshot, [
+ "--snapshot-kind=app-aot-elf",
+ "--elf=$sanitizerElf2",
+ targetFlag,
+ aotDill
+ ]);
+
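+ // Each snapshot loads in the runtime it was built for.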
+ await run(noneAotRuntime, [noneElf]);
+ await run(sanitizerAotRuntime, [sanitizerElf]);
+ await run(sanitizerAotRuntime, [sanitizerElf2]);
+
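+ // Mismatched snapshot/runtime pairs must be rejected at load time.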
+ var errorLines = await runError(noneAotRuntime, [sanitizerElf]);
+ Expect.contains("Snapshot not compatible", errorLines[0]);
+ errorLines = await runError(noneAotRuntime, [sanitizerElf2]);
+ Expect.contains("Snapshot not compatible", errorLines[0]);
+ errorLines = await runError(sanitizerAotRuntime, [noneElf]);
+ Expect.contains("Snapshot not compatible", errorLines[0]);
+ });
+}
diff --git a/runtime/tests/vm/vm.status b/runtime/tests/vm/vm.status
index d3755d3c..54e5f80 100644
--- a/runtime/tests/vm/vm.status
+++ b/runtime/tests/vm/vm.status
@@ -359,6 +359,9 @@
dart/isolates/send_object_to_spawn_uri_isolate_test: SkipByDesign # uses spawnUri
dart/issue32950_test: SkipByDesign # uses spawnUri.
+[ $runtime != dart_precompiled || $sanitizer != msan && $sanitizer != tsan ]
+dart/sanitizer_compatibility_test: SkipByDesign
+
[ $system != macos || $simulator ]
dart/thread_priority_macos_test: SkipByDesign
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc
index 0c7d63f..a3adb29 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -286,7 +286,6 @@
ASSERT(((offset + buffer_.GetPosition()) & (alignment - 1)) == 0);
}
-#if defined(TARGET_USES_THREAD_SANITIZER)
void Assembler::TsanLoadAcquire(Register addr) {
LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
MoveRegister(R0, addr);
@@ -298,7 +297,6 @@
MoveRegister(R0, addr);
rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
}
-#endif
static int CountLeadingZeros(uint64_t value, int width) {
if (width == 64) return Utils::CountLeadingZeros64(value);
@@ -1542,7 +1540,7 @@
ASSERT(addr != state);
Label slow_path, done, retry;
- if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
+ if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
b(&slow_path);
}
@@ -1557,7 +1555,7 @@
stxr(TMP, state, addr);
cbz(&done, TMP); // 0 means stxr was successful.
- if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
+ if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
b(&retry);
}
@@ -1601,7 +1599,7 @@
ASSERT(addr != state);
Label slow_path, done, retry;
- if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
+ if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
b(&slow_path);
}
@@ -1616,7 +1614,7 @@
stxr(TMP, state, addr);
cbz(&done, TMP); // 0 means stxr was successful.
- if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
+ if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
b(&retry);
}
diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h
index cd2d587..f73fbf0 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64.h
+++ b/runtime/vm/compiler/assembler/assembler_arm64.h
@@ -522,10 +522,8 @@
StoreToOffset(src, base, offset, kEightBytes);
}
-#if defined(TARGET_USES_THREAD_SANITIZER)
void TsanLoadAcquire(Register addr);
void TsanStoreRelease(Register addr);
-#endif
void LoadAcquire(Register dst,
const Address& address,
@@ -538,9 +536,9 @@
src = TMP2;
}
ldar(dst, src, size);
-#if defined(TARGET_USES_THREAD_SANITIZER)
- TsanLoadAcquire(src);
-#endif
+ if (FLAG_target_thread_sanitizer) {
+ TsanLoadAcquire(src);
+ }
}
#if defined(DART_COMPRESSED_POINTERS)
@@ -561,9 +559,9 @@
dst = TMP2;
}
stlr(src, dst, size);
-#if defined(TARGET_USES_THREAD_SANITIZER)
- TsanStoreRelease(dst);
-#endif
+ if (FLAG_target_thread_sanitizer) {
+ TsanStoreRelease(dst);
+ }
}
void CompareWithMemoryValue(Register value,
diff --git a/runtime/vm/compiler/assembler/assembler_arm64_test.cc b/runtime/vm/compiler/assembler/assembler_arm64_test.cc
index 31698d7..2fbcd69 100644
--- a/runtime/vm/compiler/assembler/assembler_arm64_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_arm64_test.cc
@@ -7559,7 +7559,7 @@
}
// can't call (tsan) runtime methods
-#if !defined(TARGET_USES_THREAD_SANITIZER)
+#if !defined(USING_THREAD_SANITIZER)
ASSEMBLER_TEST_GENERATE(StoreReleaseLoadAcquire, assembler) {
__ SetupDartSP();
@@ -7636,7 +7636,7 @@
"mov csp, sp\n"
"ret\n");
}
-#endif // !defined(TARGET_USES_THREAD_SANITIZER)
+#endif // !defined(USING_THREAD_SANITIZER)
static void RangeCheck(Assembler* assembler, Register value, Register temp) {
const Register return_reg = CallingConventions::kReturnReg;
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.cc b/runtime/vm/compiler/assembler/assembler_ia32.cc
index 5c60366..90c5177 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.cc
+++ b/runtime/vm/compiler/assembler/assembler_ia32.cc
@@ -2229,11 +2229,11 @@
// We don't run TSAN on 32 bit systems.
// Don't call StoreRelease here because we would have to load the immediate
// into a temp register which causes spilling.
-#if defined(TARGET_USES_THREAD_SANITIZER)
- if (memory_order == kRelease) {
- UNIMPLEMENTED();
+ if (FLAG_target_thread_sanitizer) {
+ if (memory_order == kRelease) {
+ UNIMPLEMENTED();
+ }
}
-#endif
if (target::CanEmbedAsRawPointerInGeneratedCode(value)) {
Immediate imm_value(target::ToRawPointer(value));
movl(dest, imm_value);
diff --git a/runtime/vm/compiler/assembler/assembler_ia32.h b/runtime/vm/compiler/assembler/assembler_ia32.h
index 0e082be..fa07225 100644
--- a/runtime/vm/compiler/assembler/assembler_ia32.h
+++ b/runtime/vm/compiler/assembler/assembler_ia32.h
@@ -674,9 +674,9 @@
// On intel loads have load-acquire behavior (i.e. loads are not re-ordered
// with other loads).
Load(dst, address, size);
-#if defined(TARGET_USES_THREAD_SANITIZER)
-#error No support for TSAN on IA32.
-#endif
+ if (FLAG_target_thread_sanitizer) {
+ FATAL("No support for TSAN on IA32.");
+ }
}
void StoreRelease(Register src,
const Address& address,
@@ -684,9 +684,9 @@
// On intel stores have store-release behavior (i.e. stores are not
// re-ordered with other stores).
Store(src, address, size);
-#if defined(TARGET_USES_THREAD_SANITIZER)
-#error No support for TSAN on IA32.
-#endif
+ if (FLAG_target_thread_sanitizer) {
+ FATAL("No support for TSAN on IA32.");
+ }
}
void CompareWithMemoryValue(Register value,
diff --git a/runtime/vm/compiler/assembler/assembler_riscv.cc b/runtime/vm/compiler/assembler/assembler_riscv.cc
index 56d501f..40d8c4c 100644
--- a/runtime/vm/compiler/assembler/assembler_riscv.cc
+++ b/runtime/vm/compiler/assembler/assembler_riscv.cc
@@ -2567,7 +2567,6 @@
jr(TMP2);
}
-#if defined(TARGET_USES_THREAD_SANITIZER)
void Assembler::TsanLoadAcquire(Register addr) {
LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
MoveRegister(A0, addr);
@@ -2578,7 +2577,6 @@
MoveRegister(A0, addr);
rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
}
-#endif
void Assembler::LoadAcquire(Register dst,
const Address& address,
@@ -2587,14 +2585,14 @@
Load(dst, address, size);
fence(HartEffects::kRead, HartEffects::kMemory);
-#if defined(TARGET_USES_THREAD_SANITIZER)
- if (address.offset() == 0) {
- TsanLoadAcquire(address.base());
- } else {
- AddImmediate(TMP2, address.base(), address.offset());
- TsanLoadAcquire(TMP2);
+ if (FLAG_target_thread_sanitizer) {
+ if (address.offset() == 0) {
+ TsanLoadAcquire(address.base());
+ } else {
+ AddImmediate(TMP2, address.base(), address.offset());
+ TsanLoadAcquire(TMP2);
+ }
}
-#endif
}
void Assembler::StoreRelease(Register src,
@@ -3773,7 +3771,7 @@
ASSERT(addr != state);
Label slow_path, done, retry;
- if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
+ if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
j(&slow_path, Assembler::kNearJump);
}
@@ -3787,7 +3785,7 @@
sc(state, state, Address(addr, 0));
beqz(state, &done, Assembler::kNearJump); // 0 means sc was successful.
- if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
+ if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
j(&retry, Assembler::kNearJump);
}
@@ -3809,7 +3807,7 @@
ASSERT(addr != state);
Label slow_path, done, retry;
- if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
+ if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
j(&slow_path, Assembler::kNearJump);
}
@@ -3823,7 +3821,7 @@
sc(state, state, Address(addr, 0));
beqz(state, &done, Assembler::kNearJump); // 0 means sc was successful.
- if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
+ if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
j(&retry, Assembler::kNearJump);
}
diff --git a/runtime/vm/compiler/assembler/assembler_riscv.h b/runtime/vm/compiler/assembler/assembler_riscv.h
index 2d4afef..e16bab1 100644
--- a/runtime/vm/compiler/assembler/assembler_riscv.h
+++ b/runtime/vm/compiler/assembler/assembler_riscv.h
@@ -873,10 +873,8 @@
StoreToOffset(src, base, offset, kWordBytes);
}
-#if defined(TARGET_USES_THREAD_SANITIZER)
void TsanLoadAcquire(Register addr);
void TsanStoreRelease(Register addr);
-#endif
void LoadAcquire(Register dst,
const Address& address,
diff --git a/runtime/vm/compiler/assembler/assembler_x64.cc b/runtime/vm/compiler/assembler/assembler_x64.cc
index 8f0f714..f0298b8 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64.cc
@@ -148,7 +148,7 @@
// For TSAN, we always go to the runtime so TSAN is aware of the release
// semantics of entering the safepoint.
Label done, slow_path;
- if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
+ if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
jmp(&slow_path);
}
@@ -162,7 +162,7 @@
popq(RAX);
cmpq(TMP, Immediate(target::Thread::full_safepoint_state_unacquired()));
- if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
+ if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
j(EQUAL, &done);
}
@@ -205,7 +205,7 @@
// For TSAN, we always go to the runtime so TSAN is aware of the acquire
// semantics of leaving the safepoint.
Label done, slow_path;
- if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
+ if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
jmp(&slow_path);
}
@@ -221,7 +221,7 @@
popq(RAX);
cmpq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
- if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
+ if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
j(EQUAL, &done);
}
@@ -2023,7 +2023,6 @@
__ LeaveFrame();
}
-#if defined(TARGET_USES_THREAD_SANITIZER)
void Assembler::TsanLoadAcquire(Address addr) {
LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
leaq(CallingConventions::kArg1Reg, addr);
@@ -2035,7 +2034,6 @@
leaq(CallingConventions::kArg1Reg, addr);
rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
}
-#endif
void Assembler::RestoreCodePointer() {
movq(CODE_REG,
diff --git a/runtime/vm/compiler/assembler/assembler_x64.h b/runtime/vm/compiler/assembler/assembler_x64.h
index 80f98d1..1a19eb2 100644
--- a/runtime/vm/compiler/assembler/assembler_x64.h
+++ b/runtime/vm/compiler/assembler/assembler_x64.h
@@ -1102,10 +1102,8 @@
}
}
-#if defined(TARGET_USES_THREAD_SANITIZER)
void TsanLoadAcquire(Address addr);
void TsanStoreRelease(Address addr);
-#endif
void LoadAcquire(Register dst,
const Address& address,
@@ -1113,18 +1111,18 @@
// On intel loads have load-acquire behavior (i.e. loads are not re-ordered
// with other loads).
Load(dst, address, size);
-#if defined(TARGET_USES_THREAD_SANITIZER)
- TsanLoadAcquire(address);
-#endif
+ if (FLAG_target_thread_sanitizer) {
+ TsanLoadAcquire(address);
+ }
}
#if defined(DART_COMPRESSED_POINTERS)
void LoadAcquireCompressed(Register dst, const Address& address) override {
// On intel loads have load-acquire behavior (i.e. loads are not re-ordered
// with other loads).
LoadCompressed(dst, address);
-#if defined(TARGET_USES_THREAD_SANITIZER)
- TsanLoadAcquire(address);
-#endif
+ if (FLAG_target_thread_sanitizer) {
+ TsanLoadAcquire(address);
+ }
}
#endif
void StoreRelease(Register src,
@@ -1133,9 +1131,9 @@
// On intel stores have store-release behavior (i.e. stores are not
// re-ordered with other stores).
Store(src, address, size);
-#if defined(TARGET_USES_THREAD_SANITIZER)
- TsanStoreRelease(address);
-#endif
+ if (FLAG_target_thread_sanitizer) {
+ TsanStoreRelease(address);
+ }
}
void CompareWithMemoryValue(Register value,
diff --git a/runtime/vm/compiler/assembler/assembler_x64_test.cc b/runtime/vm/compiler/assembler/assembler_x64_test.cc
index 8eccbd1..679f336 100644
--- a/runtime/vm/compiler/assembler/assembler_x64_test.cc
+++ b/runtime/vm/compiler/assembler/assembler_x64_test.cc
@@ -6227,11 +6227,11 @@
__ popq(RAX))
ASSEMBLER_TEST_GENERATE(StoreReleaseLoadAcquire, assembler) {
-#if defined(TARGET_USES_THREAD_SANITIZER)
- // On TSAN builds StoreRelease/LoadAcquire will do a runtime
- // call to tell TSAN about our action.
- __ MoveRegister(THR, CallingConventions::kArg2Reg);
-#endif
+ if (FLAG_target_thread_sanitizer) {
+ // When targeting TSAN, StoreRelease/LoadAcquire will do a runtime
+ // call to tell TSAN about our action.
+ __ MoveRegister(THR, CallingConventions::kArg2Reg);
+ }
__ pushq(RCX);
__ xorq(RCX, RCX);
@@ -6306,11 +6306,11 @@
}
ASSEMBLER_TEST_GENERATE(StoreReleaseLoadAcquire1024, assembler) {
-#if defined(TARGET_USES_THREAD_SANITIZER)
- // On TSAN builds StoreRelease/LoadAcquire will do a runtime
- // call to tell TSAN about our action.
- __ MoveRegister(THR, CallingConventions::kArg2Reg);
-#endif
+ if (FLAG_target_thread_sanitizer) {
+ // When targeting TSAN, StoreRelease/LoadAcquire will do a runtime
+ // call to tell TSAN about our action.
+ __ MoveRegister(THR, CallingConventions::kArg2Reg);
+ }
__ pushq(RCX);
__ xorq(RCX, RCX);
@@ -6327,19 +6327,19 @@
ASSEMBLER_TEST_RUN(StoreReleaseLoadAcquire1024, test) {
const intptr_t res = test->InvokeWithCodeAndThread<intptr_t>(123);
EXPECT_EQ(123, res);
-#if !defined(TARGET_USES_THREAD_SANITIZER)
- EXPECT_DISASSEMBLY_NOT_WINDOWS(
- "push rcx\n"
- "xorq rcx,rcx\n"
- "push rcx\n"
- "subq rsp,0x400\n"
- "movq [rsp+0x400],rdx\n"
- "movq rax,[rsp+0x400]\n"
- "addq rsp,0x400\n"
- "pop rcx\n"
- "pop rcx\n"
- "ret\n");
-#endif
+ if (!FLAG_target_thread_sanitizer) {
+ EXPECT_DISASSEMBLY_NOT_WINDOWS(
+ "push rcx\n"
+ "xorq rcx,rcx\n"
+ "push rcx\n"
+ "subq rsp,0x400\n"
+ "movq [rsp+0x400],rdx\n"
+ "movq rax,[rsp+0x400]\n"
+ "addq rsp,0x400\n"
+ "pop rcx\n"
+ "pop rcx\n"
+ "ret\n");
+ }
}
ASSEMBLER_TEST_GENERATE(MoveByteRunTest, assembler) {
diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc
index b3a5995..903e26e 100644
--- a/runtime/vm/compiler/backend/il.cc
+++ b/runtime/vm/compiler/backend/il.cc
@@ -7065,11 +7065,9 @@
// additionally verify here that there is an actual overlap. Instead, only
// do that when we need to calculate the end address of the regions in
// the loop case.
-#if defined(USING_MEMORY_SANITIZER)
- const auto jump_distance = compiler::Assembler::kFarJump;
-#else
- const auto jump_distance = compiler::Assembler::kNearJump;
-#endif
+ const auto jump_distance = FLAG_target_memory_sanitizer
+ ? compiler::Assembler::kFarJump
+ : compiler::Assembler::kNearJump;
__ BranchIf(UNSIGNED_LESS_EQUAL, &copy_forwards, jump_distance);
__ Comment("Copying backwards");
if (constant_length) {
@@ -7169,13 +7167,15 @@
}
}
-#if defined(USING_MEMORY_SANITIZER) && defined(TARGET_ARCH_X64)
- RegisterSet kVolatileRegisterSet(CallingConventions::kVolatileCpuRegisters,
- CallingConventions::kVolatileXmmRegisters);
- __ PushRegisters(kVolatileRegisterSet);
- __ MsanUnpoison(dest_reg, num_bytes);
- __ PopRegisters(kVolatileRegisterSet);
+ if (FLAG_target_memory_sanitizer) {
+#if defined(TARGET_ARCH_X64)
+ RegisterSet kVolatileRegisterSet(CallingConventions::kVolatileCpuRegisters,
+ CallingConventions::kVolatileXmmRegisters);
+ __ PushRegisters(kVolatileRegisterSet);
+ __ MsanUnpoison(dest_reg, num_bytes);
+ __ PopRegisters(kVolatileRegisterSet);
#endif
+ }
}
#endif
diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc
index c34f586..c8bd8ae 100644
--- a/runtime/vm/compiler/backend/il_arm.cc
+++ b/runtime/vm/compiler/backend/il_arm.cc
@@ -1788,9 +1788,9 @@
// Reserve space for the arguments that go on the stack (if any), then align.
__ ReserveAlignedFrameSpace(marshaller_.RequiredStackSpaceInBytes());
-#if defined(USING_MEMORY_SANITIZER)
- UNIMPLEMENTED();
-#endif
+ if (FLAG_target_memory_sanitizer) {
+ UNIMPLEMENTED();
+ }
EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1, TMP);
diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc
index 5bf3d21..1688b08 100644
--- a/runtime/vm/compiler/backend/il_arm64.cc
+++ b/runtime/vm/compiler/backend/il_arm64.cc
@@ -252,9 +252,9 @@
const bool reversed = copy_forwards != nullptr;
const intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
(unboxed_inputs() ? 0 : kSmiTagShift);
-#if defined(USING_MEMORY_SANITIZER)
- __ PushPair(length_reg, dest_reg);
-#endif
+ if (FLAG_target_memory_sanitizer) {
+ __ PushPair(length_reg, dest_reg);
+ }
if (reversed) {
// Verify that the overlap actually exists by checking to see if
// dest_start < src_end.
@@ -298,18 +298,18 @@
compiler::kObjectBytes);
__ b(&loop, NOT_ZERO);
-#if defined(USING_MEMORY_SANITIZER)
- __ PopPair(length_reg, dest_reg);
- if (!unboxed_inputs()) {
- __ ExtendNonNegativeSmi(length_reg);
+ if (FLAG_target_memory_sanitizer) {
+ __ PopPair(length_reg, dest_reg);
+ if (!unboxed_inputs()) {
+ __ ExtendNonNegativeSmi(length_reg);
+ }
+ if (shift < 0) {
+ __ AsrImmediate(length_reg, length_reg, -shift);
+ } else {
+ __ LslImmediate(length_reg, length_reg, shift);
+ }
+ __ MsanUnpoison(dest_reg, length_reg);
}
- if (shift < 0) {
- __ AsrImmediate(length_reg, length_reg, -shift);
- } else {
- __ LslImmediate(length_reg, length_reg, shift);
- }
- __ MsanUnpoison(dest_reg, length_reg);
-#endif
}
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
@@ -1508,8 +1508,7 @@
// Reserve space for the arguments that go on the stack (if any), then align.
intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
__ ReserveAlignedFrameSpace(stack_space);
-#if defined(USING_MEMORY_SANITIZER)
- {
+ if (FLAG_target_memory_sanitizer) {
RegisterSet kVolatileRegisterSet(kAbiVolatileCpuRegs & ~(1 << SP),
kAbiVolatileFpuRegs);
__ mov(temp1, SP);
@@ -1519,7 +1518,7 @@
// reserved for outgoing arguments and the spills which might have
// been generated by the register allocator. Some of these spill slots
// can be used as handles passed down to the runtime.
- __ sub(R1, is_leaf_ ? FPREG : saved_fp_or_sp, temp1);
+ __ sub(R1, is_leaf_ ? FPREG : saved_fp_or_sp, compiler::Operand(temp1));
__ MsanUnpoison(temp1, R1);
// Incoming Dart arguments to this trampoline are potentially used as local
@@ -1534,7 +1533,6 @@
RESTORES_LR_FROM_FRAME(__ PopRegisters(kVolatileRegisterSet));
}
-#endif
EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1, temp2);
@@ -2282,20 +2280,20 @@
UNREACHABLE();
}
-#if defined(USING_MEMORY_SANITIZER)
- if (index.IsRegister()) {
- __ ComputeElementAddressForRegIndex(TMP, IsUntagged(), class_id(),
- index_scale(), index_unboxed_, array,
- index.reg());
- } else {
- __ ComputeElementAddressForIntIndex(TMP, IsUntagged(), class_id(),
- index_scale(), array,
- Smi::Cast(index.constant()).Value());
+ if (FLAG_target_memory_sanitizer) {
+ if (index.IsRegister()) {
+ __ ComputeElementAddressForRegIndex(TMP, IsUntagged(), class_id(),
+ index_scale(), index_unboxed_, array,
+ index.reg());
+ } else {
+ __ ComputeElementAddressForIntIndex(TMP, IsUntagged(), class_id(),
+ index_scale(), array,
+ Smi::Cast(index.constant()).Value());
+ }
+ const intptr_t length_in_bytes = RepresentationUtils::ValueSize(
+ RepresentationUtils::RepresentationOfArrayElement(class_id()));
+ __ MsanUnpoison(TMP, length_in_bytes);
}
- const intptr_t length_in_bytes = RepresentationUtils::ValueSize(
- RepresentationUtils::RepresentationOfArrayElement(class_id()));
- __ MsanUnpoison(TMP, length_in_bytes);
-#endif
}
static void LoadValueCid(FlowGraphCompiler* compiler,
diff --git a/runtime/vm/compiler/backend/il_ia32.cc b/runtime/vm/compiler/backend/il_ia32.cc
index 441fc0a..475c738 100644
--- a/runtime/vm/compiler/backend/il_ia32.cc
+++ b/runtime/vm/compiler/backend/il_ia32.cc
@@ -1241,9 +1241,9 @@
// Reserve space for the arguments that go on the stack (if any), then align.
__ ReserveAlignedFrameSpace(stack_required);
-#if defined(USING_MEMORY_SANITIZER)
- UNIMPLEMENTED();
-#endif
+ if (FLAG_target_memory_sanitizer) {
+ UNIMPLEMENTED();
+ }
EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp,
locs()->temp(1).reg());
diff --git a/runtime/vm/compiler/backend/il_riscv.cc b/runtime/vm/compiler/backend/il_riscv.cc
index dd0203a..0aff43c 100644
--- a/runtime/vm/compiler/backend/il_riscv.cc
+++ b/runtime/vm/compiler/backend/il_riscv.cc
@@ -1653,8 +1653,7 @@
// Reserve space for the arguments that go on the stack (if any), then align.
intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
__ ReserveAlignedFrameSpace(stack_space);
-#if defined(USING_MEMORY_SANITIZER)
- {
+ if (FLAG_target_memory_sanitizer) {
RegisterSet kVolatileRegisterSet(kAbiVolatileCpuRegs, kAbiVolatileFpuRegs);
__ mv(temp1, SP);
__ PushRegisters(kVolatileRegisterSet);
@@ -1679,7 +1678,6 @@
__ PopRegisters(kVolatileRegisterSet);
}
-#endif
EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1, temp2);
@@ -2544,9 +2542,9 @@
UNREACHABLE();
}
-#if defined(USING_MEMORY_SANITIZER)
- UNIMPLEMENTED();
-#endif
+ if (FLAG_target_memory_sanitizer) {
+ UNIMPLEMENTED();
+ }
}
static void LoadValueCid(FlowGraphCompiler* compiler,
diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc
index 04a4b35..1ea95e1 100644
--- a/runtime/vm/compiler/backend/il_x64.cc
+++ b/runtime/vm/compiler/backend/il_x64.cc
@@ -236,11 +236,9 @@
const ScaleFactor scale = ToScaleFactor(mov_size, /*index_unboxed=*/true);
__ leaq(TMP, compiler::Address(src_reg, length_reg, scale, -mov_size));
__ CompareRegisters(dest_reg, TMP);
-#if defined(USING_MEMORY_SANITIZER)
- const auto jump_distance = compiler::Assembler::kFarJump;
-#else
- const auto jump_distance = compiler::Assembler::kNearJump;
-#endif
+ const auto jump_distance = FLAG_target_memory_sanitizer
+ ? compiler::Assembler::kFarJump
+ : compiler::Assembler::kNearJump;
__ BranchIf(UNSIGNED_GREATER, copy_forwards, jump_distance);
// The backwards move must be performed, so move TMP -> src_reg and do the
// same adjustment for dest_reg.
@@ -249,18 +247,18 @@
compiler::Address(dest_reg, length_reg, scale, -mov_size));
__ std();
}
-#if defined(USING_MEMORY_SANITIZER)
- // For reversed, do the `rep` first. It sets `dest_reg` to the start again.
- // For forward, do the unpoisining first, before `dest_reg` is modified.
- __ movq(TMP, length_reg);
- if (mov_size != 1) {
- // Unpoison takes the length in bytes.
- __ MulImmediate(TMP, mov_size);
+ if (FLAG_target_memory_sanitizer) {
+ // For reversed, do the `rep` first. It sets `dest_reg` to the start again.
+ // For forward, do the unpoisoning first, before `dest_reg` is modified.
+ __ movq(TMP, length_reg);
+ if (mov_size != 1) {
+ // Unpoison takes the length in bytes.
+ __ MulImmediate(TMP, mov_size);
+ }
+ if (!reversed) {
+ __ MsanUnpoison(dest_reg, TMP);
+ }
}
- if (!reversed) {
- __ MsanUnpoison(dest_reg, TMP);
- }
-#endif
switch (mov_size) {
case 1:
__ rep_movsb();
@@ -281,11 +279,11 @@
__ cld();
}
-#if defined(USING_MEMORY_SANITIZER)
- if (reversed) {
- __ MsanUnpoison(dest_reg, TMP);
+ if (FLAG_target_memory_sanitizer) {
+ if (reversed) {
+ __ MsanUnpoison(dest_reg, TMP);
+ }
}
-#endif
}
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
@@ -1430,8 +1428,7 @@
// Reserve space for the arguments that go on the stack (if any), then align.
intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
__ ReserveAlignedFrameSpace(stack_space);
-#if defined(USING_MEMORY_SANITIZER)
- {
+ if (FLAG_target_memory_sanitizer) {
RegisterSet kVolatileRegisterSet(CallingConventions::kVolatileCpuRegisters,
CallingConventions::kVolatileXmmRegisters);
__ movq(temp, RSP);
@@ -1457,7 +1454,6 @@
__ PopRegisters(kVolatileRegisterSet);
}
-#endif
if (is_leaf_) {
EmitParamMoves(compiler, FPREG, saved_fp, TMP);
@@ -2259,12 +2255,12 @@
UNREACHABLE();
}
-#if defined(USING_MEMORY_SANITIZER)
- __ leaq(TMP, element_address);
- const intptr_t length_in_bytes = RepresentationUtils::ValueSize(
- RepresentationUtils::RepresentationOfArrayElement(class_id()));
- __ MsanUnpoison(TMP, length_in_bytes);
-#endif
+ if (FLAG_target_memory_sanitizer) {
+ __ leaq(TMP, element_address);
+ const intptr_t length_in_bytes = RepresentationUtils::ValueSize(
+ RepresentationUtils::RepresentationOfArrayElement(class_id()));
+ __ MsanUnpoison(TMP, length_in_bytes);
+ }
}
LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
diff --git a/runtime/vm/compiler/stub_code_compiler_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
index 306f495..cd285bd 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -72,7 +72,7 @@
// [Thread::tsan_utils_->setjmp_buffer_]).
static void WithExceptionCatchingTrampoline(Assembler* assembler,
std::function<void()> fun) {
-#if defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+#if !defined(USING_SIMULATOR)
const Register kTsanUtilsReg = R3;
// Reserve space for arguments and align frame before entering C++ world.
@@ -87,69 +87,77 @@
// We rely on THR being preserved across the setjmp() call.
COMPILE_ASSERT(IsCalleeSavedRegister(THR));
- Label do_native_call;
+ if (FLAG_target_thread_sanitizer) {
+ Label do_native_call;
- // Save old jmp_buf.
- __ ldr(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
- __ ldr(TMP,
- Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
- __ Push(TMP);
+ // Save old jmp_buf.
+ __ ldr(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
+ __ ldr(TMP,
+ Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
+ __ Push(TMP);
- // Allocate jmp_buf struct on stack & remember pointer to it on the
- // [Thread::tsan_utils_->setjmp_buffer] (which exceptions.cc will longjmp()
- // to)
- __ AddImmediate(SP, -kJumpBufferSize);
- __ str(SP, Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
+ // Allocate jmp_buf struct on stack & remember pointer to it on the
+ // [Thread::tsan_utils_->setjmp_buffer] (which exceptions.cc will longjmp()
+ // to)
+ __ AddImmediate(SP, -kJumpBufferSize);
+ __ str(SP,
+ Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
- // Call setjmp() with a pointer to the allocated jmp_buf struct.
- __ MoveRegister(R0, SP);
- __ PushRegisters(volatile_registers);
- __ EnterCFrame(0);
- __ mov(R25, CSP);
- __ mov(CSP, SP);
- __ ldr(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
- __ CallCFunction(
- Address(kTsanUtilsReg, target::TsanUtils::setjmp_function_offset()));
- __ mov(SP, CSP);
- __ mov(CSP, R25);
- __ LeaveCFrame();
- __ PopRegisters(volatile_registers);
+ // Call setjmp() with a pointer to the allocated jmp_buf struct.
+ __ MoveRegister(R0, SP);
+ __ PushRegisters(volatile_registers);
+ __ EnterCFrame(0);
+ __ mov(R25, CSP);
+ __ mov(CSP, SP);
+ __ ldr(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
+ __ CallCFunction(
+ Address(kTsanUtilsReg, target::TsanUtils::setjmp_function_offset()));
+ __ mov(SP, CSP);
+ __ mov(CSP, R25);
+ __ LeaveCFrame();
+ __ PopRegisters(volatile_registers);
- // We are the target of a longjmp() iff setjmp() returns non-0.
- __ cbz(&do_native_call, R0);
+ // We are the target of a longjmp() iff setjmp() returns non-0.
+ __ cbz(&do_native_call, R0);
- // We are the target of a longjmp: Cleanup the stack and tail-call the
- // JumpToFrame stub which will take care of unwinding the stack and hand
- // execution to the catch entry.
- __ AddImmediate(SP, kJumpBufferSize);
- __ ldr(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
- __ Pop(TMP);
- __ str(TMP,
- Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
+ // We are the target of a longjmp: Clean up the stack and tail-call the
+ // JumpToFrame stub, which will take care of unwinding the stack and
+ // handing execution to the catch entry.
+ __ AddImmediate(SP, kJumpBufferSize);
+ __ ldr(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
+ __ Pop(TMP);
+ __ str(TMP,
+ Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
- __ ldr(R0, Address(kTsanUtilsReg, target::TsanUtils::exception_pc_offset()));
- __ ldr(R1, Address(kTsanUtilsReg, target::TsanUtils::exception_sp_offset()));
- __ ldr(R2, Address(kTsanUtilsReg, target::TsanUtils::exception_fp_offset()));
- __ MoveRegister(R3, THR);
- __ Jump(Address(THR, target::Thread::jump_to_frame_entry_point_offset()));
+ __ ldr(R0,
+ Address(kTsanUtilsReg, target::TsanUtils::exception_pc_offset()));
+ __ ldr(R1,
+ Address(kTsanUtilsReg, target::TsanUtils::exception_sp_offset()));
+ __ ldr(R2,
+ Address(kTsanUtilsReg, target::TsanUtils::exception_fp_offset()));
+ __ MoveRegister(R3, THR);
+ __ Jump(Address(THR, target::Thread::jump_to_frame_entry_point_offset()));
- // We leave the created [jump_buf] structure on the stack as well as the
- // pushed old [Thread::tsan_utils_->setjmp_buffer_].
- __ Bind(&do_native_call);
- __ MoveRegister(kSavedRspReg, SP);
-#endif // defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+ // We leave the created [jump_buf] structure on the stack as well as the
+ // pushed old [Thread::tsan_utils_->setjmp_buffer_].
+ __ Bind(&do_native_call);
+ __ MoveRegister(kSavedRspReg, SP);
+ }
+#endif // !defined(USING_SIMULATOR)
fun();
-#if defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
- __ MoveRegister(SP, kSavedRspReg);
- __ AddImmediate(SP, kJumpBufferSize);
- const Register kTsanUtilsReg2 = kSavedRspReg;
- __ ldr(kTsanUtilsReg2, Address(THR, target::Thread::tsan_utils_offset()));
- __ Pop(TMP);
- __ str(TMP,
- Address(kTsanUtilsReg2, target::TsanUtils::setjmp_buffer_offset()));
-#endif // defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+#if !defined(USING_SIMULATOR)
+ if (FLAG_target_thread_sanitizer) {
+ __ MoveRegister(SP, kSavedRspReg);
+ __ AddImmediate(SP, kJumpBufferSize);
+ const Register kTsanUtilsReg2 = kSavedRspReg;
+ __ ldr(kTsanUtilsReg2, Address(THR, target::Thread::tsan_utils_offset()));
+ __ Pop(TMP);
+ __ str(TMP,
+ Address(kTsanUtilsReg2, target::TsanUtils::setjmp_buffer_offset()));
+ }
+#endif // !defined(USING_SIMULATOR)
}
// Input parameters:
diff --git a/runtime/vm/compiler/stub_code_compiler_x64.cc b/runtime/vm/compiler/stub_code_compiler_x64.cc
index f85b9d3..ee56f3f 100644
--- a/runtime/vm/compiler/stub_code_compiler_x64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_x64.cc
@@ -74,7 +74,7 @@
// [Thread::tsan_utils_->setjmp_buffer_]).
static void WithExceptionCatchingTrampoline(Assembler* assembler,
std::function<void()> fun) {
-#if defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+#if !defined(USING_SIMULATOR)
const Register kTsanUtilsReg = RAX;
// Reserve space for arguments and align frame before entering C++ world.
@@ -89,70 +89,74 @@
// We rely on THR being preserved across the setjmp() call.
COMPILE_ASSERT(IsCalleeSavedRegister(THR));
- Label do_native_call;
+ if (FLAG_target_thread_sanitizer) {
+ Label do_native_call;
- // Save old jmp_buf.
- __ movq(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
- __ pushq(Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
+ // Save old jmp_buf.
+ __ movq(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
+ __ pushq(Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
- // Allocate jmp_buf struct on stack & remember pointer to it on the
- // [Thread::tsan_utils_->setjmp_buffer] (which exceptions.cc will longjmp()
- // to)
- __ AddImmediate(RSP, Immediate(-kJumpBufferSize));
- __ movq(Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()),
- RSP);
+ // Allocate jmp_buf struct on stack & remember pointer to it on the
+ // [Thread::tsan_utils_->setjmp_buffer] (which exceptions.cc will longjmp()
+ // to)
+ __ AddImmediate(RSP, Immediate(-kJumpBufferSize));
+ __ movq(Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()),
+ RSP);
- // Call setjmp() with a pointer to the allocated jmp_buf struct.
- __ MoveRegister(CallingConventions::kArg1Reg, RSP);
- __ PushRegisters(volatile_registers);
- if (OS::ActivationFrameAlignment() > 1) {
+ // Call setjmp() with a pointer to the allocated jmp_buf struct.
+ __ MoveRegister(CallingConventions::kArg1Reg, RSP);
+ __ PushRegisters(volatile_registers);
+ if (OS::ActivationFrameAlignment() > 1) {
+ __ MoveRegister(kSavedRspReg, RSP);
+ __ andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
+ }
+ __ movq(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
+ __ CallCFunction(
+ Address(kTsanUtilsReg, target::TsanUtils::setjmp_function_offset()),
+ /*restore_rsp=*/true);
+ if (OS::ActivationFrameAlignment() > 1) {
+ __ MoveRegister(RSP, kSavedRspReg);
+ }
+ __ PopRegisters(volatile_registers);
+
+ // We are the target of a longjmp() iff setjmp() returns non-0.
+ __ CompareImmediate(RAX, 0);
+ __ BranchIf(EQUAL, &do_native_call);
+
+  // We are the target of a longjmp: Clean up the stack and tail-call the
+  // JumpToFrame stub, which will take care of unwinding the stack and
+  // handing execution to the catch entry.
+ __ AddImmediate(RSP, Immediate(kJumpBufferSize));
+ __ movq(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
+ __ popq(Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
+
+ __ movq(CallingConventions::kArg1Reg,
+ Address(kTsanUtilsReg, target::TsanUtils::exception_pc_offset()));
+ __ movq(CallingConventions::kArg2Reg,
+ Address(kTsanUtilsReg, target::TsanUtils::exception_sp_offset()));
+ __ movq(CallingConventions::kArg3Reg,
+ Address(kTsanUtilsReg, target::TsanUtils::exception_fp_offset()));
+ __ MoveRegister(CallingConventions::kArg4Reg, THR);
+ __ jmp(Address(THR, target::Thread::jump_to_frame_entry_point_offset()));
+
+ // We leave the created [jump_buf] structure on the stack as well as the
+ // pushed old [Thread::tsan_utils_->setjmp_buffer_].
+ __ Bind(&do_native_call);
__ MoveRegister(kSavedRspReg, RSP);
- __ andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
}
- __ movq(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
- __ CallCFunction(
- Address(kTsanUtilsReg, target::TsanUtils::setjmp_function_offset()),
- /*restore_rsp=*/true);
- if (OS::ActivationFrameAlignment() > 1) {
- __ MoveRegister(RSP, kSavedRspReg);
- }
- __ PopRegisters(volatile_registers);
-
- // We are the target of a longjmp() iff setjmp() returns non-0.
- __ CompareImmediate(RAX, 0);
- __ BranchIf(EQUAL, &do_native_call);
-
- // We are the target of a longjmp: Cleanup the stack and tail-call the
- // JumpToFrame stub which will take care of unwinding the stack and hand
- // execution to the catch entry.
- __ AddImmediate(RSP, Immediate(kJumpBufferSize));
- __ movq(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
- __ popq(Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
-
- __ movq(CallingConventions::kArg1Reg,
- Address(kTsanUtilsReg, target::TsanUtils::exception_pc_offset()));
- __ movq(CallingConventions::kArg2Reg,
- Address(kTsanUtilsReg, target::TsanUtils::exception_sp_offset()));
- __ movq(CallingConventions::kArg3Reg,
- Address(kTsanUtilsReg, target::TsanUtils::exception_fp_offset()));
- __ MoveRegister(CallingConventions::kArg4Reg, THR);
- __ jmp(Address(THR, target::Thread::jump_to_frame_entry_point_offset()));
-
- // We leave the created [jump_buf] structure on the stack as well as the
- // pushed old [Thread::tsan_utils_->setjmp_buffer_].
- __ Bind(&do_native_call);
- __ MoveRegister(kSavedRspReg, RSP);
-#endif // defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+#endif // !defined(USING_SIMULATOR)
fun();
-#if defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
- __ MoveRegister(RSP, kSavedRspReg);
- __ AddImmediate(RSP, Immediate(kJumpBufferSize));
- const Register kTsanUtilsReg2 = kSavedRspReg;
- __ movq(kTsanUtilsReg2, Address(THR, target::Thread::tsan_utils_offset()));
- __ popq(Address(kTsanUtilsReg2, target::TsanUtils::setjmp_buffer_offset()));
-#endif // defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+#if !defined(USING_SIMULATOR)
+ if (FLAG_target_thread_sanitizer) {
+ __ MoveRegister(RSP, kSavedRspReg);
+ __ AddImmediate(RSP, Immediate(kJumpBufferSize));
+ const Register kTsanUtilsReg2 = kSavedRspReg;
+ __ movq(kTsanUtilsReg2, Address(THR, target::Thread::tsan_utils_offset()));
+ __ popq(Address(kTsanUtilsReg2, target::TsanUtils::setjmp_buffer_offset()));
+ }
+#endif // !defined(USING_SIMULATOR)
}
// Input parameters:
diff --git a/runtime/vm/compiler_test.cc b/runtime/vm/compiler_test.cc
index f0ffd6d..63d57dc 100644
--- a/runtime/vm/compiler_test.cc
+++ b/runtime/vm/compiler_test.cc
@@ -288,7 +288,7 @@
}
// Too slow in debug mode.
-#if !defined(DEBUG) && !defined(TARGET_USES_THREAD_SANITIZER)
+#if !defined(DEBUG) && !defined(USING_THREAD_SANITIZER)
TEST_CASE(ManyClasses) {
// Limit is 20 bits. Check only more than 16 bits so test completes in
// reasonable time.
@@ -312,6 +312,6 @@
EXPECT(IsolateGroup::Current()->class_table()->NumCids() >= kNumClasses);
}
-#endif // !defined(DEBUG) && !defined(TARGET_USES_THREAD_SANITIZER)
+#endif // !defined(DEBUG) && !defined(USING_THREAD_SANITIZER)
} // namespace dart
diff --git a/runtime/vm/dart.cc b/runtime/vm/dart.cc
index 8e09af2..d426cd4 100644
--- a/runtime/vm/dart.cc
+++ b/runtime/vm/dart.cc
@@ -1028,7 +1028,8 @@
if (Snapshot::IncludesCode(kind)) {
VM_GLOBAL_FLAG_LIST(ADD_P, ADD_R, ADD_C, ADD_D);
- ADD_FLAG(tsan, kTargetUsesThreadSanitizer)
+ ADD_FLAG(tsan, FLAG_target_thread_sanitizer)
+ ADD_FLAG(msan, FLAG_target_memory_sanitizer)
if (kind == Snapshot::kFullJIT) {
// Enabling assertions affects deopt ids.
diff --git a/runtime/vm/flags.cc b/runtime/vm/flags.cc
index ddce614..7ccc6ed 100644
--- a/runtime/vm/flags.cc
+++ b/runtime/vm/flags.cc
@@ -72,6 +72,28 @@
#undef PRECOMPILE_FLAG_MACRO
#undef DEBUG_FLAG_MACRO
+#if defined(DART_PRECOMPILER)
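+// The AOT compiler doesn't know whether the target runtime will run under a
+// sanitizer, so the target is chosen by a flag that defaults to the build
+// configuration of the compiler itself.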
+DEFINE_FLAG(bool,
+ target_thread_sanitizer,
+#if defined(TARGET_USES_THREAD_SANITIZER)
+ true,
+#else
+ false,
+#endif
+ "Generate Dart code compatible with Thread Sanitizer");
+DEFINE_FLAG(bool,
+ target_memory_sanitizer,
+#if defined(TARGET_USES_MEMORY_SANITIZER)
+ true,
+#else
+ false,
+#endif
+ "Generate Dart code compatible with Memory Sanitizer");
+#endif
+
bool Flags::initialized_ = false;
// List of registered flags.
diff --git a/runtime/vm/flags.h b/runtime/vm/flags.h
index 790e277..d9d1850 100644
--- a/runtime/vm/flags.h
+++ b/runtime/vm/flags.h
@@ -159,6 +159,24 @@
#undef PRODUCT_FLAG_MACRO
#undef PRECOMPILE_FLAG_MACRO
+#if defined(DART_PRECOMPILER)
+DECLARE_FLAG(bool, target_thread_sanitizer);
+DECLARE_FLAG(bool, target_memory_sanitizer);
+#else
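+// Outside the precompiler the target always matches the host build, so these
+// are compile-time constants and the unused code paths fold away.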
+#if defined(USING_THREAD_SANITIZER)
+constexpr bool FLAG_target_thread_sanitizer = true;
+#else
+constexpr bool FLAG_target_thread_sanitizer = false;
+#endif
+#if defined(USING_MEMORY_SANITIZER)
+constexpr bool FLAG_target_memory_sanitizer = true;
+#else
+constexpr bool FLAG_target_memory_sanitizer = false;
+#endif
+#endif
+
} // namespace dart
#endif // RUNTIME_VM_FLAGS_H_
diff --git a/tools/bots/test_matrix.json b/tools/bots/test_matrix.json
index 7a4f5fd..31f4dd2 100644
--- a/tools/bots/test_matrix.json
+++ b/tools/bots/test_matrix.json
@@ -1289,6 +1289,7 @@
"name": "build dart",
"script": "tools/build.py",
"arguments": [
+ "--sanitizer=none,${sanitizer}",
"runtime",
"runtime_precompiled"
]