[vm] Bypass malloc for large Zone allocations to avoid jemalloc leaks.
Revert 9a07ad88f494ca81030172c07b47d54a0f46f822.
Bug: https://github.com/flutter/flutter/issues/29007
Change-Id: I6a5f51f0c3a54d354ec5f8495677d46f94d8a1d3
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/100568
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Zach Anderson <zra@google.com>
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc
index 73f97414..92e2c88 100644
--- a/runtime/vm/isolate.cc
+++ b/runtime/vm/isolate.cc
@@ -949,8 +949,7 @@
catch_entry_moves_cache_(),
embedder_entry_points_(NULL),
obfuscation_map_(NULL),
- reverse_pc_lookup_cache_(nullptr),
- irregexp_backtrack_stack_(nullptr) {
+ reverse_pc_lookup_cache_(nullptr) {
FlagsCopyFrom(api_flags);
SetErrorsFatal(true);
set_compilation_allowed(true);
@@ -982,9 +981,6 @@
// RELEASE_ASSERT(reload_context_ == NULL);
#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
- delete[] irregexp_backtrack_stack_;
- irregexp_backtrack_stack_ = nullptr;
-
delete reverse_pc_lookup_cache_;
reverse_pc_lookup_cache_ = nullptr;
diff --git a/runtime/vm/isolate.h b/runtime/vm/isolate.h
index 2782cd6..af038ff 100644
--- a/runtime/vm/isolate.h
+++ b/runtime/vm/isolate.h
@@ -734,18 +734,6 @@
reverse_pc_lookup_cache_ = table;
}
- // This doesn't belong here, but to avoid triggering bugs in jemalloc we
- // allocate the irregexpinterpreter's stack once per isolate instead of once
- // per regexp execution.
- // See https://github.com/flutter/flutter/issues/29007
- static constexpr intptr_t kIrregexpBacktrackStackSize = 1 << 16;
- intptr_t* irregexp_backtrack_stack() {
- if (irregexp_backtrack_stack_ == nullptr) {
- irregexp_backtrack_stack_ = new intptr_t[kIrregexpBacktrackStackSize];
- }
- return irregexp_backtrack_stack_;
- }
-
// Isolate-specific flag handling.
static void FlagsInitialize(Dart_IsolateFlags* api_flags);
void FlagsCopyTo(Dart_IsolateFlags* api_flags) const;
@@ -1058,8 +1046,6 @@
ReversePcLookupCache* reverse_pc_lookup_cache_;
- intptr_t* irregexp_backtrack_stack_;
-
static Dart_IsolateCreateCallback create_callback_;
static Dart_IsolateShutdownCallback shutdown_callback_;
static Dart_IsolateCleanupCallback cleanup_callback_;
diff --git a/runtime/vm/regexp_assembler_bytecode.cc b/runtime/vm/regexp_assembler_bytecode.cc
index 3b24809..0318a27 100644
--- a/runtime/vm/regexp_assembler_bytecode.cc
+++ b/runtime/vm/regexp_assembler_bytecode.cc
@@ -494,7 +494,7 @@
TypedData::Handle(zone, regexp.bytecode(is_one_byte, sticky));
ASSERT(!bytecode.IsNull());
IrregexpInterpreter::IrregexpResult result =
- IrregexpInterpreter::Match(bytecode, subject, raw_output, index);
+ IrregexpInterpreter::Match(bytecode, subject, raw_output, index, zone);
if (result == IrregexpInterpreter::RE_SUCCESS) {
// Copy capture results to the start of the registers array.
diff --git a/runtime/vm/regexp_interpreter.cc b/runtime/vm/regexp_interpreter.cc
index 86bc677..2921e3e 100644
--- a/runtime/vm/regexp_interpreter.cc
+++ b/runtime/vm/regexp_interpreter.cc
@@ -127,8 +127,8 @@
// matching terminates.
class BacktrackStack {
public:
- explicit BacktrackStack(Isolate* isolate) {
- data_ = isolate->irregexp_backtrack_stack();
+ explicit BacktrackStack(Zone* zone) {
+ data_ = zone->Alloc<intptr_t>(kBacktrackStackSize);
}
intptr_t* data() const { return data_; }
@@ -136,8 +136,7 @@
intptr_t max_size() const { return kBacktrackStackSize; }
private:
- static constexpr intptr_t kBacktrackStackSize =
- Isolate::kIrregexpBacktrackStackSize;
+ static const intptr_t kBacktrackStackSize = 1 << 16;
intptr_t* data_;
@@ -149,12 +148,13 @@
const String& subject,
int32_t* registers,
intptr_t current,
- uint32_t current_char) {
+ uint32_t current_char,
+ Zone* zone) {
const uint8_t* pc = code_base;
// BacktrackStack ensures that the memory allocated for the backtracking stack
// is returned to the system or cached if there is no stack being cached at
// the moment.
- BacktrackStack backtrack_stack(Isolate::Current());
+ BacktrackStack backtrack_stack(zone);
intptr_t* backtrack_stack_base = backtrack_stack.data();
intptr_t* backtrack_sp = backtrack_stack_base;
intptr_t backtrack_stack_space = backtrack_stack.max_size();
@@ -627,7 +627,8 @@
const TypedData& bytecode,
const String& subject,
int32_t* registers,
- intptr_t start_position) {
+ intptr_t start_position,
+ Zone* zone) {
NoSafepointScope no_safepoint;
const uint8_t* code_base = reinterpret_cast<uint8_t*>(bytecode.DataAddr(0));
@@ -638,10 +639,10 @@
if (subject.IsOneByteString() || subject.IsExternalOneByteString()) {
return RawMatch<uint8_t>(code_base, subject, registers, start_position,
- previous_char);
+ previous_char, zone);
} else if (subject.IsTwoByteString() || subject.IsExternalTwoByteString()) {
return RawMatch<uint16_t>(code_base, subject, registers, start_position,
- previous_char);
+ previous_char, zone);
} else {
UNREACHABLE();
return IrregexpInterpreter::RE_FAILURE;
diff --git a/runtime/vm/regexp_interpreter.h b/runtime/vm/regexp_interpreter.h
index 7a29fa0..9e4e567 100644
--- a/runtime/vm/regexp_interpreter.h
+++ b/runtime/vm/regexp_interpreter.h
@@ -20,7 +20,8 @@
static IrregexpResult Match(const TypedData& bytecode,
const String& subject,
int32_t* captures,
- intptr_t start_position);
+ intptr_t start_position,
+ Zone* zone);
};
} // namespace dart
diff --git a/runtime/vm/zone.cc b/runtime/vm/zone.cc
index 8227e48..2d95479 100644
--- a/runtime/vm/zone.cc
+++ b/runtime/vm/zone.cc
@@ -11,6 +11,7 @@
#include "vm/handles_impl.h"
#include "vm/heap/heap.h"
#include "vm/os.h"
+#include "vm/virtual_memory.h"
namespace dart {
@@ -21,25 +22,28 @@
public:
Segment* next() const { return next_; }
intptr_t size() const { return size_; }
+ VirtualMemory* memory() const { return memory_; }
uword start() { return address(sizeof(Segment)); }
uword end() { return address(size_); }
// Allocate or delete individual segments.
static Segment* New(intptr_t size, Segment* next);
+ static Segment* NewLarge(intptr_t size, Segment* next);
static void DeleteSegmentList(Segment* segment);
+ static void DeleteLargeSegmentList(Segment* segment);
static void IncrementMemoryCapacity(uintptr_t size);
static void DecrementMemoryCapacity(uintptr_t size);
private:
Segment* next_;
intptr_t size_;
+ VirtualMemory* memory_;
+ void* alignment_;
// Computes the address of the nth byte in this segment.
uword address(int n) { return reinterpret_cast<uword>(this) + n; }
- static void Delete(Segment* segment) { free(segment); }
-
DISALLOW_IMPLICIT_CONSTRUCTORS(Segment);
};
@@ -49,13 +53,36 @@
if (result == NULL) {
OUT_OF_MEMORY();
}
- ASSERT(Utils::IsAligned(result->start(), Zone::kAlignment));
#ifdef DEBUG
// Zap the entire allocated segment (including the header).
memset(result, kZapUninitializedByte, size);
#endif
result->next_ = next;
result->size_ = size;
+ result->memory_ = nullptr;
+ result->alignment_ = nullptr; // Avoid unused variable warnings.
+ IncrementMemoryCapacity(size);
+ return result;
+}
+
+Zone::Segment* Zone::Segment::NewLarge(intptr_t size, Zone::Segment* next) {
+ size = Utils::RoundUp(size, VirtualMemory::PageSize());
+ VirtualMemory* memory = VirtualMemory::Allocate(size, false, "dart-zone");
+ if (memory == NULL) {
+ OUT_OF_MEMORY();
+ }
+ Segment* result = reinterpret_cast<Segment*>(memory->start());
+#ifdef DEBUG
+ // Zap the entire allocated segment (including the header).
+ memset(result, kZapUninitializedByte, size);
+#endif
+ result->next_ = next;
+ result->size_ = size;
+ result->memory_ = memory;
+ result->alignment_ = nullptr; // Avoid unused variable warnings.
+
+ LSAN_REGISTER_ROOT_REGION(result, sizeof(*result));
+
IncrementMemoryCapacity(size);
return result;
}
@@ -69,7 +96,23 @@
// Zap the entire current segment (including the header).
memset(current, kZapDeletedByte, current->size());
#endif
- Segment::Delete(current);
+ free(current);
+ current = next;
+ }
+}
+
+void Zone::Segment::DeleteLargeSegmentList(Segment* head) {
+ Segment* current = head;
+ while (current != NULL) {
+ DecrementMemoryCapacity(current->size());
+ Segment* next = current->next();
+ VirtualMemory* memory = current->memory();
+#ifdef DEBUG
+ // Zap the entire current segment (including the header).
+ memset(current, kZapDeletedByte, current->size());
+#endif
+ LSAN_UNREGISTER_ROOT_REGION(current, sizeof(*current));
+ delete memory;
current = next;
}
}
@@ -129,7 +172,7 @@
Segment::DeleteSegmentList(head_);
}
if (large_segments_ != NULL) {
- Segment::DeleteSegmentList(large_segments_);
+ Segment::DeleteLargeSegmentList(large_segments_);
}
// Reset zone state.
#ifdef DEBUG
@@ -214,9 +257,9 @@
ASSERT(free_size < size);
// Create a new large segment and chain it up.
- ASSERT(Utils::IsAligned(sizeof(Segment), kAlignment));
- size += sizeof(Segment); // Account for book keeping fields in size.
- large_segments_ = Segment::New(size, large_segments_);
+ // Account for bookkeeping fields in size.
+ size += Utils::RoundUp(sizeof(Segment), kAlignment);
+ large_segments_ = Segment::NewLarge(size, large_segments_);
uword result = Utils::RoundUp(large_segments_->start(), kAlignment);
return result;
diff --git a/runtime/vm/zone_test.cc b/runtime/vm/zone_test.cc
index ff9f6b5..aeb98d6 100644
--- a/runtime/vm/zone_test.cc
+++ b/runtime/vm/zone_test.cc
@@ -176,4 +176,66 @@
EXPECT_EQ(0UL, ApiNativeScope::current_memory_usage());
}
+#if !defined(PRODUCT)
+// Allow for pooling in the malloc implementation.
+static const int64_t kRssSlack = 20 * MB;
+#endif // !defined(PRODUCT)
+
+// clang-format off
+static const size_t kSizes[] = {
+ 64 * KB,
+ 64 * KB + 2 * kWordSize,
+ 64 * KB - 2 * kWordSize,
+ 128 * KB,
+ 128 * KB + 2 * kWordSize,
+ 128 * KB - 2 * kWordSize,
+ 256 * KB,
+ 256 * KB + 2 * kWordSize,
+ 256 * KB - 2 * kWordSize,
+ 512 * KB,
+ 512 * KB + 2 * kWordSize,
+ 512 * KB - 2 * kWordSize,
+};
+// clang-format on
+
+TEST_CASE(StressMallocDirectly) {
+#if !defined(PRODUCT)
+ int64_t start_rss = Service::CurrentRSS();
+#endif // !defined(PRODUCT)
+
+ void* allocations[ARRAY_SIZE(kSizes)];
+ for (size_t i = 0; i < ((3u * GB) / (512u * KB)); i++) {
+ for (size_t j = 0; j < ARRAY_SIZE(kSizes); j++) {
+ allocations[j] = malloc(kSizes[j]);
+ }
+ for (size_t j = 0; j < ARRAY_SIZE(kSizes); j++) {
+ free(allocations[j]);
+ }
+ }
+
+#if !defined(PRODUCT)
+ int64_t stop_rss = Service::CurrentRSS();
+ EXPECT_LT(stop_rss, start_rss + kRssSlack);
+#endif // !defined(PRODUCT)
+}
+
+TEST_CASE(StressMallocThroughZones) {
+#if !defined(PRODUCT)
+ int64_t start_rss = Service::CurrentRSS();
+#endif // !defined(PRODUCT)
+
+ for (size_t i = 0; i < ((3u * GB) / (512u * KB)); i++) {
+ StackZone stack_zone(Thread::Current());
+ Zone* zone = stack_zone.GetZone();
+ for (size_t j = 0; j < ARRAY_SIZE(kSizes); j++) {
+ zone->Alloc<uint8_t>(kSizes[j]);
+ }
+ }
+
+#if !defined(PRODUCT)
+ int64_t stop_rss = Service::CurrentRSS();
+ EXPECT_LT(stop_rss, start_rss + kRssSlack);
+#endif // !defined(PRODUCT)
+}
+
} // namespace dart
diff --git a/tests/corelib_2/regexp/jemalloc_leak_backtracking_stack_test.dart b/tests/corelib_2/regexp/jemalloc_leak_backtracking_stack_test.dart
new file mode 100644
index 0000000..18c4621
--- /dev/null
+++ b/tests/corelib_2/regexp/jemalloc_leak_backtracking_stack_test.dart
@@ -0,0 +1,24 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// Regression test for https://github.com/flutter/flutter/issues/29007
+
+String escape(String string) {
+ var regex = new RegExp("(\\?|\\\$|\\*|\\(|\\)|\\[)|\\+|\\.|\\\\");
+ return string.replaceAllMapped(
+ regex, (Match m) => "\\" + string.substring(m.start, m.end));
+}
+
+main() {
+ var text = """
+Yet but three? Come one more.
+Two of both kinds make up four.
+""";
+ var accumulate = 0;
+ for (var i = 0; i < 65536; i++) {
+ accumulate += escape(text).length;
+ }
+
+ print(accumulate);
+}