// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"
#if defined(HOST_OS_ANDROID) || defined(HOST_OS_LINUX) || defined(HOST_OS_MACOS)

#include "vm/virtual_memory.h"

#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

#include "platform/assert.h"
#include "platform/utils.h"
#include "vm/isolate.h"

// #define VIRTUAL_MEMORY_LOGGING 1
#if defined(VIRTUAL_MEMORY_LOGGING)
#define LOG_INFO(msg, ...) OS::PrintErr(msg, ##__VA_ARGS__)
#else
#define LOG_INFO(msg, ...)
#endif  // defined(VIRTUAL_MEMORY_LOGGING)

namespace dart {

// The standard MAP_FAILED macro expands to ((void *) -1), which triggers
// "error: use of old-style cast", so redefine it with a C++-style cast.
#undef MAP_FAILED
#define MAP_FAILED reinterpret_cast<void*>(-1)

DECLARE_FLAG(bool, dual_map_code);
DECLARE_FLAG(bool, write_protect_code);

#if defined(HOST_OS_LINUX)
DECLARE_FLAG(bool, generate_perf_events_symbols);
DECLARE_FLAG(bool, generate_perf_jitdump);
#endif

uword VirtualMemory::page_size_ = 0;

void VirtualMemory::Init() {
  page_size_ = getpagesize();

#if defined(DUAL_MAPPING_SUPPORTED)
// Perf is Linux-specific and the flags aren't defined in Product.
#if defined(HOST_OS_LINUX) && !defined(PRODUCT)
  // Perf interacts strangely with memfds, leading it to sometimes collect
  // garbled return addresses.
  if (FLAG_generate_perf_events_symbols || FLAG_generate_perf_jitdump) {
    LOG_INFO(
        "Dual code mapping disabled to generate perf events or jitdump.\n");
    FLAG_dual_map_code = false;
    return;
  }
#endif

  // Detect the dual-mapping exec permission limitation on some platforms,
  // such as docker containers, and disable dual mapping in that case.
  // Also detect missing support for the memfd_create syscall.
  if (FLAG_dual_map_code) {
    intptr_t size = page_size_;
    intptr_t alignment = 256 * 1024;  // e.g. heap page size.
    VirtualMemory* vm = AllocateAligned(size, alignment, true, NULL);
    if (vm == NULL) {
      LOG_INFO(
          "memfd_create not supported; disabling dual mapping of code.\n");
      FLAG_dual_map_code = false;
      return;
    }
    void* region = reinterpret_cast<void*>(vm->region_.start());
    void* alias = reinterpret_cast<void*>(vm->alias_.start());
    if (region == alias ||
        mprotect(region, size, PROT_READ) != 0 ||  // Remove PROT_WRITE.
        mprotect(alias, size, PROT_READ | PROT_EXEC) != 0) {  // Add PROT_EXEC.
      LOG_INFO("mprotect fails; disabling dual mapping of code.\n");
      FLAG_dual_map_code = false;
    }
    delete vm;
  }
#endif  // defined(DUAL_MAPPING_SUPPORTED)
}

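// Unmaps the virtual address range [start, end). A zero-length range is a
// no-op; any munmap failure is fatal.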
static void unmap(uword start, uword end) {
  ASSERT(start <= end);
  uword size = end - start;
  if (size == 0) {
    return;
  }

  if (munmap(reinterpret_cast<void*>(start), size) != 0) {
    int error = errno;
    const int kBufferSize = 1024;
    char error_buf[kBufferSize];
    FATAL2("munmap error: %d (%s)", error,
           Utils::StrError(error, error_buf, kBufferSize));
  }
}

#if defined(DUAL_MAPPING_SUPPORTED)
// Do not leak file descriptors to child processes.
#if !defined(MFD_CLOEXEC)
#define MFD_CLOEXEC 0x0001U
#endif

// Wrapper to call the memfd_create syscall directly, since the C library may
// not provide a wrapper for it.
static inline int memfd_create(const char* name, unsigned int flags) {
#if !defined(__NR_memfd_create)
  errno = ENOSYS;
  return -1;
#else
  return syscall(__NR_memfd_create, name, flags);
#endif
}

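// Maps 'size' bytes of 'fd' at an address aligned to 'alignment'. A PROT_NONE
// anonymous reservation of 'allocated_size' bytes is made first; the file is
// then mapped with MAP_FIXED at the first aligned address inside that
// reservation, and the unused head and tail of the reservation are unmapped.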
static void* MapAligned(int fd,
                        int prot,
                        intptr_t size,
                        intptr_t alignment,
                        intptr_t allocated_size) {
  void* address =
      mmap(NULL, allocated_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  LOG_INFO("mmap(NULL, 0x%" Px ", PROT_NONE, ...): %p\n", allocated_size,
           address);
  if (address == MAP_FAILED) {
    return NULL;
  }

  const uword base = reinterpret_cast<uword>(address);
  const uword aligned_base = Utils::RoundUp(base, alignment);

  // Guarantee the alignment by mapping at a fixed address inside the above
  // reservation. The overlapping part of the reservation is replaced
  // automatically; the non-overlapping head and tail are unmapped manually
  // below.
  address = mmap(reinterpret_cast<void*>(aligned_base), size, prot,
                 MAP_SHARED | MAP_FIXED, fd, 0);
  LOG_INFO("mmap(0x%" Px ", 0x%" Px ", %u, ...): %p\n", aligned_base, size,
           prot, address);
  if (address == MAP_FAILED) {
    unmap(base, base + allocated_size);
    return NULL;
  }
  ASSERT(address == reinterpret_cast<void*>(aligned_base));
  unmap(base, aligned_base);
  unmap(aligned_base + size, base + allocated_size);
  return address;
}
#endif  // defined(DUAL_MAPPING_SUPPORTED)

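// Illustrative sketch of a typical call sequence (not taken from the VM
// sources; the "dart-code" tag and the address() accessor are assumed here):
//
//   VirtualMemory* vm = VirtualMemory::AllocateAligned(
//       size, alignment, /*is_executable=*/true, "dart-code");
//   // ... emit machine code into the writable mapping ...
//   VirtualMemory::Protect(vm->address(), size, VirtualMemory::kReadExecute);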
VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
                                              intptr_t alignment,
                                              bool is_executable,
                                              const char* name) {
#if defined(TARGET_ARCH_DBC)
  RELEASE_ASSERT(!is_executable);
#endif

  // When FLAG_write_protect_code is active, code memory (indicated by
  // is_executable = true) is allocated as non-executable and later
  // changed to executable via VirtualMemory::Protect.
  ASSERT(Utils::IsAligned(size, page_size_));
  ASSERT(Utils::IsPowerOfTwo(alignment));
  ASSERT(Utils::IsAligned(alignment, page_size_));
  const intptr_t allocated_size = size + alignment - page_size_;
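  // With dual mapping, the same memfd-backed pages are mapped twice: once as
  // a read/write region used to emit code, and once as a read-only alias that
  // is later made executable via Protect. Writes never go through the
  // executable view.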
#if defined(DUAL_MAPPING_SUPPORTED)
  int fd = -1;
  const bool dual_mapping =
      is_executable && FLAG_write_protect_code && FLAG_dual_map_code;
  if (dual_mapping) {
    fd = memfd_create("dart_vm", MFD_CLOEXEC);
    if (fd == -1) {
      return NULL;
    }
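    // Size the anonymous file so it can back 'size' bytes of code.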
    if (ftruncate(fd, size) == -1) {
      close(fd);
      return NULL;
    }
    const int region_prot = PROT_READ | PROT_WRITE;
    void* region_ptr =
        MapAligned(fd, region_prot, size, alignment, allocated_size);
    if (region_ptr == NULL) {
      close(fd);
      return NULL;
    }
    MemoryRegion region(region_ptr, size);
    // PROT_EXEC is added later via VirtualMemory::Protect.
    const int alias_prot = PROT_READ;
    void* alias_ptr =
        MapAligned(fd, alias_prot, size, alignment, allocated_size);
    close(fd);
    if (alias_ptr == NULL) {
      const uword region_base = reinterpret_cast<uword>(region_ptr);
      unmap(region_base, region_base + size);
      return NULL;
    }
    ASSERT(region_ptr != alias_ptr);
    MemoryRegion alias(alias_ptr, size);
    return new VirtualMemory(region, alias, region);
  }
#endif  // defined(DUAL_MAPPING_SUPPORTED)
  const int prot =
      PROT_READ | PROT_WRITE |
      ((is_executable && !FLAG_write_protect_code) ? PROT_EXEC : 0);
  void* address =
      mmap(NULL, allocated_size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  LOG_INFO("mmap(NULL, 0x%" Px ", %u, ...): %p\n", allocated_size, prot,
           address);
  if (address == MAP_FAILED) {
    return NULL;
  }

  const uword base = reinterpret_cast<uword>(address);
  const uword aligned_base = Utils::RoundUp(base, alignment);

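  // Release the unaligned head and tail of the reservation so that exactly
  // [aligned_base, aligned_base + size) remains mapped.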
  unmap(base, aligned_base);
  unmap(aligned_base + size, base + allocated_size);

  MemoryRegion region(reinterpret_cast<void*>(aligned_base), size);
  return new VirtualMemory(region, region);
}

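// Releases the reserved region, plus the alias mapping when the memory was
// dual-mapped (AliasOffset() is non-zero in that case).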
VirtualMemory::~VirtualMemory() {
  if (vm_owns_region()) {
    unmap(reserved_.start(), reserved_.end());
    const intptr_t alias_offset = AliasOffset();
    if (alias_offset != 0) {
      unmap(reserved_.start() + alias_offset, reserved_.end() + alias_offset);
    }
  }
}

void VirtualMemory::FreeSubSegment(void* address, intptr_t size) {
  const uword start = reinterpret_cast<uword>(address);
  unmap(start, start + size);
}

void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
#if defined(TARGET_ARCH_DBC)
  RELEASE_ASSERT((mode != kReadExecute) && (mode != kReadWriteExecute));
#endif
#if defined(DEBUG)
  Thread* thread = Thread::Current();
  ASSERT((thread == nullptr) || thread->IsMutatorThread() ||
         thread->isolate()->mutator_thread()->IsAtSafepoint());
#endif
  uword start_address = reinterpret_cast<uword>(address);
  uword end_address = start_address + size;
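  // mprotect operates at page granularity, so round the start address down to
  // its page boundary before changing the protection.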
  uword page_address = Utils::RoundDown(start_address, PageSize());
  int prot = 0;
  switch (mode) {
    case kNoAccess:
      prot = PROT_NONE;
      break;
    case kReadOnly:
      prot = PROT_READ;
      break;
    case kReadWrite:
      prot = PROT_READ | PROT_WRITE;
      break;
    case kReadExecute:
      prot = PROT_READ | PROT_EXEC;
      break;
    case kReadWriteExecute:
      prot = PROT_READ | PROT_WRITE | PROT_EXEC;
      break;
  }
  if (mprotect(reinterpret_cast<void*>(page_address),
               end_address - page_address, prot) != 0) {
    int error = errno;
    const int kBufferSize = 1024;
    char error_buf[kBufferSize];
    LOG_INFO("mprotect(0x%" Px ", 0x%" Px ", %u) failed\n", page_address,
             end_address - page_address, prot);
    FATAL2("mprotect error: %d (%s)", error,
           Utils::StrError(error, error_buf, kBufferSize));
  }
  LOG_INFO("mprotect(0x%" Px ", 0x%" Px ", %u) ok\n", page_address,
           end_address - page_address, prot);
}

}  // namespace dart

#endif  // defined(HOST_OS_ANDROID ... HOST_OS_LINUX ... HOST_OS_MACOS)