[vm] Implement VirtualMemory::Allocate using AllocateAligned

Instead of implementing separate aligned and unaligned memory
allocation primitives for each OS, just change the unaligned allocator
into a wrapper around the aligned primitive.

While here, we can optimize the AllocateAligned logic slightly: if we
want an N-page-aligned allocation, we only need to increase the
allocation size by N-1 pages instead of N.
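
As a concrete illustration, here is a minimal standalone sketch of the
over-allocate-and-trim arithmetic using POSIX mmap/munmap (names here
are illustrative, not the VM's actual helpers): since mmap already
returns page-aligned memory, over-reserving by alignment - page_size
bytes guarantees an aligned base somewhere inside the mapping, and the
excess can be trimmed from both ends.

  #include <stddef.h>    // size_t
  #include <stdint.h>    // uintptr_t
  #include <sys/mman.h>  // mmap, munmap

  // Sketch only: mirrors the trimming math in AllocateAligned below.
  void* AllocateAlignedSketch(size_t size, size_t alignment,
                              size_t page_size) {
    const size_t allocated_size = size + alignment - page_size;
    void* address = mmap(NULL, allocated_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANON, -1, 0);
    if (address == MAP_FAILED) return NULL;
    const uintptr_t base = reinterpret_cast<uintptr_t>(address);
    const uintptr_t aligned_base = (base + alignment - 1) & ~(alignment - 1);
    // Trim the unused leading and trailing pages, if any. When
    // alignment == page_size, allocated_size == size and both trims
    // are empty.
    if (aligned_base != base) {
      munmap(address, aligned_base - base);
    }
    if (aligned_base + size != base + allocated_size) {
      munmap(reinterpret_cast<void*>(aligned_base + size),
             base + allocated_size - (aligned_base + size));
    }
    return reinterpret_cast<void*>(aligned_base);
  }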

Notably, this means 1-page-aligned allocations don't require any extra
alignment pages, so the new logic behaves identically to the old logic
on Android, Fuchsia, Linux, and macOS.

On Windows, the only behavioral difference is that unaligned requests
used to be handled as a single VirtualAlloc call with
MEM_RESERVE | MEM_COMMIT, but are now handled as two separate calls
(reserve *then* commit). Naively, I don't expect this to matter in
practice, but if it does, we can always add a fast path for
alignment == page_size_ without affecting the OS-independent API.
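
Such a fast path would look roughly like the old Allocate body (a
hypothetical sketch at the top of the Windows AllocateAligned, not
part of this change):

  if (alignment == page_size_) {
    // Reserve and commit in a single VirtualAlloc call, as the old
    // unaligned Allocate did.
    int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
    void* address = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, prot);
    if (address == NULL) {
      return NULL;
    }
    MemoryRegion region(address, size);
    return new VirtualMemory(region, region);
  }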

Change-Id: I42b2cf5dfc6e137546d8acfb6cc8939a01687948
Reviewed-on: https://dart-review.googlesource.com/c/91081
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
diff --git a/runtime/vm/virtual_memory.h b/runtime/vm/virtual_memory.h
index 0ccdb41..75ca214 100644
--- a/runtime/vm/virtual_memory.h
+++ b/runtime/vm/virtual_memory.h
@@ -41,7 +41,9 @@
   // the requested size cannot be allocated, NULL is returned.
   static VirtualMemory* Allocate(intptr_t size,
                                  bool is_executable,
-                                 const char* name);
+                                 const char* name) {
+    return AllocateAligned(size, PageSize(), is_executable, name);
+  }
   static VirtualMemory* AllocateAligned(intptr_t size,
                                         intptr_t alignment,
                                         bool is_executable,
diff --git a/runtime/vm/virtual_memory_fuchsia.cc b/runtime/vm/virtual_memory_fuchsia.cc
index 217bfc0..f11b354 100644
--- a/runtime/vm/virtual_memory_fuchsia.cc
+++ b/runtime/vm/virtual_memory_fuchsia.cc
@@ -43,9 +43,23 @@
   page_size_ = getpagesize();
 }
 
-VirtualMemory* VirtualMemory::Allocate(intptr_t size,
-                                       bool is_executable,
-                                       const char* name) {
+static void unmap(zx_handle_t vmar, uword start, uword end) {
+  ASSERT(start <= end);
+  const uword size = end - start;
+  if (size == 0) {
+    return;
+  }
+
+  zx_status_t status = zx_vmar_unmap(vmar, start, size);
+  if (status != ZX_OK) {
+    FATAL1("zx_vmar_unmap failed: %s\n", zx_status_get_string(status));
+  }
+}
+
+VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
+                                              intptr_t alignment,
+                                              bool is_executable,
+                                              const char* name) {
   // When FLAG_write_protect_code is active, the VM allocates code
   // memory with !is_executable, and later changes to executable via
   // VirtualMemory::Protect, which requires ZX_RIGHT_EXECUTE on the
@@ -55,55 +69,9 @@
   const bool can_prot_exec = FLAG_write_protect_code;
 
   ASSERT(Utils::IsAligned(size, page_size_));
-  zx_handle_t vmo = ZX_HANDLE_INVALID;
-  zx_status_t status = zx_vmo_create(size, 0u, &vmo);
-  if (status != ZX_OK) {
-    LOG_ERR("zx_vmo_create(%ld) failed: %s\n", size,
-            zx_status_get_string(status));
-    return NULL;
-  }
-
-  if (name != NULL) {
-    zx_object_set_property(vmo, ZX_PROP_NAME, name, strlen(name));
-  }
-
-  if (is_executable || can_prot_exec) {
-    // Add ZX_RIGHT_EXECUTE permission to VMO, so it can be mapped
-    // into memory as executable.
-    status = zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo);
-    if (status != ZX_OK) {
-      LOG_ERR("zx_vmo_replace_as_executable() failed: %s\n",
-              zx_status_get_string(status));
-      return NULL;
-    }
-  }
-
-  const uint32_t flags = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
-                         (is_executable ? ZX_VM_PERM_EXECUTE : 0);
-  uword address;
-  status = zx_vmar_map(zx_vmar_root_self(), flags, 0, vmo, 0, size, &address);
-  zx_handle_close(vmo);
-  if (status != ZX_OK) {
-    LOG_ERR("zx_vmar_map(%u, %ld) failed: %s\n", flags, size,
-            zx_status_get_string(status));
-    return NULL;
-  }
-  LOG_INFO("zx_vmar_map(%u,%ld) success\n", flags, size);
-
-  MemoryRegion region(reinterpret_cast<void*>(address), size);
-  return new VirtualMemory(region, region);
-}
-
-VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
-                                              intptr_t alignment,
-                                              bool is_executable,
-                                              const char* name) {
-  // See explanation in VirtualMemory::Allocate above.
-  const bool can_prot_exec = FLAG_write_protect_code;
-
-  ASSERT(Utils::IsAligned(size, page_size_));
+  ASSERT(Utils::IsPowerOfTwo(alignment));
   ASSERT(Utils::IsAligned(alignment, page_size_));
-  intptr_t allocated_size = size + alignment;
+  const intptr_t allocated_size = size + alignment - page_size_;
 
   zx_handle_t vmar = zx_vmar_root_self();
   zx_handle_t vmo = ZX_HANDLE_INVALID;
@@ -140,25 +108,10 @@
     return NULL;
   }
 
-  uword aligned_base = Utils::RoundUp(base, alignment);
-  ASSERT(base <= aligned_base);
+  const uword aligned_base = Utils::RoundUp(base, alignment);
 
-  if (base != aligned_base) {
-    uword extra_leading_size = aligned_base - base;
-    status = zx_vmar_unmap(vmar, base, extra_leading_size);
-    if (status != ZX_OK) {
-      FATAL1("zx_vmar_unmap failed: %s\n", zx_status_get_string(status));
-    }
-    allocated_size -= extra_leading_size;
-  }
-
-  if (allocated_size != size) {
-    uword extra_trailing_size = allocated_size - size;
-    status = zx_vmar_unmap(vmar, aligned_base + size, extra_trailing_size);
-    if (status != ZX_OK) {
-      FATAL1("zx_vmar_unmap failed: %s\n", zx_status_get_string(status));
-    }
-  }
+  unmap(vmar, base, aligned_base);
+  unmap(vmar, aligned_base + size, base + allocated_size);
 
   MemoryRegion region(reinterpret_cast<void*>(aligned_base), size);
   return new VirtualMemory(region, region);
@@ -167,25 +120,15 @@
 VirtualMemory::~VirtualMemory() {
   // Reserved region may be empty due to VirtualMemory::Truncate.
   if (vm_owns_region() && reserved_.size() != 0) {
-    zx_status_t status =
-        zx_vmar_unmap(zx_vmar_root_self(), reserved_.start(), reserved_.size());
-    if (status != ZX_OK) {
-      FATAL3("zx_vmar_unmap(%lx, %lx) failed: %s\n", reserved_.start(),
-             reserved_.size(), zx_status_get_string(status));
-    }
+    unmap(zx_vmar_root_self(), reserved_.start(), reserved_.end());
     LOG_INFO("zx_vmar_unmap(%lx, %lx) success\n", reserved_.start(),
              reserved_.size());
   }
 }
 
 bool VirtualMemory::FreeSubSegment(void* address, intptr_t size) {
-  zx_status_t status = zx_vmar_unmap(
-      zx_vmar_root_self(), reinterpret_cast<uintptr_t>(address), size);
-  if (status != ZX_OK) {
-    LOG_ERR("zx_vmar_unmap(%p, %lx) failed: %s\n", address, size,
-            zx_status_get_string(status));
-    return false;
-  }
+  const uword start = reinterpret_cast<uword>(address);
+  unmap(zx_vmar_root_self(), start, start + size);
   LOG_INFO("zx_vmar_unmap(%p, %lx) success\n", address, size);
   return true;
 }
diff --git a/runtime/vm/virtual_memory_posix.cc b/runtime/vm/virtual_memory_posix.cc
index 637548f..e9d9016 100644
--- a/runtime/vm/virtual_memory_posix.cc
+++ b/runtime/vm/virtual_memory_posix.cc
@@ -29,12 +29,14 @@
   page_size_ = getpagesize();
 }
 
-static void unmap(void* address, intptr_t size) {
+static void unmap(uword start, uword end) {
+  ASSERT(start <= end);
+  const uword size = end - start;
   if (size == 0) {
     return;
   }
 
-  if (munmap(address, size) != 0) {
+  if (munmap(reinterpret_cast<void*>(start), size) != 0) {
     int error = errno;
     const int kBufferSize = 1024;
     char error_buf[kBufferSize];
@@ -43,47 +45,26 @@
   }
 }
 
-VirtualMemory* VirtualMemory::Allocate(intptr_t size,
-                                       bool is_executable,
-                                       const char* name) {
-  ASSERT(Utils::IsAligned(size, page_size_));
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* address = mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-  if (address == MAP_FAILED) {
-    return NULL;
-  }
-  MemoryRegion region(address, size);
-  return new VirtualMemory(region, region);
-}
-
 VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
                                               intptr_t alignment,
                                               bool is_executable,
                                               const char* name) {
   ASSERT(Utils::IsAligned(size, page_size_));
+  ASSERT(Utils::IsPowerOfTwo(alignment));
   ASSERT(Utils::IsAligned(alignment, page_size_));
-  intptr_t allocated_size = size + alignment;
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  const intptr_t allocated_size = size + alignment - page_size_;
+  const int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
   void* address =
       mmap(NULL, allocated_size, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
   if (address == MAP_FAILED) {
     return NULL;
   }
 
-  uword base = reinterpret_cast<uword>(address);
-  uword aligned_base = Utils::RoundUp(base, alignment);
-  ASSERT(base <= aligned_base);
+  const uword base = reinterpret_cast<uword>(address);
+  const uword aligned_base = Utils::RoundUp(base, alignment);
 
-  if (base != aligned_base) {
-    uword extra_leading_size = aligned_base - base;
-    unmap(reinterpret_cast<void*>(base), extra_leading_size);
-    allocated_size -= extra_leading_size;
-  }
-
-  if (allocated_size != size) {
-    uword extra_trailing_size = allocated_size - size;
-    unmap(reinterpret_cast<void*>(aligned_base + size), extra_trailing_size);
-  }
+  unmap(base, aligned_base);
+  unmap(aligned_base + size, base + allocated_size);
 
   MemoryRegion region(reinterpret_cast<void*>(aligned_base), size);
   return new VirtualMemory(region, region);
@@ -91,13 +72,14 @@
 
 VirtualMemory::~VirtualMemory() {
   if (vm_owns_region()) {
-    unmap(reserved_.pointer(), reserved_.size());
+    unmap(reserved_.start(), reserved_.end());
   }
 }
 
 bool VirtualMemory::FreeSubSegment(void* address,
                                    intptr_t size) {
-  unmap(address, size);
+  const uword start = reinterpret_cast<uword>(address);
+  unmap(start, start + size);
   return true;
 }
 
diff --git a/runtime/vm/virtual_memory_win.cc b/runtime/vm/virtual_memory_win.cc
index a36128a..d976676 100644
--- a/runtime/vm/virtual_memory_win.cc
+++ b/runtime/vm/virtual_memory_win.cc
@@ -22,26 +22,14 @@
   page_size_ = info.dwPageSize;
 }
 
-VirtualMemory* VirtualMemory::Allocate(intptr_t size,
-                                       bool is_executable,
-                                       const char* name) {
-  ASSERT(Utils::IsAligned(size, page_size_));
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  void* address = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, prot);
-  if (address == NULL) {
-    return NULL;
-  }
-  MemoryRegion region(address, size);
-  return new VirtualMemory(region, region);
-}
-
 VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
                                               intptr_t alignment,
                                               bool is_executable,
                                               const char* name) {
   ASSERT(Utils::IsAligned(size, page_size_));
+  ASSERT(Utils::IsPowerOfTwo(alignment));
   ASSERT(Utils::IsAligned(alignment, page_size_));
-  intptr_t reserved_size = size + alignment;
+  intptr_t reserved_size = size + alignment - page_size_;
   int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
   void* address = VirtualAlloc(NULL, reserved_size, MEM_RESERVE, prot);
   if (address == NULL) {