Revert "Reland "[VM runtime] Dual mapping of executable pages.""

This reverts commit 6da340bf76f104b982dc90c2f6642cceb4cdcafd.

Reason for revert: Almost all benchmarks are now failing on the Golem linux-x64 target, and the Flutter Golem build is broken.

Original change's description:
> Reland "[VM runtime] Dual mapping of executable pages."
> 
> This is a reland of 44186dfdcda19038616b48914d10b4b7a9ccfc2f
> 
> Original change's description:
> > [VM runtime] Dual mapping of executable pages.
> > 
> > Change-Id: Iaad78d324e25462ce951f4df26974a6a368c50b7
> > Reviewed-on: https://dart-review.googlesource.com/c/93377
> > Commit-Queue: Régis Crelier <regis@google.com>
> > Reviewed-by: Ryan Macnak <rmacnak@google.com>
> 
> Change-Id: I7a0caa078950637d9fe831732577fd2467061099
> Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/95263
> Reviewed-by: Ryan Macnak <rmacnak@google.com>

TBR=rmacnak@google.com,regis@google.com

Change-Id: I3342de2584537269ffe9a53946bef27cb25a69fc
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/96161
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
Commit-Queue: Vyacheslav Egorov <vegorov@google.com>
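
For context on what is being backed out: the reverted change implemented W^X for
JIT code by mapping the same physical pages at two virtual addresses, a
read-write view used by the compiler and GC and a read-execute alias used for
execution (see the removed MapAligned/memfd_create code in
runtime/vm/virtual_memory_posix.cc below). The following is an illustrative
sketch of that scheme only, assuming Linux with the memfd_create syscall; the
helper names are not the VM's.

  // Sketch of the reverted dual-mapping scheme (illustrative, not VM code).
  #include <sys/mman.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  // memfd_create via raw syscall, as the removed code did (older libc
  // versions lack a wrapper).
  static int MemFdCreate(const char* name, unsigned int flags) {
    return static_cast<int>(syscall(__NR_memfd_create, name, flags));
  }

  // Maps the same pages twice: *rw is the writable view instructions are
  // written through, *rx is the executable alias they run from. No RWX
  // mapping ever exists.
  static bool DualMap(size_t size, void** rw, void** rx) {
    int fd = MemFdCreate("/dual_mapping", 0);
    if (fd == -1 || ftruncate(fd, size) == -1) {
      if (fd != -1) close(fd);
      return false;
    }
    *rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    // The VM mapped the alias PROT_READ and added PROT_EXEC later via
    // VirtualMemory::Protect; mapping it read-execute directly is an
    // equivalent simplification here.
    *rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    close(fd);
    return *rw != MAP_FAILED && *rx != MAP_FAILED;
  }
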
diff --git a/runtime/platform/globals.h b/runtime/platform/globals.h
index ec996e9..6171827 100644
--- a/runtime/platform/globals.h
+++ b/runtime/platform/globals.h
@@ -352,14 +352,6 @@
 #error Unknown architecture.
 #endif
 
-// Determine whether dual mapping of code pages is supported.
-#if !defined(USING_SIMULATOR) &&                                               \
-    (defined(HOST_OS_LINUX) || defined(HOST_OS_FUCHSIA)) &&                    \
-    !defined(HOST_OS_ANDROID) && !defined(TARGET_OS_ANDROID) &&                \
-    !defined(TARGET_ARCH_IA32)
-#define DUAL_MAPPING_SUPPORTED 1
-#endif
-
 // Disable background threads by default on armv5te. The relevant
 // implementations are uniprocessors.
 #if !defined(TARGET_ARCH_ARM_5TE)
diff --git a/runtime/tests/vm/vm.status b/runtime/tests/vm/vm.status
index 6554bb2..a56a92eb 100644
--- a/runtime/tests/vm/vm.status
+++ b/runtime/tests/vm/vm.status
@@ -4,7 +4,6 @@
 
 cc/AllocGeneric_Overflow: Crash, Fail # These tests are expected to crash on all platforms.
 cc/ArrayNew_Overflow_Crash: Crash, Fail # These tests are expected to crash on all platforms.
-cc/CodeExecutability: Crash, Fail # These tests are expected to crash on all platforms.
 cc/CodeImmutability: Crash, Fail # These tests are expected to crash on all platforms.
 cc/Dart2JSCompileAll: Fail, Crash # Issue 27369
 cc/Dart2JSCompilerStats: Fail, Crash # Issue 27369
@@ -29,7 +28,6 @@
 dart/appjit_cha_deopt_test: Pass, Slow # Quite slow in debug mode, uses --optimization-counter-threshold=100
 
 [ $builder_tag == asan ]
-cc/CodeExecutability: Fail, OK # Address Sanitizer turns a crash into a failure.
 cc/CodeImmutability: Fail, OK # Address Sanitizer turns a crash into a failure.
 
 [ $builder_tag == optimization_counter_threshold ]
diff --git a/runtime/vm/code_patcher.cc b/runtime/vm/code_patcher.cc
index 7168231..4926926 100644
--- a/runtime/vm/code_patcher.cc
+++ b/runtime/vm/code_patcher.cc
@@ -12,13 +12,6 @@
 
 DEFINE_FLAG(bool, write_protect_code, true, "Write protect jitted code");
 
-#if defined(DUAL_MAPPING_SUPPORTED)
-DEFINE_FLAG(bool, dual_map_code, true, "Dual map jitted code, RW and RX");
-#else
-DEFINE_FLAG(bool, dual_map_code, false, "Dual map jitted code, RW and RX");
-#endif  // defined(DUAL_MAPPING_SUPPORTED)
-
-#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_DBC)
 WritableInstructionsScope::WritableInstructionsScope(uword address,
                                                      intptr_t size)
     : address_(address), size_(size) {
@@ -34,7 +27,6 @@
                            VirtualMemory::kReadExecute);
   }
 }
-#endif  // defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_DBC)
 
 bool MatchesPattern(uword end, const int16_t* pattern, intptr_t size) {
   // When breaking within generated code in GDB, it may overwrite individual
diff --git a/runtime/vm/code_patcher.h b/runtime/vm/code_patcher.h
index 239a69d..6aa62af 100644
--- a/runtime/vm/code_patcher.h
+++ b/runtime/vm/code_patcher.h
@@ -19,12 +19,10 @@
 class RawFunction;
 class RawObject;
 
-#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_DBC)
 // Stack-allocated class to create a scope where the specified region
 // [address, address + size] has write access enabled. This is used
 // when patching generated code. Access is reset to read-execute in
 // the destructor of this scope.
-// Dual mapping of instructions pages is not supported on these target arch.
 class WritableInstructionsScope : public ValueObject {
  public:
   WritableInstructionsScope(uword address, intptr_t size);
@@ -34,7 +32,6 @@
   const uword address_;
   const intptr_t size_;
 };
-#endif  // defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_DBC)
 
 class CodePatcher : public AllStatic {
  public:
diff --git a/runtime/vm/compiler/relocation.cc b/runtime/vm/compiler/relocation.cc
index 5d4d35f..467aa78 100644
--- a/runtime/vm/compiler/relocation.cc
+++ b/runtime/vm/compiler/relocation.cc
@@ -5,7 +5,6 @@
 #include "vm/compiler/relocation.h"
 
 #include "vm/code_patcher.h"
-#include "vm/heap/pages.h"
 #include "vm/instructions.h"
 #include "vm/object_store.h"
 #include "vm/stub_code.h"
@@ -363,11 +362,8 @@
   auto caller = Code::InstructionsOf(unresolved_call->caller);
   const int32_t distance = destination_text - call_text_offset;
   {
-    uword addr = Instructions::PayloadStart(caller) + call_offset;
-    if (FLAG_write_protect_code) {
-      addr -= HeapPage::Of(caller)->AliasOffset();
-    }
-    PcRelativeCallPattern call(addr);
+    PcRelativeCallPattern call(Instructions::PayloadStart(caller) +
+                               call_offset);
     ASSERT(call.IsValid());
     call.set_distance(static_cast<int32_t>(distance));
     ASSERT(call.distance() == distance);
diff --git a/runtime/vm/heap/marker.cc b/runtime/vm/heap/marker.cc
index da86a36..03c9683 100644
--- a/runtime/vm/heap/marker.cc
+++ b/runtime/vm/heap/marker.cc
@@ -368,10 +368,6 @@
   }
 
   static bool TryAcquireMarkBit(RawObject* raw_obj) {
-    if (FLAG_write_protect_code && raw_obj->IsInstructions()) {
-      // A non-writable alias mapping may exist for instruction pages.
-      raw_obj = HeapPage::ToWritable(raw_obj);
-    }
     if (!sync) {
       raw_obj->SetMarkBitUnsynchronized();
       return true;
diff --git a/runtime/vm/heap/pages.cc b/runtime/vm/heap/pages.cc
index 19d8f04..e1ac801 100644
--- a/runtime/vm/heap/pages.cc
+++ b/runtime/vm/heap/pages.cc
@@ -57,8 +57,11 @@
 HeapPage* HeapPage::Allocate(intptr_t size_in_words,
                              PageType type,
                              const char* name) {
+  bool is_executable = (type == kExecutable);
+  // Create the new page executable (RWX) only if we're not in W^X mode.
+  bool create_executable = !FLAG_write_protect_code && is_executable;
   VirtualMemory* memory = VirtualMemory::AllocateAligned(
-      size_in_words << kWordSizeLog2, kPageSize, type == kExecutable, name);
+      size_in_words << kWordSizeLog2, kPageSize, create_executable, name);
   if (memory == NULL) {
     return NULL;
   }
@@ -211,7 +214,7 @@
 
   VirtualMemory::Protection prot;
   if (read_only) {
-    if ((type_ == kExecutable) && (memory_->AliasOffset() == 0)) {
+    if (type_ == kExecutable) {
       prot = VirtualMemory::kReadExecute;
     } else {
       prot = VirtualMemory::kReadOnly;
diff --git a/runtime/vm/heap/pages.h b/runtime/vm/heap/pages.h
index d7381d7..98d480f 100644
--- a/runtime/vm/heap/pages.h
+++ b/runtime/vm/heap/pages.h
@@ -40,8 +40,7 @@
   HeapPage* next() const { return next_; }
   void set_next(HeapPage* next) { next_ = next; }
 
-  bool Contains(uword addr) const { return memory_->Contains(addr); }
-  intptr_t AliasOffset() const { return memory_->AliasOffset(); }
+  bool Contains(uword addr) { return memory_->Contains(addr); }
 
   uword object_start() const { return memory_->start() + ObjectStartOffset(); }
   uword object_end() const { return object_end_; }
@@ -71,8 +70,7 @@
   }
 
   // Warning: This does not work for objects on image pages because image pages
-  // are not aligned. However, it works for objects on large pages, because
-  // only one object is allocated per large page.
+  // are not aligned.
   static HeapPage* Of(RawObject* obj) {
     ASSERT(obj->IsHeapObject());
     ASSERT(obj->IsOldObject());
@@ -80,45 +78,10 @@
                                        kPageMask);
   }
 
-  // Warning: This does not work for addresses on image pages or on large pages.
-  static HeapPage* Of(uword addr) {
+  static HeapPage* Of(uintptr_t addr) {
     return reinterpret_cast<HeapPage*>(addr & kPageMask);
   }
 
-  // Warning: This does not work for objects on image pages.
-  static RawObject* ToExecutable(RawObject* obj) {
-    HeapPage* page = Of(obj);
-    VirtualMemory* memory = page->memory_;
-    const intptr_t alias_offset = memory->AliasOffset();
-    if (alias_offset == 0) {
-      return obj;  // Not aliased.
-    }
-    uword addr = RawObject::ToAddr(obj);
-    if (memory->Contains(addr)) {
-      return RawObject::FromAddr(addr + alias_offset);
-    }
-    // obj is executable.
-    ASSERT(memory->ContainsAlias(addr));
-    return obj;
-  }
-
-  // Warning: This does not work for objects on image pages.
-  static RawObject* ToWritable(RawObject* obj) {
-    HeapPage* page = Of(obj);
-    VirtualMemory* memory = page->memory_;
-    const intptr_t alias_offset = memory->AliasOffset();
-    if (alias_offset == 0) {
-      return obj;  // Not aliased.
-    }
-    uword addr = RawObject::ToAddr(obj);
-    if (memory->ContainsAlias(addr)) {
-      return RawObject::FromAddr(addr - alias_offset);
-    }
-    // obj is writable.
-    ASSERT(memory->Contains(addr));
-    return obj;
-  }
-
   // 1 card = 128 slots.
   static const intptr_t kSlotsPerCardLog2 = 7;
   static const intptr_t kBytesPerCardLog2 = kWordSizeLog2 + kSlotsPerCardLog2;
diff --git a/runtime/vm/heap/verifier.cc b/runtime/vm/heap/verifier.cc
index 237777e..8ea2fdb 100644
--- a/runtime/vm/heap/verifier.cc
+++ b/runtime/vm/heap/verifier.cc
@@ -49,10 +49,6 @@
     RawObject* raw_obj = *current;
     if (raw_obj->IsHeapObject()) {
       if (!allocated_set_->Contains(raw_obj)) {
-        if (raw_obj->IsInstructions() &&
-            allocated_set_->Contains(HeapPage::ToWritable(raw_obj))) {
-          continue;
-        }
         uword raw_addr = RawObject::ToAddr(raw_obj);
         FATAL1("Invalid object pointer encountered %#" Px "\n", raw_addr);
       }
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index 7014578..864555d 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -2015,12 +2015,7 @@
 bool Object::IsReadOnly() const {
   if (FLAG_verify_handles && raw()->IsReadOnly()) {
     Heap* vm_isolate_heap = Dart::vm_isolate()->heap();
-    uword addr = RawObject::ToAddr(raw());
-    if (!vm_isolate_heap->Contains(addr)) {
-      ASSERT(FLAG_write_protect_code);
-      addr = RawObject::ToAddr(HeapPage::ToWritable(raw()));
-      ASSERT(vm_isolate_heap->Contains(addr));
-    }
+    ASSERT(vm_isolate_heap->Contains(RawObject::ToAddr(raw())));
   }
   return raw()->IsReadOnly();
 }
@@ -2077,12 +2072,8 @@
       Isolate* isolate = Isolate::Current();
       Heap* isolate_heap = isolate->heap();
       Heap* vm_isolate_heap = Dart::vm_isolate()->heap();
-      uword addr = RawObject::ToAddr(raw_);
-      if (!isolate_heap->Contains(addr) && !vm_isolate_heap->Contains(addr)) {
-        ASSERT(FLAG_write_protect_code);
-        addr = RawObject::ToAddr(HeapPage::ToWritable(raw_));
-        ASSERT(isolate_heap->Contains(addr) || vm_isolate_heap->Contains(addr));
-      }
+      ASSERT(isolate_heap->Contains(RawObject::ToAddr(raw_)) ||
+             vm_isolate_heap->Contains(RawObject::ToAddr(raw_)));
     }
   }
 #endif
@@ -14571,24 +14562,6 @@
                                object->raw());
     }
 
-    // Write protect instructions and, if supported by OS, use dual mapping
-    // for execution.
-    if (FLAG_write_protect_code) {
-      uword address = RawObject::ToAddr(instrs.raw());
-      // Check if a dual mapping exists.
-      instrs = Instructions::RawCast(HeapPage::ToExecutable(instrs.raw()));
-      uword exec_address = RawObject::ToAddr(instrs.raw());
-      if (exec_address != address) {
-        VirtualMemory::Protect(reinterpret_cast<void*>(address),
-                               instrs.raw()->HeapSize(),
-                               VirtualMemory::kReadOnly);
-        address = exec_address;
-      }
-      VirtualMemory::Protect(reinterpret_cast<void*>(address),
-                             instrs.raw()->HeapSize(),
-                             VirtualMemory::kReadExecute);
-    }
-
     // Hook up Code and Instructions objects.
     code.SetActiveInstructions(instrs);
     code.set_instructions(instrs);
@@ -14599,6 +14572,13 @@
       code.set_object_pool(object_pool->raw());
     }
 
+    if (FLAG_write_protect_code) {
+      uword address = RawObject::ToAddr(instrs.raw());
+      VirtualMemory::Protect(reinterpret_cast<void*>(address),
+                             instrs.raw()->HeapSize(),
+                             VirtualMemory::kReadExecute);
+    }
+
 #if defined(DART_PRECOMPILER)
     if (stats != nullptr) {
       stats->Finalize();
diff --git a/runtime/vm/object.h b/runtime/vm/object.h
index 05ba3eb..e19f357 100644
--- a/runtime/vm/object.h
+++ b/runtime/vm/object.h
@@ -4651,7 +4651,6 @@
 
 class Code : public Object {
  public:
-  // When dual mapping, this returns the executable view.
   RawInstructions* active_instructions() const {
 #if defined(DART_PRECOMPILED_RUNTIME)
     UNREACHABLE();
@@ -4661,7 +4660,6 @@
 #endif
   }
 
-  // When dual mapping, these return the executable view.
   RawInstructions* instructions() const { return raw_ptr()->instructions_; }
   static RawInstructions* InstructionsOf(const RawCode* code) {
     return code->ptr()->instructions_;
@@ -5142,7 +5140,6 @@
 
   FINAL_HEAP_OBJECT_IMPLEMENTATION(Code, Object);
   friend class Class;
-  friend class CodeTestHelper;
   friend class SnapshotWriter;
   friend class StubCode;     // for set_object_pool
   friend class Precompiler;  // for set_object_pool
@@ -9143,12 +9140,8 @@
     Isolate* isolate = Isolate::Current();
     Heap* isolate_heap = isolate->heap();
     Heap* vm_isolate_heap = Dart::vm_isolate()->heap();
-    uword addr = RawObject::ToAddr(raw_);
-    if (!isolate_heap->Contains(addr) && !vm_isolate_heap->Contains(addr)) {
-      ASSERT(FLAG_write_protect_code);
-      addr = RawObject::ToAddr(HeapPage::ToWritable(raw_));
-      ASSERT(isolate_heap->Contains(addr) || vm_isolate_heap->Contains(addr));
-    }
+    ASSERT(isolate_heap->Contains(RawObject::ToAddr(raw_)) ||
+           vm_isolate_heap->Contains(RawObject::ToAddr(raw_)));
   }
 #endif
 }
diff --git a/runtime/vm/object_graph.cc b/runtime/vm/object_graph.cc
index 3e52f53..5a5e740 100644
--- a/runtime/vm/object_graph.cc
+++ b/runtime/vm/object_graph.cc
@@ -44,12 +44,7 @@
         if (!include_vm_objects_ && !IsUserClass((*current)->GetClassId())) {
           continue;
         }
-        if (FLAG_write_protect_code && (*current)->IsInstructions()) {
-          // A non-writable alias mapping may exist for instruction pages.
-          HeapPage::ToWritable(*current)->SetGraphMarked();
-        } else {
-          (*current)->SetGraphMarked();
-        }
+        (*current)->SetGraphMarked();
         Node node;
         node.ptr = current;
         node.obj = *current;
diff --git a/runtime/vm/object_test.cc b/runtime/vm/object_test.cc
index 4664ee7..c0d9526 100644
--- a/runtime/vm/object_test.cc
+++ b/runtime/vm/object_test.cc
@@ -23,7 +23,6 @@
 
 namespace dart {
 
-DECLARE_FLAG(bool, dual_map_code);
 DECLARE_FLAG(bool, write_protect_code);
 
 static RawClass* CreateDummyClass(const String& class_name,
@@ -2508,56 +2507,8 @@
   if (!FLAG_write_protect_code) {
     // Since this test is expected to crash, crash if write protection of code
     // is switched off.
-    FATAL("Test requires --write-protect-code; skip by forcing expected crash");
-  }
-  MallocHooks::set_stack_trace_collection_enabled(
-      stack_trace_collection_enabled);
-}
-
-class CodeTestHelper {
- public:
-  static void SetInstructions(const Code& code,
-                              const Instructions& instructions) {
-    code.SetActiveInstructions(instructions);
-    code.set_instructions(instructions);
-  }
-};
-
-// Test for executability of generated instructions. The test crashes with a
-// segmentation fault when executing the writeable view.
-ISOLATE_UNIT_TEST_CASE(CodeExecutability) {
-  bool stack_trace_collection_enabled =
-      MallocHooks::stack_trace_collection_enabled();
-  MallocHooks::set_stack_trace_collection_enabled(false);
-  extern void GenerateIncrement(Assembler * assembler);
-  ObjectPoolBuilder object_pool_builder;
-  Assembler _assembler_(&object_pool_builder);
-  GenerateIncrement(&_assembler_);
-  const Function& function = Function::Handle(CreateFunction("Test_Code"));
-  Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
-      function, nullptr, &_assembler_, Code::PoolAttachment::kAttachPool));
-  function.AttachCode(code);
-  Instructions& instructions = Instructions::Handle(code.instructions());
-  uword payload_start = instructions.PayloadStart();
-  EXPECT_EQ(instructions.raw(), Instructions::FromPayloadStart(payload_start));
-  // Execute the executable view of the instructions (default).
-  Object& result =
-      Object::Handle(DartEntry::InvokeFunction(function, Array::empty_array()));
-  EXPECT_EQ(1, Smi::Cast(result).Value());
-  // Switch to the writeable but non-executable view of the instructions.
-  instructions ^= HeapPage::ToWritable(instructions.raw());
-  payload_start = instructions.PayloadStart();
-  EXPECT_EQ(instructions.raw(), Instructions::FromPayloadStart(payload_start));
-  // Hook up Code and Instructions objects.
-  CodeTestHelper::SetInstructions(code, instructions);
-  function.AttachCode(code);
-  // Try executing the generated code, expected to crash.
-  result = DartEntry::InvokeFunction(function, Array::empty_array());
-  EXPECT_EQ(1, Smi::Cast(result).Value());
-  if (!FLAG_dual_map_code) {
-    // Since this test is expected to crash, crash if dual mapping of code
-    // is switched off.
-    FATAL("Test requires --dual-map-code; skip by forcing expected crash");
+    // TODO(regis, fschneider): Should this be FATAL() instead?
+    OS::DebugBreak();
   }
   MallocHooks::set_stack_trace_collection_enabled(
       stack_trace_collection_enabled);
diff --git a/runtime/vm/os_linux.cc b/runtime/vm/os_linux.cc
index 20db94d..1079bdb 100644
--- a/runtime/vm/os_linux.cc
+++ b/runtime/vm/os_linux.cc
@@ -558,7 +558,9 @@
   }
 }
 
-// TODO(regis): Function called only from the simulator.
+// TODO(regis, iposva): When this function is no longer called from the
+// CodeImmutability test in object_test.cc, it will be called only from the
+// simulator, which means that only the Intel implementation is needed.
 void OS::DebugBreak() {
   __builtin_trap();
 }
diff --git a/runtime/vm/virtual_memory.cc b/runtime/vm/virtual_memory.cc
index 48ba867..6d73ba0 100644
--- a/runtime/vm/virtual_memory.cc
+++ b/runtime/vm/virtual_memory.cc
@@ -17,18 +17,12 @@
 void VirtualMemory::Truncate(intptr_t new_size) {
   ASSERT(Utils::IsAligned(new_size, PageSize()));
   ASSERT(new_size <= size());
-  if (reserved_.size() ==
-      region_.size()) {  // Don't create holes in reservation.
+  if (reserved_.size() == region_.size()) {  // Don't create holes in reservation.
     FreeSubSegment(reinterpret_cast<void*>(start() + new_size),
                    size() - new_size);
     reserved_.set_size(new_size);
-    if (AliasOffset() != 0) {
-      FreeSubSegment(reinterpret_cast<void*>(alias_.start() + new_size),
-                     alias_.size() - new_size);
-    }
   }
   region_.Subregion(region_, 0, new_size);
-  alias_.Subregion(alias_, 0, new_size);
 }
 
 VirtualMemory* VirtualMemory::ForImagePage(void* pointer, uword size) {
@@ -37,7 +31,7 @@
   MemoryRegion region(pointer, size);
   MemoryRegion reserved(0, 0);  // NULL reservation indicates VM should not
                                 // attempt to free this memory.
-  VirtualMemory* memory = new VirtualMemory(region, region, reserved);
+  VirtualMemory* memory = new VirtualMemory(region, reserved);
   ASSERT(!memory->vm_owns_region());
   return memory;
 }
diff --git a/runtime/vm/virtual_memory.h b/runtime/vm/virtual_memory.h
index 496e63c..6d74541 100644
--- a/runtime/vm/virtual_memory.h
+++ b/runtime/vm/virtual_memory.h
@@ -28,14 +28,10 @@
   uword end() const { return region_.end(); }
   void* address() const { return region_.pointer(); }
   intptr_t size() const { return region_.size(); }
-  intptr_t AliasOffset() const { return alias_.start() - region_.start(); }
 
   static void Init();
 
   bool Contains(uword addr) const { return region_.Contains(addr); }
-  bool ContainsAlias(uword addr) const {
-    return (AliasOffset() != 0) && alias_.Contains(addr);
-  }
 
   // Changes the protection of the virtual memory area.
   static void Protect(void* address, intptr_t size, Protection mode);
@@ -76,22 +72,14 @@
   // can give back the virtual memory to the system. Returns true on success.
   static void FreeSubSegment(void* address, intptr_t size);
 
-  // These constructors are only used internally when reserving new virtual
-  // spaces. They do not reserve any virtual address space on their own.
+  // This constructor is only used internally when reserving new virtual spaces.
+  // It does not reserve any virtual address space on its own.
   VirtualMemory(const MemoryRegion& region,
-                const MemoryRegion& alias,
                 const MemoryRegion& reserved)
-      : region_(region), alias_(alias), reserved_(reserved) {}
-
-  VirtualMemory(const MemoryRegion& region, const MemoryRegion& reserved)
-      : region_(region), alias_(region), reserved_(reserved) {}
+      : region_(region), reserved_(reserved) {}
 
   MemoryRegion region_;
 
-  // Optional secondary mapping of region_ to a virtual space with different
-  // protection, e.g. allowing code execution.
-  MemoryRegion alias_;
-
   // The underlying reservation not yet given back to the OS.
   // Its address might disagree with region_ due to aligned allocations.
   // Its size might disagree with region_ due to Truncate.
@@ -99,10 +87,6 @@
 
   static uword page_size_;
 
-#if defined(HOST_OS_FUCHSIA)
-  static uword base_;  // Cached base of root vmar.
-#endif
-
   DISALLOW_IMPLICIT_CONSTRUCTORS(VirtualMemory);
 };
 
diff --git a/runtime/vm/virtual_memory_fuchsia.cc b/runtime/vm/virtual_memory_fuchsia.cc
index b627ddf..63c47ae 100644
--- a/runtime/vm/virtual_memory_fuchsia.cc
+++ b/runtime/vm/virtual_memory_fuchsia.cc
@@ -35,29 +35,15 @@
 
 namespace dart {
 
-DECLARE_FLAG(bool, dual_map_code);
 DECLARE_FLAG(bool, write_protect_code);
 
 uword VirtualMemory::page_size_ = 0;
-uword VirtualMemory::base_ = 0;
 
 void VirtualMemory::Init() {
   page_size_ = getpagesize();
-
-  // Cache the base of zx_vmar_root_self() which is used to align mappings.
-  zx_info_vmar_t buf[1];
-  size_t actual;
-  size_t avail;
-  zx_status_t status =
-      zx_object_get_info(zx_vmar_root_self(), ZX_INFO_VMAR, buf,
-                         sizeof(zx_info_vmar_t), &actual, &avail);
-  if (status != ZX_OK) {
-    FATAL1("zx_object_get_info failed: %s\n", zx_status_get_string(status));
-  }
-  base_ = buf[0].base;
 }
 
-static void Unmap(zx_handle_t vmar, uword start, uword end) {
+static void unmap(zx_handle_t vmar, uword start, uword end) {
   ASSERT(start <= end);
   const uword size = end - start;
   if (size == 0) {
@@ -70,61 +56,28 @@
   }
 }
 
-static void* MapAligned(zx_handle_t vmar,
-                        zx_handle_t vmo,
-                        zx_vm_option_t options,
-                        uword size,
-                        uword alignment,
-                        uword vmar_base,
-                        uword padded_size) {
-  uword base;
-  zx_status_t status =
-      zx_vmar_map(vmar, options, 0, vmo, 0u, padded_size, &base);
-  LOG_INFO("zx_vmar_map(%u, 0x%lx, 0x%lx)\n", options, base, padded_size);
-
-  if (status != ZX_OK) {
-    LOG_ERR("zx_vmar_map(%u, 0x%lx, 0x%lx) failed: %s\n", options, base,
-            padded_size, zx_status_get_string(status));
-    return NULL;
-  }
-  const uword aligned_base = Utils::RoundUp(base, alignment);
-  const zx_vm_option_t overwrite_options = options | ZX_VM_SPECIFIC_OVERWRITE;
-  status = zx_vmar_map(vmar, overwrite_options, aligned_base - vmar_base, vmo,
-                       0u, size, &base);
-  LOG_INFO("zx_vmar_map(%u, 0x%lx, 0x%lx)\n", overwrite_options,
-           aligned_base - vmar_base, size);
-
-  if (status != ZX_OK) {
-    LOG_ERR("zx_vmar_map(%u, 0x%lx, 0x%lx) failed: %s\n", overwrite_options,
-            aligned_base - vmar_base, size, zx_status_get_string(status));
-    return NULL;
-  }
-  ASSERT(base == aligned_base);
-  return reinterpret_cast<void*>(base);
-}
-
 VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
                                               intptr_t alignment,
                                               bool is_executable,
                                               const char* name) {
-  // When FLAG_write_protect_code is active, code memory (indicated by
-  // is_executable = true) is allocated as non-executable and later
-  // changed to executable via VirtualMemory::Protect, which requires
-  // ZX_RIGHT_EXECUTE on the underlying VMO.
-  // In addition, dual mapping of the same underlying code memory is provided.
-  const bool dual_mapping =
-      is_executable && FLAG_write_protect_code && FLAG_dual_map_code;
+  // When FLAG_write_protect_code is active, the VM allocates code
+  // memory with !is_executable, and later changes it to executable via
+  // VirtualMemory::Protect, which requires ZX_RIGHT_EXECUTE on the
+  // underlying VMO. Conservatively assume all memory needs to be
+  // executable in this mode.
+  // TODO(mdempsky): Make into parameter.
+  const bool can_prot_exec = FLAG_write_protect_code;
 
   ASSERT(Utils::IsAligned(size, page_size_));
   ASSERT(Utils::IsPowerOfTwo(alignment));
   ASSERT(Utils::IsAligned(alignment, page_size_));
-  const intptr_t padded_size = size + alignment - page_size_;
+  const intptr_t allocated_size = size + alignment - page_size_;
 
   zx_handle_t vmar = zx_vmar_root_self();
   zx_handle_t vmo = ZX_HANDLE_INVALID;
-  zx_status_t status = zx_vmo_create(size, 0u, &vmo);
+  zx_status_t status = zx_vmo_create(allocated_size, 0u, &vmo);
   if (status != ZX_OK) {
-    LOG_ERR("zx_vmo_create(0x%lx) failed: %s\n", size,
+    LOG_ERR("zx_vmo_create(%ld) failed: %s\n", size,
             zx_status_get_string(status));
     return NULL;
   }
@@ -133,9 +86,9 @@
     zx_object_set_property(vmo, ZX_PROP_NAME, name, strlen(name));
   }
 
-  if (is_executable) {
+  if (is_executable || can_prot_exec) {
     // Add ZX_RIGHT_EXECUTE permission to VMO, so it can be mapped
-    // into memory as executable (now or later).
+    // into memory as executable.
     status = zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo);
     if (status != ZX_OK) {
       LOG_ERR("zx_vmo_replace_as_executable() failed: %s\n",
@@ -144,59 +97,39 @@
     }
   }
 
-  const zx_vm_option_t region_options =
-      ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
-      ((is_executable && !FLAG_write_protect_code) ? ZX_VM_PERM_EXECUTE : 0);
-  void* region_ptr = MapAligned(vmar, vmo, region_options, size, alignment,
-                                base_, padded_size);
-  if (region_ptr == NULL) {
+  const zx_vm_option_t options = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
+                                 (is_executable ? ZX_VM_PERM_EXECUTE : 0);
+  uword base;
+  status = zx_vmar_map(vmar, options, 0u, vmo, 0u, allocated_size, &base);
+  zx_handle_close(vmo);
+  if (status != ZX_OK) {
+    LOG_ERR("zx_vmar_map(%u, %ld) failed: %s\n", flags, size,
+            zx_status_get_string(status));
     return NULL;
   }
-  MemoryRegion region(region_ptr, size);
 
-  VirtualMemory* result;
+  const uword aligned_base = Utils::RoundUp(base, alignment);
 
-  if (dual_mapping) {
-    // ZX_VM_PERM_EXECUTE is added later via VirtualMemory::Protect.
-    const zx_vm_option_t alias_options = ZX_VM_PERM_READ;
-    void* alias_ptr = MapAligned(vmar, vmo, alias_options, size, alignment,
-                                 base_, padded_size);
-    if (alias_ptr == NULL) {
-      const uword region_base = reinterpret_cast<uword>(region_ptr);
-      Unmap(vmar, region_base, region_base + size);
-      return NULL;
-    }
-    ASSERT(region_ptr != alias_ptr);
-    MemoryRegion alias(alias_ptr, size);
-    result = new VirtualMemory(region, alias, region);
-  } else {
-    result = new VirtualMemory(region, region, region);
-  }
-  zx_handle_close(vmo);
-  return result;
+  unmap(vmar, base, aligned_base);
+  unmap(vmar, aligned_base + size, base + allocated_size);
+
+  MemoryRegion region(reinterpret_cast<void*>(aligned_base), size);
+  return new VirtualMemory(region, region);
 }
 
 VirtualMemory::~VirtualMemory() {
   // Reserved region may be empty due to VirtualMemory::Truncate.
   if (vm_owns_region() && reserved_.size() != 0) {
-    Unmap(zx_vmar_root_self(), reserved_.start(), reserved_.end());
-    LOG_INFO("zx_vmar_unmap(0x%lx, 0x%lx) success\n", reserved_.start(),
+    unmap(zx_vmar_root_self(), reserved_.start(), reserved_.end());
+    LOG_INFO("zx_vmar_unmap(%lx, %lx) success\n", reserved_.start(),
              reserved_.size());
-
-    const intptr_t alias_offset = AliasOffset();
-    if (alias_offset != 0) {
-      Unmap(zx_vmar_root_self(), reserved_.start() + alias_offset,
-            reserved_.end() + alias_offset);
-      LOG_INFO("zx_vmar_unmap(0x%lx, 0x%lx) success\n",
-               reserved_.start() + alias_offset, reserved_.size());
-    }
   }
 }
 
 void VirtualMemory::FreeSubSegment(void* address, intptr_t size) {
   const uword start = reinterpret_cast<uword>(address);
-  Unmap(zx_vmar_root_self(), start, start + size);
-  LOG_INFO("zx_vmar_unmap(0x%p, 0x%lx) success\n", address, size);
+  unmap(zx_vmar_root_self(), start, start + size);
+  LOG_INFO("zx_vmar_unmap(%p, %lx) success\n", address, size);
 }
 
 void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
@@ -228,12 +161,12 @@
   }
   zx_status_t status = zx_vmar_protect(zx_vmar_root_self(), prot, page_address,
                                        end_address - page_address);
-  LOG_INFO("zx_vmar_protect(%u, 0x%lx, 0x%lx)\n", prot, page_address,
-           end_address - page_address);
   if (status != ZX_OK) {
-    FATAL3("zx_vmar_protect(0x%lx, 0x%lx) failed: %s\n", page_address,
+    FATAL3("zx_vmar_protect(%lx, %lx) failed: %s\n", page_address,
            end_address - page_address, zx_status_get_string(status));
   }
+  LOG_INFO("zx_vmar_protect(%lx, %lx, %x) success\n", page_address,
+           end_address - page_address, prot);
 }
 
 }  // namespace dart
diff --git a/runtime/vm/virtual_memory_posix.cc b/runtime/vm/virtual_memory_posix.cc
index 8767250..31c7f15 100644
--- a/runtime/vm/virtual_memory_posix.cc
+++ b/runtime/vm/virtual_memory_posix.cc
@@ -9,7 +9,6 @@
 
 #include <errno.h>
 #include <sys/mman.h>
-#include <sys/syscall.h>
 #include <unistd.h>
 
 #include "platform/assert.h"
@@ -17,13 +16,6 @@
 
 #include "vm/isolate.h"
 
-// #define VIRTUAL_MEMORY_LOGGING 1
-#if defined(VIRTUAL_MEMORY_LOGGING)
-#define LOG_INFO(msg, ...) OS::PrintErr(msg, ##__VA_ARGS__)
-#else
-#define LOG_INFO(msg, ...)
-#endif  // defined(VIRTUAL_MEMORY_LOGGING)
-
 namespace dart {
 
 // standard MAP_FAILED causes "error: use of old-style cast" as it
@@ -31,9 +23,6 @@
 #undef MAP_FAILED
 #define MAP_FAILED reinterpret_cast<void*>(-1)
 
-DECLARE_FLAG(bool, dual_map_code);
-DECLARE_FLAG(bool, write_protect_code);
-
 uword VirtualMemory::page_size_ = 0;
 
 void VirtualMemory::Init() {
@@ -56,97 +45,17 @@
   }
 }
 
-#if defined(DUAL_MAPPING_SUPPORTED)
-// Wrapper to call memfd_create syscall.
-static inline int memfd_create(const char* name, unsigned int flags) {
-  return syscall(__NR_memfd_create, name, flags);
-}
-
-static void* MapAligned(int fd,
-                        int prot,
-                        intptr_t size,
-                        intptr_t alignment,
-                        intptr_t allocated_size) {
-  void* address =
-      mmap(NULL, allocated_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-  LOG_INFO("mmap(NULL, 0x%" Px ", PROT_NONE, ...): %p\n", allocated_size,
-           address);
-  if (address == MAP_FAILED) {
-    return NULL;
-  }
-
-  const uword base = reinterpret_cast<uword>(address);
-  const uword aligned_base = Utils::RoundUp(base, alignment);
-
-  // Guarantee the alignment by mapping at a fixed address inside the above
-  // mapping. Overlapping region will be automatically discarded in the above
-  // mapping. Manually discard non-overlapping regions.
-  address = mmap(reinterpret_cast<void*>(aligned_base), size, prot,
-                 MAP_SHARED | MAP_FIXED, fd, 0);
-  LOG_INFO("mmap(0x%" Px ", 0x%" Px ", %u, ...): %p\n", aligned_base, size,
-           prot, address);
-  if (address == MAP_FAILED) {
-    unmap(base, base + allocated_size);
-    return NULL;
-  }
-  ASSERT(address == reinterpret_cast<void*>(aligned_base));
-  unmap(base, aligned_base);
-  unmap(aligned_base + size, base + allocated_size);
-  return address;
-}
-#endif  // defined(DUAL_MAPPING_SUPPORTED)
-
 VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
                                               intptr_t alignment,
                                               bool is_executable,
                                               const char* name) {
-  // When FLAG_write_protect_code is active, code memory (indicated by
-  // is_executable = true) is allocated as non-executable and later
-  // changed to executable via VirtualMemory::Protect.
   ASSERT(Utils::IsAligned(size, page_size_));
   ASSERT(Utils::IsPowerOfTwo(alignment));
   ASSERT(Utils::IsAligned(alignment, page_size_));
   const intptr_t allocated_size = size + alignment - page_size_;
-#if defined(DUAL_MAPPING_SUPPORTED)
-  int fd = -1;
-  const bool dual_mapping =
-      is_executable && FLAG_write_protect_code && FLAG_dual_map_code;
-  if (dual_mapping) {
-    fd = memfd_create("/dual_mapping", 0);
-    if ((fd == -1) || (ftruncate(fd, size) == -1)) {
-      close(fd);
-      return NULL;
-    }
-    const int region_prot = PROT_READ | PROT_WRITE;
-    void* region_ptr =
-        MapAligned(fd, region_prot, size, alignment, allocated_size);
-    if (region_ptr == NULL) {
-      close(fd);
-      return NULL;
-    }
-    MemoryRegion region(region_ptr, size);
-    // PROT_EXEC is added later via VirtualMemory::Protect.
-    const int alias_prot = PROT_READ;
-    void* alias_ptr =
-        MapAligned(fd, alias_prot, size, alignment, allocated_size);
-    close(fd);
-    if (alias_ptr == NULL) {
-      const uword region_base = reinterpret_cast<uword>(region_ptr);
-      unmap(region_base, region_base + size);
-      return NULL;
-    }
-    ASSERT(region_ptr != alias_ptr);
-    MemoryRegion alias(alias_ptr, size);
-    return new VirtualMemory(region, alias, region);
-  }
-#endif  // defined(DUAL_MAPPING_SUPPORTED)
-  const int prot =
-      PROT_READ | PROT_WRITE |
-      ((is_executable && !FLAG_write_protect_code) ? PROT_EXEC : 0);
+  const int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
   void* address =
-      mmap(NULL, allocated_size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-  LOG_INFO("mmap(NULL, 0x%" Px ", %u, ...): %p\n", allocated_size, prot,
-           address);
+      mmap(NULL, allocated_size, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
   if (address == MAP_FAILED) {
     return NULL;
   }
@@ -164,10 +73,6 @@
 VirtualMemory::~VirtualMemory() {
   if (vm_owns_region()) {
     unmap(reserved_.start(), reserved_.end());
-    const intptr_t alias_offset = AliasOffset();
-    if (alias_offset != 0) {
-      unmap(reserved_.start() + alias_offset, reserved_.end() + alias_offset);
-    }
   }
 }
 
@@ -209,15 +114,11 @@
     int error = errno;
     const int kBufferSize = 1024;
     char error_buf[kBufferSize];
-    LOG_INFO("mprotect(0x%" Px ", 0x%" Px ", %u) failed\n", page_address,
-             end_address - page_address, prot);
     FATAL2("mprotect error: %d (%s)", error,
            Utils::StrError(error, error_buf, kBufferSize));
   }
-  LOG_INFO("mprotect(0x%" Px ", 0x%" Px ", %u) ok\n", page_address,
-           end_address - page_address, prot);
 }
 
 }  // namespace dart
 
-#endif  // defined(HOST_OS_ANDROID ... HOST_OS_LINUX ... HOST_OS_MACOS)
+#endif  // defined(HOST_OS_ANDROID) || defined(HOST_OS_LINUX) || defined(HOST_OS_MACOS)
diff --git a/runtime/vm/virtual_memory_win.cc b/runtime/vm/virtual_memory_win.cc
index 3ec743f0..9c917b3 100644
--- a/runtime/vm/virtual_memory_win.cc
+++ b/runtime/vm/virtual_memory_win.cc
@@ -14,8 +14,6 @@
 
 namespace dart {
 
-DECLARE_FLAG(bool, write_protect_code);
-
 uword VirtualMemory::page_size_ = 0;
 
 void VirtualMemory::Init() {
@@ -28,16 +26,11 @@
                                               intptr_t alignment,
                                               bool is_executable,
                                               const char* name) {
-  // When FLAG_write_protect_code is active, code memory (indicated by
-  // is_executable = true) is allocated as non-executable and later
-  // changed to executable via VirtualMemory::Protect.
   ASSERT(Utils::IsAligned(size, page_size_));
   ASSERT(Utils::IsPowerOfTwo(alignment));
   ASSERT(Utils::IsAligned(alignment, page_size_));
   intptr_t reserved_size = size + alignment - page_size_;
-  int prot = (is_executable && !FLAG_write_protect_code)
-                 ? PAGE_EXECUTE_READWRITE
-                 : PAGE_READWRITE;
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
   void* address = VirtualAlloc(NULL, reserved_size, MEM_RESERVE, prot);
   if (address == NULL) {
     return NULL;
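
With dual mapping reverted, the VM is back to the single-mapping W^X flow
visible in the re-added runtime/vm/object.cc hunk: code pages are allocated
read-write and each finalized Instructions object is flipped to read-execute,
with WritableInstructionsScope temporarily restoring write access for
patching. A minimal sketch of that flip, assuming POSIX mprotect semantics
(the helper name is illustrative, not the VM's API):

  #include <stdlib.h>
  #include <sys/mman.h>

  // Illustrative sketch: protect finalized code read-execute, mirroring the
  // VirtualMemory::Protect call re-added in object.cc.
  static void MakeCodeExecutable(void* page_aligned_start, size_t size) {
    if (mprotect(page_aligned_start, size, PROT_READ | PROT_EXEC) != 0) {
      abort();  // The VM treats a failed protection change as fatal.
    }
    // Subsequent in-place patching briefly flips the region back to
    // read-write (see WritableInstructionsScope in code_patcher.h).
  }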