[vm] Eliminate Mutex/Monitor indirection where possible

This replaces heap-allocated Mutex*/Monitor* members with by-value
members, removing one allocation and one pointer indirection per object.
In many cases, the Mutexes and Monitors have to be marked "mutable"
because they are used to synchronize const accessor methods.
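
The change is mechanical; each affected class goes roughly from the
first shape to the second. This is a minimal sketch with an invented
class name (Registry), not code from any one file; Mutex is the VM's
own class (declared in runtime/vm/os_thread.h):

  // Before: the lock is heap-allocated and freed in the destructor.
  class Registry {
   public:
    Registry() : mutex_(new Mutex()) {}
    ~Registry() { delete mutex_; }
    intptr_t Count() const {
      // OK in a const method: only the pointee is locked.
      MutexLocker ml(mutex_);
      return count_;
    }
   private:
    Mutex* mutex_;
    intptr_t count_ = 0;
  };

  // After: the lock is embedded by value, so there is no allocation
  // and no destructor body. "mutable" lets const accessors lock it,
  // and lock sites now pass &mutex_ instead of mutex_.
  class Registry {
   public:
    intptr_t Count() const {
      MutexLocker ml(&mutex_);
      return count_;
    }
   private:
    mutable Mutex mutex_;
    intptr_t count_ = 0;
  };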

Small text segment improvement for Product builds:

$ size dart.{arm,x64}.{before,after}
   text	   data	    bss	    dec	    hex	filename
19726069	 409960	 392332	20528361	1393ce9	dart.arm.before
19725525	 409960	 392332	20527817	1393ac9	dart.arm.after
22576021	 600376	1782824	24959221	17cd8f5	dart.x64.before
22574821	 600376	1782824	24958021	17cd445	dart.x64.after

Change-Id: I68f5cd5ad452044df8bfebd160910496036a3e6b
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/101745
Commit-Queue: Matthew Dempsky <mdempsky@google.com>
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
diff --git a/runtime/bin/eventhandler_fuchsia.cc b/runtime/bin/eventhandler_fuchsia.cc
index 9f10a7e..837b7a0 100644
--- a/runtime/bin/eventhandler_fuchsia.cc
+++ b/runtime/bin/eventhandler_fuchsia.cc
@@ -76,7 +76,7 @@
 namespace bin {
 
 intptr_t IOHandle::Read(void* buffer, intptr_t num_bytes) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   const ssize_t read_bytes = NO_RETRY_EXPECTED(read(fd_, buffer, num_bytes));
   const int err = errno;
   LOG_INFO("IOHandle::Read: fd = %ld. read %ld bytes\n", fd_, read_bytes);
@@ -105,7 +105,7 @@
 }
 
 intptr_t IOHandle::Write(const void* buffer, intptr_t num_bytes) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   const ssize_t written_bytes =
       NO_RETRY_EXPECTED(write(fd_, buffer, num_bytes));
   const int err = errno;
@@ -122,7 +122,7 @@
 }
 
 intptr_t IOHandle::Accept(struct sockaddr* addr, socklen_t* addrlen) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   const intptr_t socket = NO_RETRY_EXPECTED(accept(fd_, addr, addrlen));
   const int err = errno;
   LOG_INFO("IOHandle::Accept: fd = %ld. socket = %ld\n", fd_, socket);
@@ -138,7 +138,7 @@
 }
 
 intptr_t IOHandle::AvailableBytes() {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   ASSERT(fd_ >= 0);
   intptr_t available = FDUtils::AvailableBytes(fd_);
   LOG_INFO("IOHandle::AvailableBytes(): fd = %ld, bytes = %ld\n", fd_,
@@ -153,12 +153,12 @@
 }
 
 void IOHandle::Close() {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   VOID_NO_RETRY_EXPECTED(close(fd_));
 }
 
 uint32_t IOHandle::MaskToEpollEvents(intptr_t mask) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   // Do not ask for POLLERR and POLLHUP explicitly as they are
   // triggered anyway.
   uint32_t events = POLLRDHUP;
@@ -230,12 +230,12 @@
 }
 
 bool IOHandle::AsyncWait(zx_handle_t port, uint32_t events, uint64_t key) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   return AsyncWaitLocked(port, events, key);
 }
 
 void IOHandle::CancelWait(zx_handle_t port, uint64_t key) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   LOG_INFO("IOHandle::CancelWait: fd = %ld\n", fd_);
   ASSERT(port != ZX_HANDLE_INVALID);
   ASSERT(handle_ != ZX_HANDLE_INVALID);
@@ -246,14 +246,14 @@
 }
 
 uint32_t IOHandle::WaitEnd(zx_signals_t observed) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   uint32_t events = 0;
   fdio_unsafe_wait_end(fdio_, observed, &events);
   return events;
 }
 
 intptr_t IOHandle::ToggleEvents(intptr_t event_mask) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   if (!write_events_enabled_) {
     LOG_INFO("IOHandle::ToggleEvents: fd = %ld de-asserting write\n", fd_);
     event_mask = event_mask & ~(1 << kOutEvent);
diff --git a/runtime/bin/eventhandler_fuchsia.h b/runtime/bin/eventhandler_fuchsia.h
index 0ff514b..07acef97 100644
--- a/runtime/bin/eventhandler_fuchsia.h
+++ b/runtime/bin/eventhandler_fuchsia.h
@@ -31,7 +31,7 @@
  public:
   explicit IOHandle(intptr_t fd)
       : ReferenceCounted(),
-        mutex_(new Mutex()),
+        mutex_(),
         write_events_enabled_(true),
         read_events_enabled_(true),
         fd_(fd),
@@ -65,13 +65,12 @@
     if (fdio_ != NULL) {
       fdio_unsafe_release(fdio_);
     }
-    delete mutex_;
   }
 
   bool AsyncWaitLocked(zx_handle_t port, uint32_t events, uint64_t key);
 
   // Mutex that protects the state here.
-  Mutex* mutex_;
+  Mutex mutex_;
   bool write_events_enabled_;
   bool read_events_enabled_;
   // Bytes remaining to be read from the socket. Read events should only be
diff --git a/runtime/bin/eventhandler_win.cc b/runtime/bin/eventhandler_win.cc
index f9f25de..37b4f28 100644
--- a/runtime/bin/eventhandler_win.cc
+++ b/runtime/bin/eventhandler_win.cc
@@ -116,10 +116,9 @@
       read_thread_handle_(NULL),
       read_thread_starting_(false),
       read_thread_finished_(false),
-      monitor_(new Monitor()) {}
+      monitor_() {}
 
 Handle::~Handle() {
-  delete monitor_;
 }
 
 bool Handle::CreateCompletionPort(HANDLE completion_port) {
@@ -133,7 +132,7 @@
 }
 
 void Handle::Close() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   if (!SupportsOverlappedIO()) {
     // If the handle uses synchronous I/O (e.g. stdin), cancel any pending
     // operation before closing the handle, so the read thread is not blocked.
@@ -174,7 +173,7 @@
 }
 
 void Handle::WaitForReadThreadStarted() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   while (read_thread_starting_) {
     ml.Wait();
   }
@@ -183,7 +182,7 @@
 void Handle::WaitForReadThreadFinished() {
   HANDLE to_join = NULL;
   {
-    MonitorLocker ml(monitor_);
+    MonitorLocker ml(&monitor_);
     if (read_thread_id_ != Thread::kInvalidThreadId) {
       while (!read_thread_finished_) {
         ml.Wait();
@@ -205,7 +204,7 @@
 void Handle::ReadComplete(OverlappedBuffer* buffer) {
   WaitForReadThreadStarted();
   {
-    MonitorLocker ml(monitor_);
+    MonitorLocker ml(&monitor_);
     // Currently only one outstanding read at the time.
     ASSERT(pending_read_ == buffer);
     ASSERT(data_ready_ == NULL);
@@ -224,7 +223,7 @@
 }
 
 void Handle::WriteComplete(OverlappedBuffer* buffer) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   // Currently only one outstanding write at the time.
   ASSERT(pending_write_ == buffer);
   OverlappedBuffer::DisposeBuffer(buffer);
@@ -237,7 +236,7 @@
 }
 
 void Handle::NotifyReadThreadStarted() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   ASSERT(read_thread_starting_);
   ASSERT(read_thread_id_ == Thread::kInvalidThreadId);
   read_thread_id_ = Thread::GetCurrentThreadId();
@@ -247,7 +246,7 @@
 }
 
 void Handle::NotifyReadThreadFinished() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   ASSERT(!read_thread_finished_);
   ASSERT(read_thread_id_ != Thread::kInvalidThreadId);
   read_thread_finished_ = true;
@@ -315,7 +314,7 @@
 }
 
 bool Handle::IssueWrite() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   ASSERT(type_ != kListenSocket);
   ASSERT(completion_port_ != INVALID_HANDLE_VALUE);
   ASSERT(HasPendingWrite());
@@ -365,7 +364,7 @@
 }
 
 void FileHandle::EnsureInitialized(EventHandlerImplementation* event_handler) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   event_handler_ = event_handler;
   if (completion_port_ == INVALID_HANDLE_VALUE) {
     if (SupportsOverlappedIO()) {
@@ -386,7 +385,7 @@
 
 void DirectoryWatchHandle::EnsureInitialized(
     EventHandlerImplementation* event_handler) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   event_handler_ = event_handler;
   if (completion_port_ == INVALID_HANDLE_VALUE) {
     CreateCompletionPort(event_handler_->completion_port());
@@ -418,7 +417,7 @@
 }
 
 void DirectoryWatchHandle::Stop() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   // Stop the outstanding read, so we can close the handle.
 
   if (HasPendingRead()) {
@@ -450,7 +449,7 @@
 }
 
 bool ListenSocket::IssueAccept() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
 
   // For AcceptEx there needs to be buffer storage for address
   // information for two addresses (local and remote address). The
@@ -485,7 +484,7 @@
 
 void ListenSocket::AcceptComplete(OverlappedBuffer* buffer,
                                   HANDLE completion_port) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   if (!IsClosing()) {
     // Update the accepted socket to support the full range of API calls.
     SOCKET s = socket();
@@ -556,12 +555,12 @@
 }
 
 bool ListenSocket::CanAccept() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   return accepted_head_ != NULL;
 }
 
 ClientSocket* ListenSocket::Accept() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
 
   ClientSocket* result = NULL;
 
@@ -589,7 +588,7 @@
 
 void ListenSocket::EnsureInitialized(
     EventHandlerImplementation* event_handler) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   if (AcceptEx_ == NULL) {
     ASSERT(completion_port_ == INVALID_HANDLE_VALUE);
     ASSERT(event_handler_ == NULL);
@@ -604,7 +603,7 @@
 }
 
 intptr_t Handle::Available() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   if (data_ready_ == NULL) {
     return 0;
   }
@@ -613,7 +612,7 @@
 }
 
 intptr_t Handle::Read(void* buffer, intptr_t num_bytes) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   if (data_ready_ == NULL) {
     return 0;
   }
@@ -633,7 +632,7 @@
                           intptr_t num_bytes,
                           struct sockaddr* sa,
                           socklen_t sa_len) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   if (data_ready_ == NULL) {
     return 0;
   }
@@ -658,7 +657,7 @@
 }
 
 intptr_t Handle::Write(const void* buffer, intptr_t num_bytes) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   if (HasPendingWrite()) {
     return 0;
   }
@@ -682,7 +681,7 @@
                         intptr_t num_bytes,
                         struct sockaddr* sa,
                         socklen_t sa_len) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   if (HasPendingWrite()) {
     return 0;
   }
@@ -718,7 +717,7 @@
 }
 
 void StdHandle::RunWriteLoop() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   write_thread_running_ = true;
   thread_id_ = Thread::GetCurrentThreadId();
   thread_handle_ = OpenThread(SYNCHRONIZE, false, thread_id_);
@@ -757,7 +756,7 @@
 }
 
 intptr_t StdHandle::Write(const void* buffer, intptr_t num_bytes) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   if (HasPendingWrite()) {
     return 0;
   }
@@ -803,7 +802,7 @@
 
 void StdHandle::DoClose() {
   {
-    MonitorLocker ml(monitor_);
+    MonitorLocker ml(&monitor_);
     if (write_thread_exists_) {
       write_thread_running_ = false;
       ml.Notify();
@@ -859,7 +858,7 @@
 }
 
 bool ClientSocket::IssueRead() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   ASSERT(completion_port_ != INVALID_HANDLE_VALUE);
   ASSERT(!HasPendingRead());
 
@@ -882,7 +881,7 @@
 }
 
 bool ClientSocket::IssueWrite() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   ASSERT(completion_port_ != INVALID_HANDLE_VALUE);
   ASSERT(HasPendingWrite());
   ASSERT(pending_write_->operation() == OverlappedBuffer::kWrite);
@@ -950,7 +949,7 @@
 
 void ClientSocket::EnsureInitialized(
     EventHandlerImplementation* event_handler) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   if (completion_port_ == INVALID_HANDLE_VALUE) {
     ASSERT(event_handler_ == NULL);
     event_handler_ = event_handler;
@@ -963,7 +962,7 @@
 }
 
 bool DatagramSocket::IssueSendTo(struct sockaddr* sa, socklen_t sa_len) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   ASSERT(completion_port_ != INVALID_HANDLE_VALUE);
   ASSERT(HasPendingWrite());
   ASSERT(pending_write_->operation() == OverlappedBuffer::kSendTo);
@@ -980,7 +979,7 @@
 }
 
 bool DatagramSocket::IssueRecvFrom() {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   ASSERT(completion_port_ != INVALID_HANDLE_VALUE);
   ASSERT(!HasPendingRead());
 
@@ -1004,7 +1003,7 @@
 
 void DatagramSocket::EnsureInitialized(
     EventHandlerImplementation* event_handler) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   if (completion_port_ == INVALID_HANDLE_VALUE) {
     ASSERT(event_handler_ == NULL);
     event_handler_ = event_handler;
@@ -1045,7 +1044,7 @@
       ListenSocket* listen_socket = reinterpret_cast<ListenSocket*>(handle);
       listen_socket->EnsureInitialized(this);
 
-      MonitorLocker ml(listen_socket->monitor_);
+      MonitorLocker ml(&listen_socket->monitor_);
 
       if (IS_COMMAND(msg->data, kReturnTokenCommand)) {
         listen_socket->ReturnTokens(msg->dart_port, TOKEN_COUNT(msg->data));
@@ -1077,7 +1076,7 @@
       }
     } else {
       handle->EnsureInitialized(this);
-      MonitorLocker ml(handle->monitor_);
+      MonitorLocker ml(&handle->monitor_);
 
       if (IS_COMMAND(msg->data, kReturnTokenCommand)) {
         handle->ReturnTokens(msg->dart_port, TOKEN_COUNT(msg->data));
@@ -1151,7 +1150,7 @@
   listen_socket->AcceptComplete(buffer, completion_port_);
 
   {
-    MonitorLocker ml(listen_socket->monitor_);
+    MonitorLocker ml(&listen_socket->monitor_);
     TryDispatchingPendingAccepts(listen_socket);
   }
 
@@ -1346,7 +1345,6 @@
 }
 
 EventHandlerImplementation::EventHandlerImplementation() {
-  startup_monitor_ = new Monitor();
   handler_thread_id_ = Thread::kInvalidThreadId;
   handler_thread_handle_ = NULL;
   completion_port_ =
@@ -1362,7 +1360,6 @@
   DWORD res = WaitForSingleObject(handler_thread_handle_, INFINITE);
   CloseHandle(handler_thread_handle_);
   ASSERT(res == WAIT_OBJECT_0);
-  delete startup_monitor_;
   CloseHandle(completion_port_);
 }
 
@@ -1395,7 +1392,7 @@
   ASSERT(handler_impl != NULL);
 
   {
-    MonitorLocker ml(handler_impl->startup_monitor_);
+    MonitorLocker ml(&handler_impl->startup_monitor_);
     handler_impl->handler_thread_id_ = Thread::GetCurrentThreadId();
     handler_impl->handler_thread_handle_ =
         OpenThread(SYNCHRONIZE, false, handler_impl->handler_thread_id_);
@@ -1469,7 +1466,7 @@
   }
 
   {
-    MonitorLocker ml(startup_monitor_);
+    MonitorLocker ml(&startup_monitor_);
     while (handler_thread_id_ == Thread::kInvalidThreadId) {
       ml.Wait();
     }
diff --git a/runtime/bin/eventhandler_win.h b/runtime/bin/eventhandler_win.h
index 2833f13..bbd2179 100644
--- a/runtime/bin/eventhandler_win.h
+++ b/runtime/bin/eventhandler_win.h
@@ -262,7 +262,7 @@
 
   virtual void HandleIssueError();
 
-  Monitor* monitor_;
+  Monitor monitor_;
   Type type_;
   HANDLE handle_;
   HANDLE completion_port_;
@@ -558,7 +558,7 @@
   HANDLE completion_port() { return completion_port_; }
 
  private:
-  Monitor* startup_monitor_;
+  Monitor startup_monitor_;
   ThreadId handler_thread_id_;
   HANDLE handler_thread_handle_;
 
diff --git a/runtime/bin/loader.cc b/runtime/bin/loader.cc
index 2993e91..a56cdc2 100644
--- a/runtime/bin/loader.cc
+++ b/runtime/bin/loader.cc
@@ -33,14 +33,13 @@
     : port_(ILLEGAL_PORT),
       isolate_data_(isolate_data),
       error_(Dart_Null()),
-      monitor_(NULL),
+      monitor_(),
       pending_operations_(0),
       results_(NULL),
       results_length_(0),
       results_capacity_(0),
       payload_(NULL),
       payload_length_(0) {
-  monitor_ = new Monitor();
   ASSERT(isolate_data_ != NULL);
   port_ = Dart_NewNativePort("Loader", Loader::NativeMessageHandler, false);
   isolate_data_->set_loader(this);
@@ -51,15 +50,13 @@
   ASSERT(port_ != ILLEGAL_PORT);
   // Enter the monitor while we close the Dart port. After the Dart port is
   // closed, no more results can be queued.
-  monitor_->Enter();
+  monitor_.Enter();
   Dart_CloseNativePort(port_);
-  monitor_->Exit();
+  monitor_.Exit();
   RemoveLoader(port_);
   port_ = ILLEGAL_PORT;
   isolate_data_->set_loader(NULL);
   isolate_data_ = NULL;
-  delete monitor_;
-  monitor_ = NULL;
   for (intptr_t i = 0; i < results_length_; i++) {
     results_[i].Cleanup();
   }
@@ -182,7 +179,7 @@
   Dart_ListSetAt(request, 5, library_url);
 
   if (Dart_Post(loader_port, request)) {
-    MonitorLocker ml(monitor_);
+    MonitorLocker ml(&monitor_);
     pending_operations_++;
   }
 }
@@ -205,13 +202,13 @@
   Dart_ListSetAt(request, 5, library_url);
 
   if (Dart_Post(loader_port, request)) {
-    MonitorLocker ml(monitor_);
+    MonitorLocker ml(&monitor_);
     pending_operations_++;
   }
 }
 
 void Loader::QueueMessage(Dart_CObject* message) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
   if (results_length_ == results_capacity_) {
     // Grow to an initial capacity or double in size.
     results_capacity_ = (results_capacity_ == 0) ? 4 : results_capacity_ * 2;
@@ -227,7 +224,7 @@
 }
 
 void Loader::BlockUntilComplete(ProcessResult process_result) {
-  MonitorLocker ml(monitor_);
+  MonitorLocker ml(&monitor_);
 
   while (true) {
     // If |ProcessQueueLocked| returns false, we've hit an error and should
diff --git a/runtime/bin/loader.h b/runtime/bin/loader.h
index 2e94033..4142c89 100644
--- a/runtime/bin/loader.h
+++ b/runtime/bin/loader.h
@@ -49,7 +49,7 @@
   Dart_Handle error_;
   // This monitor is used to protect the pending operations count and the
   // I/O result queue.
-  Monitor* monitor_;
+  Monitor monitor_;
 
   // The number of operations dispatched to the service isolate for loading.
   // Must be accessed with monitor_ held.
diff --git a/runtime/bin/socket.cc b/runtime/bin/socket.cc
index 113ea22..b706482 100644
--- a/runtime/bin/socket.cc
+++ b/runtime/bin/socket.cc
@@ -94,7 +94,7 @@
                                                       intptr_t backlog,
                                                       bool v6_only,
                                                       bool shared) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
 
   OSSocket* first_os_socket = NULL;
   intptr_t port = SocketAddress::GetAddrPort(addr);
@@ -195,7 +195,7 @@
 
 bool ListeningSocketRegistry::CloseOneSafe(OSSocket* os_socket,
                                            bool update_hash_maps) {
-  ASSERT(!mutex_->TryLock());
+  ASSERT(!mutex_.TryLock());
   ASSERT(os_socket != NULL);
   ASSERT(os_socket->ref_count > 0);
   os_socket->ref_count--;
@@ -232,7 +232,7 @@
 }
 
 void ListeningSocketRegistry::CloseAllSafe() {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
 
   for (SimpleHashMap::Entry* cursor = sockets_by_fd_.Start(); cursor != NULL;
        cursor = sockets_by_fd_.Next(cursor)) {
@@ -241,7 +241,7 @@
 }
 
 bool ListeningSocketRegistry::CloseSafe(Socket* socketfd) {
-  ASSERT(!mutex_->TryLock());
+  ASSERT(!mutex_.TryLock());
   OSSocket* os_socket = LookupByFd(socketfd);
   if (os_socket != NULL) {
     return CloseOneSafe(os_socket, true);
diff --git a/runtime/bin/socket.h b/runtime/bin/socket.h
index 24d95b9..a23ff65 100644
--- a/runtime/bin/socket.h
+++ b/runtime/bin/socket.h
@@ -155,12 +155,10 @@
   ListeningSocketRegistry()
       : sockets_by_port_(SameIntptrValue, kInitialSocketsCount),
         sockets_by_fd_(SameIntptrValue, kInitialSocketsCount),
-        mutex_(new Mutex()) {}
+        mutex_() {}
 
   ~ListeningSocketRegistry() {
     CloseAllSafe();
-    delete mutex_;
-    mutex_ = NULL;
   }
 
   static void Initialize();
@@ -187,7 +185,7 @@
   // this function.
   bool CloseSafe(Socket* socketfd);
 
-  Mutex* mutex() { return mutex_; }
+  Mutex* mutex() { return &mutex_; }
 
  private:
   struct OSSocket {
@@ -254,7 +252,7 @@
   SimpleHashMap sockets_by_port_;
   SimpleHashMap sockets_by_fd_;
 
-  Mutex* mutex_;
+  Mutex mutex_;
 
   DISALLOW_COPY_AND_ASSIGN(ListeningSocketRegistry);
 };
diff --git a/runtime/vm/compiler/jit/compiler.cc b/runtime/vm/compiler/jit/compiler.cc
index e8543ae..da82ceb 100644
--- a/runtime/vm/compiler/jit/compiler.cc
+++ b/runtime/vm/compiler/jit/compiler.cc
@@ -1393,18 +1393,16 @@
 
 BackgroundCompiler::BackgroundCompiler(Isolate* isolate)
     : isolate_(isolate),
-      queue_monitor_(new Monitor()),
+      queue_monitor_(),
       function_queue_(new BackgroundCompilationQueue()),
-      done_monitor_(new Monitor()),
+      done_monitor_(),
       running_(false),
       done_(true),
       disabled_depth_(0) {}
 
 // Fields all deleted in ::Stop; here clear them.
 BackgroundCompiler::~BackgroundCompiler() {
-  delete queue_monitor_;
   delete function_queue_;
-  delete done_monitor_;
 }
 
 void BackgroundCompiler::Run() {
@@ -1420,7 +1418,7 @@
       HANDLESCOPE(thread);
       Function& function = Function::Handle(zone);
       {
-        MonitorLocker ml(queue_monitor_);
+        MonitorLocker ml(&queue_monitor_);
         function = function_queue()->PeekFunction();
       }
       while (running_ && !function.IsNull()) {
@@ -1437,7 +1435,7 @@
 
         QueueElement* qelem = NULL;
         {
-          MonitorLocker ml(queue_monitor_);
+          MonitorLocker ml(&queue_monitor_);
           if (function_queue()->IsEmpty()) {
             // We are shutting down, queue was cleared.
             function = Function::null();
@@ -1466,7 +1464,7 @@
     Thread::ExitIsolateAsHelper();
     {
       // Wait to be notified when the work queue is not empty.
-      MonitorLocker ml(queue_monitor_);
+      MonitorLocker ml(&queue_monitor_);
       while (function_queue()->IsEmpty() && running_) {
         ml.Wait();
       }
@@ -1475,7 +1473,7 @@
 
   {
     // Notify that the thread is done.
-    MonitorLocker ml_done(done_monitor_);
+    MonitorLocker ml_done(&done_monitor_);
     done_ = true;
     ml_done.Notify();
   }
@@ -1489,7 +1487,7 @@
     isolate_->heap()->CollectMostGarbage();
   }
   {
-    MonitorLocker ml(queue_monitor_);
+    MonitorLocker ml(&queue_monitor_);
     ASSERT(running_);
     if (function_queue()->ContainsObj(function)) {
       return;
@@ -1523,7 +1521,7 @@
   ASSERT(thread->IsMutatorThread());
   ASSERT(!thread->IsAtSafepoint());
 
-  MonitorLocker ml(done_monitor_);
+  MonitorLocker ml(&done_monitor_);
   if (running_ || !done_) return;
   running_ = true;
   done_ = false;
@@ -1541,14 +1539,14 @@
   ASSERT(!thread->IsAtSafepoint());
 
   {
-    MonitorLocker ml(queue_monitor_);
+    MonitorLocker ml(&queue_monitor_);
     running_ = false;
     function_queue_->Clear();
     ml.Notify();  // Stop waiting for the queue.
   }
 
   {
-    MonitorLocker ml_done(done_monitor_);
+    MonitorLocker ml_done(&done_monitor_);
     while (!done_) {
       ml_done.WaitWithSafepointCheck(thread);
     }
diff --git a/runtime/vm/compiler/jit/compiler.h b/runtime/vm/compiler/jit/compiler.h
index c7e6412..b804a70 100644
--- a/runtime/vm/compiler/jit/compiler.h
+++ b/runtime/vm/compiler/jit/compiler.h
@@ -226,10 +226,10 @@
 
   Isolate* isolate_;
 
-  Monitor* queue_monitor_;  // Controls access to the queue.
+  Monitor queue_monitor_;  // Controls access to the queue.
   BackgroundCompilationQueue* function_queue_;
 
-  Monitor* done_monitor_;   // Notify/wait that the thread is done.
+  Monitor done_monitor_;    // Notify/wait that the thread is done.
   bool running_;            // While true, will try to read queue and compile.
   bool done_;               // True if the thread is done.
 
diff --git a/runtime/vm/heap/freelist.cc b/runtime/vm/heap/freelist.cc
index 9c2aca1..c98af37 100644
--- a/runtime/vm/heap/freelist.cc
+++ b/runtime/vm/heap/freelist.cc
@@ -55,22 +55,20 @@
 }
 
 FreeList::FreeList()
-    : mutex_(new Mutex()),
-      freelist_search_budget_(kInitialFreeListSearchBudget) {
+    : mutex_(), freelist_search_budget_(kInitialFreeListSearchBudget) {
   Reset();
 }
 
 FreeList::~FreeList() {
-  delete mutex_;
 }
 
 uword FreeList::TryAllocate(intptr_t size, bool is_protected) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   return TryAllocateLocked(size, is_protected);
 }
 
 uword FreeList::TryAllocateLocked(intptr_t size, bool is_protected) {
-  DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread());
+  DEBUG_ASSERT(mutex_.IsOwnedByCurrentThread());
   // Precondition: is_protected is false or else all free list elements are
   // in non-writable pages.
 
@@ -176,12 +174,12 @@
 }
 
 void FreeList::Free(uword addr, intptr_t size) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   FreeLocked(addr, size);
 }
 
 void FreeList::FreeLocked(uword addr, intptr_t size) {
-  DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread());
+  DEBUG_ASSERT(mutex_.IsOwnedByCurrentThread());
   // Precondition required by AsElement and EnqueueElement: the (page
   // containing the) header of the freed block should be writable.  This is
   // the case when called for newly allocated pages because they are
@@ -194,7 +192,7 @@
 }
 
 void FreeList::Reset() {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   free_map_.Reset();
   last_free_small_size_ = -1;
   for (int i = 0; i < (kNumLists + 1); i++) {
@@ -214,7 +212,7 @@
 }
 
 intptr_t FreeList::LengthLocked(int index) const {
-  DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread());
+  DEBUG_ASSERT(mutex_.IsOwnedByCurrentThread());
   ASSERT(index >= 0);
   ASSERT(index < kNumLists);
   intptr_t result = 0;
@@ -306,7 +304,7 @@
 }
 
 void FreeList::Print() const {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   PrintSmall();
   PrintLarge();
 }
@@ -337,12 +335,12 @@
 }
 
 FreeListElement* FreeList::TryAllocateLarge(intptr_t minimum_size) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   return TryAllocateLargeLocked(minimum_size);
 }
 
 FreeListElement* FreeList::TryAllocateLargeLocked(intptr_t minimum_size) {
-  DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread());
+  DEBUG_ASSERT(mutex_.IsOwnedByCurrentThread());
   FreeListElement* previous = NULL;
   FreeListElement* current = free_lists_[kNumLists];
   // TODO(koda): Find largest.
diff --git a/runtime/vm/heap/freelist.h b/runtime/vm/heap/freelist.h
index 171ff1c..9fac19d 100644
--- a/runtime/vm/heap/freelist.h
+++ b/runtime/vm/heap/freelist.h
@@ -86,7 +86,7 @@
 
   void Print() const;
 
-  Mutex* mutex() { return mutex_; }
+  Mutex* mutex() { return &mutex_; }
   uword TryAllocateLocked(intptr_t size, bool is_protected);
   void FreeLocked(uword addr, intptr_t size);
 
@@ -97,7 +97,7 @@
   // Allocates locked and unprotected memory, but only from small elements
   // (i.e., fixed size lists).
   uword TryAllocateSmallLocked(intptr_t size) {
-    DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread());
+    DEBUG_ASSERT(mutex_.IsOwnedByCurrentThread());
     if (size > last_free_small_size_) {
       return 0;
     }
@@ -159,7 +159,7 @@
   void PrintLarge() const;
 
   // Lock protecting the free list data structures.
-  Mutex* mutex_;
+  mutable Mutex mutex_;
 
   BitSet<kNumLists> free_map_;
 
diff --git a/runtime/vm/heap/heap.cc b/runtime/vm/heap/heap.cc
index 10f20bb..2a53317 100644
--- a/runtime/vm/heap/heap.cc
+++ b/runtime/vm/heap/heap.cc
@@ -39,8 +39,8 @@
     : isolate_(isolate),
       new_space_(this, max_new_gen_semi_words, kNewObjectAlignmentOffset),
       old_space_(this, max_old_gen_words),
-      barrier_(new Monitor()),
-      barrier_done_(new Monitor()),
+      barrier_(),
+      barrier_done_(),
       read_only_(false),
       gc_new_space_in_progress_(false),
       gc_old_space_in_progress_(false) {
@@ -53,9 +53,6 @@
 }
 
 Heap::~Heap() {
-  delete barrier_;
-  delete barrier_done_;
-
   for (int sel = 0; sel < kNumWeakSelectors; sel++) {
     delete new_weak_tables_[sel];
     delete old_weak_tables_[sel];
diff --git a/runtime/vm/heap/heap.h b/runtime/vm/heap/heap.h
index 628ab15..837f2c9 100644
--- a/runtime/vm/heap/heap.h
+++ b/runtime/vm/heap/heap.h
@@ -284,8 +284,8 @@
 
   Isolate* isolate() const { return isolate_; }
 
-  Monitor* barrier() const { return barrier_; }
-  Monitor* barrier_done() const { return barrier_done_; }
+  Monitor* barrier() const { return &barrier_; }
+  Monitor* barrier_done() const { return &barrier_done_; }
 
   void SetupImagePage(void* pointer, uword size, bool is_executable) {
     old_space_.SetupImagePage(pointer, size, is_executable);
@@ -382,8 +382,8 @@
   WeakTable* new_weak_tables_[kNumWeakSelectors];
   WeakTable* old_weak_tables_[kNumWeakSelectors];
 
-  Monitor* barrier_;
-  Monitor* barrier_done_;
+  mutable Monitor barrier_;
+  mutable Monitor barrier_done_;
 
   // GC stats collection.
   GCStats stats_;
diff --git a/runtime/vm/heap/pages.cc b/runtime/vm/heap/pages.cc
index 4f0b6fc..23c83dd 100644
--- a/runtime/vm/heap/pages.cc
+++ b/runtime/vm/heap/pages.cc
@@ -225,7 +225,7 @@
 PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words)
     : freelist_(),
       heap_(heap),
-      pages_lock_(new Mutex()),
+      pages_lock_(),
       pages_(NULL),
       pages_tail_(NULL),
       exec_pages_(NULL),
@@ -237,7 +237,7 @@
       max_capacity_in_words_(max_capacity_in_words),
       usage_(),
       allocated_black_in_words_(0),
-      tasks_lock_(new Monitor()),
+      tasks_lock_(),
       tasks_(0),
       concurrent_marker_tasks_(0),
       phase_(kDone),
@@ -269,8 +269,6 @@
   FreePages(exec_pages_);
   FreePages(large_pages_);
   FreePages(image_pages_);
-  delete pages_lock_;
-  delete tasks_lock_;
   ASSERT(marker_ == NULL);
 }
 
@@ -282,7 +280,7 @@
 
 HeapPage* PageSpace::AllocatePage(HeapPage::PageType type, bool link) {
   {
-    MutexLocker ml(pages_lock_);
+    MutexLocker ml(&pages_lock_);
     if (!CanIncreaseCapacityInWordsLocked(kPageSizeInWords)) {
       return NULL;
     }
@@ -300,7 +298,7 @@
     return NULL;
   }
 
-  MutexLocker ml(pages_lock_);
+  MutexLocker ml(&pages_lock_);
   if (link) {
     if (!is_exec) {
       if (pages_ == NULL) {
@@ -336,7 +334,7 @@
 HeapPage* PageSpace::AllocateLargePage(intptr_t size, HeapPage::PageType type) {
   const intptr_t page_size_in_words = LargePageSizeInWordsFor(size);
   {
-    MutexLocker ml(pages_lock_);
+    MutexLocker ml(&pages_lock_);
     if (!CanIncreaseCapacityInWordsLocked(page_size_in_words)) {
       return NULL;
     }
@@ -379,7 +377,7 @@
 void PageSpace::FreePage(HeapPage* page, HeapPage* previous_page) {
   bool is_exec = (page->type() == HeapPage::kExecutable);
   {
-    MutexLocker ml(pages_lock_);
+    MutexLocker ml(&pages_lock_);
     IncreaseCapacityInWordsLocked(-(page->memory_->size() >> kWordSizeLog2));
     if (!is_exec) {
       // Remove the page from the list of data pages.
@@ -553,7 +551,7 @@
 class ExclusivePageIterator : ValueObject {
  public:
   explicit ExclusivePageIterator(const PageSpace* space)
-      : space_(space), ml_(space->pages_lock_) {
+      : space_(space), ml_(&space->pages_lock_) {
     space_->MakeIterable();
     list_ = kRegular;
     page_ = space_->pages_;
@@ -605,7 +603,7 @@
 class ExclusiveCodePageIterator : ValueObject {
  public:
   explicit ExclusiveCodePageIterator(const PageSpace* space)
-      : space_(space), ml_(space->pages_lock_) {
+      : space_(space), ml_(&space->pages_lock_) {
     space_->MakeIterable();
     page_ = space_->exec_pages_;
   }
@@ -627,7 +625,7 @@
 class ExclusiveLargePageIterator : ValueObject {
  public:
   explicit ExclusiveLargePageIterator(const PageSpace* space)
-      : space_(space), ml_(space->pages_lock_) {
+      : space_(space), ml_(&space->pages_lock_) {
     space_->MakeIterable();
     page_ = space_->large_pages_;
   }
@@ -884,7 +882,7 @@
     // {"object_start": "0x...", "objects": [size, class id, size, ...]}
     // TODO(19445): Use ExclusivePageIterator once HeapMap supports large pages.
     HeapIterationScope iteration(Thread::Current());
-    MutexLocker ml(pages_lock_);
+    MutexLocker ml(&pages_lock_);
     MakeIterable();
     JSONArray all_pages(&heap_map, "pages");
     for (HeapPage* page = pages_; page != NULL; page = page->next()) {
@@ -909,7 +907,7 @@
 
 void PageSpace::WriteProtectCode(bool read_only) {
   if (FLAG_write_protect_code) {
-    MutexLocker ml(pages_lock_);
+    MutexLocker ml(&pages_lock_);
     NoSafepointScope no_safepoint;
     // No need to go through all of the data pages first.
     HeapPage* page = exec_pages_;
@@ -1236,7 +1234,7 @@
 void PageSpace::Compact(Thread* thread) {
   thread->isolate()->set_compaction_in_progress(true);
   GCCompactor compactor(thread, heap_);
-  compactor.Compact(pages_, &freelist_[HeapPage::kData], pages_lock_);
+  compactor.Compact(pages_, &freelist_[HeapPage::kData], &pages_lock_);
   thread->isolate()->set_compaction_in_progress(false);
 
   if (FLAG_verify_after_gc) {
@@ -1330,7 +1328,7 @@
     page->type_ = HeapPage::kData;
   }
 
-  MutexLocker ml(pages_lock_);
+  MutexLocker ml(&pages_lock_);
   page->next_ = image_pages_;
   image_pages_ = page;
 }
diff --git a/runtime/vm/heap/pages.h b/runtime/vm/heap/pages.h
index c1608af..6eb1e32 100644
--- a/runtime/vm/heap/pages.h
+++ b/runtime/vm/heap/pages.h
@@ -298,15 +298,15 @@
 
   int64_t UsedInWords() const { return usage_.used_in_words; }
   int64_t CapacityInWords() const {
-    MutexLocker ml(pages_lock_);
+    MutexLocker ml(&pages_lock_);
     return usage_.capacity_in_words;
   }
   void IncreaseCapacityInWords(intptr_t increase_in_words) {
-    MutexLocker ml(pages_lock_);
+    MutexLocker ml(&pages_lock_);
     IncreaseCapacityInWordsLocked(increase_in_words);
   }
   void IncreaseCapacityInWordsLocked(intptr_t increase_in_words) {
-    DEBUG_ASSERT(pages_lock_->IsOwnedByCurrentThread());
+    DEBUG_ASSERT(pages_lock_.IsOwnedByCurrentThread());
     usage_.capacity_in_words += increase_in_words;
     UpdateMaxCapacityLocked();
   }
@@ -316,7 +316,7 @@
 
   int64_t ExternalInWords() const { return usage_.external_in_words; }
   SpaceUsage GetCurrentUsage() const {
-    MutexLocker ml(pages_lock_);
+    MutexLocker ml(&pages_lock_);
     return usage_;
   }
 
@@ -399,7 +399,7 @@
                                is_protected, is_locked);
   }
 
-  Monitor* tasks_lock() const { return tasks_lock_; }
+  Monitor* tasks_lock() const { return &tasks_lock_; }
   intptr_t tasks() const { return tasks_; }
   void set_tasks(intptr_t val) {
     ASSERT(val >= 0);
@@ -495,7 +495,7 @@
   Heap* heap_;
 
   // Use ExclusivePageIterator for safe access to these.
-  Mutex* pages_lock_;
+  mutable Mutex pages_lock_;
   HeapPage* pages_;
   HeapPage* pages_tail_;
   HeapPage* exec_pages_;
@@ -517,7 +517,7 @@
   intptr_t allocated_black_in_words_;
 
   // Keep track of running MarkSweep tasks.
-  Monitor* tasks_lock_;
+  mutable Monitor tasks_lock_;
   intptr_t tasks_;
   intptr_t concurrent_marker_tasks_;
   Phase phase_;
diff --git a/runtime/vm/heap/pointer_block.cc b/runtime/vm/heap/pointer_block.cc
index 52b4563..19428f7 100644
--- a/runtime/vm/heap/pointer_block.cc
+++ b/runtime/vm/heap/pointer_block.cc
@@ -41,17 +41,16 @@
 }
 
 template <int BlockSize>
-BlockStack<BlockSize>::BlockStack() : mutex_(new Mutex()) {}
+BlockStack<BlockSize>::BlockStack() : mutex_() {}
 
 template <int BlockSize>
 BlockStack<BlockSize>::~BlockStack() {
   Reset();
-  delete mutex_;
 }
 
 template <int BlockSize>
 void BlockStack<BlockSize>::Reset() {
-  MutexLocker local_mutex_locker(mutex_);
+  MutexLocker local_mutex_locker(&mutex_);
   {
     // Empty all blocks and move them to the global cache.
     MutexLocker global_mutex_locker(global_mutex_);
@@ -71,7 +70,7 @@
 
 template <int BlockSize>
 typename BlockStack<BlockSize>::Block* BlockStack<BlockSize>::Blocks() {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   while (!partial_.IsEmpty()) {
     full_.Push(partial_.Pop());
   }
@@ -82,14 +81,14 @@
 void BlockStack<BlockSize>::PushBlockImpl(Block* block) {
   ASSERT(block->next() == NULL);  // Should be just a single block.
   if (block->IsFull()) {
-    MutexLocker ml(mutex_);
+    MutexLocker ml(&mutex_);
     full_.Push(block);
   } else if (block->IsEmpty()) {
     MutexLocker ml(global_mutex_);
     global_empty_->Push(block);
     TrimGlobalEmpty();
   } else {
-    MutexLocker ml(mutex_);
+    MutexLocker ml(&mutex_);
     partial_.Push(block);
   }
 }
@@ -104,7 +103,7 @@
 void StoreBuffer::PushBlock(Block* block, ThresholdPolicy policy) {
   BlockStack<Block::kSize>::PushBlockImpl(block);
   if ((policy == kCheckThreshold) && Overflowed()) {
-    MutexLocker ml(mutex_);
+    MutexLocker ml(&mutex_);
     Thread* thread = Thread::Current();
     // Sanity check: it makes no sense to schedule the GC in another isolate.
     // (If Isolate ever gets multiple store buffers, we should avoid this
@@ -118,7 +117,7 @@
 typename BlockStack<BlockSize>::Block*
 BlockStack<BlockSize>::PopNonFullBlock() {
   {
-    MutexLocker ml(mutex_);
+    MutexLocker ml(&mutex_);
     if (!partial_.IsEmpty()) {
       return partial_.Pop();
     }
@@ -140,7 +139,7 @@
 template <int BlockSize>
 typename BlockStack<BlockSize>::Block*
 BlockStack<BlockSize>::PopNonEmptyBlock() {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   if (!full_.IsEmpty()) {
     return full_.Pop();
   } else if (!partial_.IsEmpty()) {
@@ -152,7 +151,7 @@
 
 template <int BlockSize>
 bool BlockStack<BlockSize>::IsEmpty() {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   return full_.IsEmpty() && partial_.IsEmpty();
 }
 
@@ -189,7 +188,7 @@
 }
 
 bool StoreBuffer::Overflowed() {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   return (full_.length() + partial_.length()) > kMaxNonEmpty;
 }
 
diff --git a/runtime/vm/heap/pointer_block.h b/runtime/vm/heap/pointer_block.h
index 609ea97..e9ace07 100644
--- a/runtime/vm/heap/pointer_block.h
+++ b/runtime/vm/heap/pointer_block.h
@@ -7,12 +7,12 @@
 
 #include "platform/assert.h"
 #include "vm/globals.h"
+#include "vm/os_thread.h"
 
 namespace dart {
 
 // Forward declarations.
 class Isolate;
-class Mutex;
 class RawObject;
 class ObjectPointerVisitor;
 
@@ -131,7 +131,7 @@
 
   List full_;
   List partial_;
-  Mutex* mutex_;
+  Mutex mutex_;
 
   // Note: This is shared on the basis of block size.
   static const intptr_t kMaxGlobalEmpty = 100;
diff --git a/runtime/vm/heap/safepoint.cc b/runtime/vm/heap/safepoint.cc
index c231b9c..3888cdf 100644
--- a/runtime/vm/heap/safepoint.cc
+++ b/runtime/vm/heap/safepoint.cc
@@ -39,7 +39,7 @@
 
 SafepointHandler::SafepointHandler(Isolate* isolate)
     : isolate_(isolate),
-      safepoint_lock_(new Monitor()),
+      safepoint_lock_(),
       number_threads_not_at_safepoint_(0),
       safepoint_operation_count_(0),
       owner_(NULL) {}
@@ -47,8 +47,6 @@
 SafepointHandler::~SafepointHandler() {
   ASSERT(owner_ == NULL);
   ASSERT(safepoint_operation_count_ == 0);
-  delete safepoint_lock_;
-  safepoint_lock_ = NULL;
   isolate_ = NULL;
 }
 
@@ -96,7 +94,7 @@
               ASSERT(T->isolate() != NULL);
               current->ScheduleInterruptsLocked(Thread::kVMInterrupt);
             }
-            MonitorLocker sl(safepoint_lock_);
+            MonitorLocker sl(&safepoint_lock_);
             ++number_threads_not_at_safepoint_;
           }
         }
@@ -106,7 +104,7 @@
   }
   // Now wait for all threads that are not already at a safepoint to check-in.
   {
-    MonitorLocker sl(safepoint_lock_);
+    MonitorLocker sl(&safepoint_lock_);
     intptr_t num_attempts = 0;
     while (number_threads_not_at_safepoint_ > 0) {
       Monitor::WaitResult retval = sl.Wait(1000);
@@ -161,7 +159,7 @@
   MonitorLocker tl(T->thread_lock());
   T->SetAtSafepoint(true);
   if (T->IsSafepointRequested()) {
-    MonitorLocker sl(safepoint_lock_);
+    MonitorLocker sl(&safepoint_lock_);
     ASSERT(number_threads_not_at_safepoint_ > 0);
     number_threads_not_at_safepoint_ -= 1;
     sl.Notify();
@@ -185,7 +183,7 @@
   if (T->IsSafepointRequested()) {
     T->SetAtSafepoint(true);
     {
-      MonitorLocker sl(safepoint_lock_);
+      MonitorLocker sl(&safepoint_lock_);
       ASSERT(number_threads_not_at_safepoint_ > 0);
       number_threads_not_at_safepoint_ -= 1;
       sl.Notify();
diff --git a/runtime/vm/heap/safepoint.h b/runtime/vm/heap/safepoint.h
index ce84f85..906933a 100644
--- a/runtime/vm/heap/safepoint.h
+++ b/runtime/vm/heap/safepoint.h
@@ -78,7 +78,7 @@
 
   // Monitor used by thread initiating a safepoint operation to track threads
   // not at a safepoint and wait for these threads to reach a safepoint.
-  Monitor* safepoint_lock_;
+  Monitor safepoint_lock_;
   int32_t number_threads_not_at_safepoint_;
 
   // Count that indicates if a safepoint operation is currently in progress
diff --git a/runtime/vm/isolate.cc b/runtime/vm/isolate.cc
index e80616a..ba66dcd 100644
--- a/runtime/vm/isolate.cc
+++ b/runtime/vm/isolate.cc
@@ -884,29 +884,28 @@
       thread_registry_(new ThreadRegistry()),
       safepoint_handler_(new SafepointHandler(this)),
       random_(),
-      mutex_(new Mutex(NOT_IN_PRODUCT("Isolate::mutex_"))),
-      symbols_mutex_(new Mutex(NOT_IN_PRODUCT("Isolate::symbols_mutex_"))),
+      mutex_(NOT_IN_PRODUCT("Isolate::mutex_")),
+      symbols_mutex_(NOT_IN_PRODUCT("Isolate::symbols_mutex_")),
       type_canonicalization_mutex_(
-          new Mutex(NOT_IN_PRODUCT("Isolate::type_canonicalization_mutex_"))),
-      constant_canonicalization_mutex_(new Mutex(
-          NOT_IN_PRODUCT("Isolate::constant_canonicalization_mutex_"))),
+          NOT_IN_PRODUCT("Isolate::type_canonicalization_mutex_")),
+      constant_canonicalization_mutex_(
+          NOT_IN_PRODUCT("Isolate::constant_canonicalization_mutex_")),
       megamorphic_lookup_mutex_(
-          new Mutex(NOT_IN_PRODUCT("Isolate::megamorphic_lookup_mutex_"))),
+          NOT_IN_PRODUCT("Isolate::megamorphic_lookup_mutex_")),
       kernel_data_lib_cache_mutex_(
-          new Mutex(NOT_IN_PRODUCT("Isolate::kernel_data_lib_cache_mutex_"))),
+          NOT_IN_PRODUCT("Isolate::kernel_data_lib_cache_mutex_")),
       kernel_data_class_cache_mutex_(
-          new Mutex(NOT_IN_PRODUCT("Isolate::kernel_data_class_cache_mutex_"))),
+          NOT_IN_PRODUCT("Isolate::kernel_data_class_cache_mutex_")),
       kernel_constants_mutex_(
-          new Mutex(NOT_IN_PRODUCT("Isolate::kernel_constants_mutex_"))),
+          NOT_IN_PRODUCT("Isolate::kernel_constants_mutex_")),
       pending_deopts_(new MallocGrowableArray<PendingLazyDeopt>()),
       tag_table_(GrowableObjectArray::null()),
       deoptimized_code_array_(GrowableObjectArray::null()),
       sticky_error_(Error::null()),
       reloaded_kernel_blobs_(GrowableObjectArray::null()),
-      field_list_mutex_(
-          new Mutex(NOT_IN_PRODUCT("Isolate::field_list_mutex_"))),
+      field_list_mutex_(NOT_IN_PRODUCT("Isolate::field_list_mutex_")),
       boxed_field_list_(GrowableObjectArray::null()),
-      spawn_count_monitor_(new Monitor()),
+      spawn_count_monitor_(),
       handler_info_cache_(),
       catch_entry_moves_cache_() {
   FlagsCopyFrom(api_flags);
@@ -971,22 +970,6 @@
 #if defined(USING_SIMULATOR)
   delete simulator_;
 #endif
-  delete mutex_;
-  mutex_ = nullptr;  // Fail fast if interrupts are scheduled on a dead isolate.
-  delete symbols_mutex_;
-  symbols_mutex_ = nullptr;
-  delete type_canonicalization_mutex_;
-  type_canonicalization_mutex_ = nullptr;
-  delete constant_canonicalization_mutex_;
-  constant_canonicalization_mutex_ = nullptr;
-  delete megamorphic_lookup_mutex_;
-  megamorphic_lookup_mutex_ = nullptr;
-  delete kernel_constants_mutex_;
-  kernel_constants_mutex_ = nullptr;
-  delete kernel_data_lib_cache_mutex_;
-  kernel_data_lib_cache_mutex_ = nullptr;
-  delete kernel_data_class_cache_mutex_;
-  kernel_data_class_cache_mutex_ = nullptr;
   delete pending_deopts_;
   pending_deopts_ = nullptr;
   delete message_handler_;
@@ -995,10 +978,7 @@
   ASSERT(deopt_context_ ==
          nullptr);  // No deopt in progress when isolate deleted.
   delete spawn_state_;
-  delete field_list_mutex_;
-  field_list_mutex_ = nullptr;
   ASSERT(spawn_count_ == 0);
-  delete spawn_count_monitor_;
   delete safepoint_handler_;
   delete thread_registry_;
 
@@ -1306,7 +1286,7 @@
 const char* Isolate::MakeRunnable() {
   ASSERT(Isolate::Current() == nullptr);
 
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   // Check if we are in a valid state to make the isolate runnable.
   if (is_runnable() == true) {
     return "Isolate is already runnable";
@@ -2338,7 +2318,7 @@
   ASSERT(!field.IsOriginal());
   // The enclosed code allocates objects and can potentially trigger a GC,
   // ensure that we account for safepoints when grabbing the lock.
-  SafepointMutexLocker ml(field_list_mutex_);
+  SafepointMutexLocker ml(&field_list_mutex_);
   if (boxed_field_list_ == GrowableObjectArray::null()) {
     boxed_field_list_ = GrowableObjectArray::New(Heap::kOld);
   }
@@ -2349,7 +2329,7 @@
 
 RawField* Isolate::GetDeoptimizingBoxedField() {
   ASSERT(Thread::Current()->IsMutatorThread());
-  SafepointMutexLocker ml(field_list_mutex_);
+  SafepointMutexLocker ml(&field_list_mutex_);
   if (boxed_field_list_ == GrowableObjectArray::null()) {
     return Field::null();
   }
@@ -2792,19 +2772,19 @@
 }
 
 void Isolate::IncrementSpawnCount() {
-  MonitorLocker ml(spawn_count_monitor_);
+  MonitorLocker ml(&spawn_count_monitor_);
   spawn_count_++;
 }
 
 void Isolate::DecrementSpawnCount() {
-  MonitorLocker ml(spawn_count_monitor_);
+  MonitorLocker ml(&spawn_count_monitor_);
   ASSERT(spawn_count_ > 0);
   spawn_count_--;
   ml.Notify();
 }
 
 void Isolate::WaitForOutstandingSpawns() {
-  MonitorLocker ml(spawn_count_monitor_);
+  MonitorLocker ml(&spawn_count_monitor_);
   while (spawn_count_ > 0) {
     ml.Wait();
   }
diff --git a/runtime/vm/isolate.h b/runtime/vm/isolate.h
index dbfe0ab..914a40f 100644
--- a/runtime/vm/isolate.h
+++ b/runtime/vm/isolate.h
@@ -349,26 +349,22 @@
   IsolateSpawnState* spawn_state() const { return spawn_state_; }
   void set_spawn_state(IsolateSpawnState* value) { spawn_state_ = value; }
 
-  Mutex* mutex() const { return mutex_; }
-  Mutex* symbols_mutex() const { return symbols_mutex_; }
-  Mutex* type_canonicalization_mutex() const {
-    return type_canonicalization_mutex_;
+  Mutex* mutex() { return &mutex_; }
+  Mutex* symbols_mutex() { return &symbols_mutex_; }
+  Mutex* type_canonicalization_mutex() { return &type_canonicalization_mutex_; }
+  Mutex* constant_canonicalization_mutex() {
+    return &constant_canonicalization_mutex_;
   }
-  Mutex* constant_canonicalization_mutex() const {
-    return constant_canonicalization_mutex_;
-  }
-  Mutex* megamorphic_lookup_mutex() const { return megamorphic_lookup_mutex_; }
+  Mutex* megamorphic_lookup_mutex() { return &megamorphic_lookup_mutex_; }
 
-  Mutex* kernel_data_lib_cache_mutex() const {
-    return kernel_data_lib_cache_mutex_;
-  }
-  Mutex* kernel_data_class_cache_mutex() const {
-    return kernel_data_class_cache_mutex_;
+  Mutex* kernel_data_lib_cache_mutex() { return &kernel_data_lib_cache_mutex_; }
+  Mutex* kernel_data_class_cache_mutex() {
+    return &kernel_data_class_cache_mutex_;
   }
 
   // Any access to constants arrays must be locked since mutator and
   // background compiler can access the arrays at the same time.
-  Mutex* kernel_constants_mutex() const { return kernel_constants_mutex_; }
+  Mutex* kernel_constants_mutex() { return &kernel_constants_mutex_; }
 
 #if !defined(PRODUCT)
   Debugger* debugger() const {
@@ -434,9 +430,6 @@
   Simulator* simulator() const { return simulator_; }
   void set_simulator(Simulator* value) { simulator_ = value; }
 
-  Monitor* spawn_count_monitor() const { return spawn_count_monitor_; }
-  intptr_t* spawn_count() { return &spawn_count_; }
-
   void IncrementSpawnCount();
   void DecrementSpawnCount();
   void WaitForOutstandingSpawns();
@@ -1022,14 +1015,14 @@
   ApiState* api_state_ = nullptr;
   Random random_;
   Simulator* simulator_ = nullptr;
-  Mutex* mutex_;          // Protects compiler stats.
-  Mutex* symbols_mutex_;  // Protects concurrent access to the symbol table.
-  Mutex* type_canonicalization_mutex_;      // Protects type canonicalization.
-  Mutex* constant_canonicalization_mutex_;  // Protects const canonicalization.
-  Mutex* megamorphic_lookup_mutex_;  // Protects megamorphic table lookup.
-  Mutex* kernel_data_lib_cache_mutex_;
-  Mutex* kernel_data_class_cache_mutex_;
-  Mutex* kernel_constants_mutex_;
+  Mutex mutex_;          // Protects compiler stats.
+  Mutex symbols_mutex_;  // Protects concurrent access to the symbol table.
+  Mutex type_canonicalization_mutex_;      // Protects type canonicalization.
+  Mutex constant_canonicalization_mutex_;  // Protects const canonicalization.
+  Mutex megamorphic_lookup_mutex_;         // Protects megamorphic table lookup.
+  Mutex kernel_data_lib_cache_mutex_;
+  Mutex kernel_data_class_cache_mutex_;
+  Mutex kernel_constants_mutex_;
   MessageHandler* message_handler_ = nullptr;
   IsolateSpawnState* spawn_state_ = nullptr;
   intptr_t defer_finalization_count_ = 0;
@@ -1057,13 +1050,13 @@
   intptr_t loading_invalidation_gen_ = kInvalidGen;
 
   // Protect access to boxed_field_list_.
-  Mutex* field_list_mutex_;
+  Mutex field_list_mutex_;
   // List of fields that became boxed and that trigger deoptimization.
   RawGrowableObjectArray* boxed_field_list_;
 
   // This guards spawn_count_. An isolate cannot complete shutdown and be
   // destroyed while there are child isolates in the midst of a spawn.
-  Monitor* spawn_count_monitor_;
+  Monitor spawn_count_monitor_;
   intptr_t spawn_count_ = 0;
 
   HandlerInfoCache handler_info_cache_;
diff --git a/runtime/vm/kernel_isolate.cc b/runtime/vm/kernel_isolate.cc
index 660934f..ef25d78 100644
--- a/runtime/vm/kernel_isolate.cc
+++ b/runtime/vm/kernel_isolate.cc
@@ -388,7 +388,7 @@
 class KernelCompilationRequest : public ValueObject {
  public:
   KernelCompilationRequest()
-      : monitor_(new Monitor()),
+      : monitor_(),
         port_(Dart_NewNativePort("kernel-compilation-port",
                                  &HandleResponse,
                                  false)),
@@ -405,7 +405,6 @@
   ~KernelCompilationRequest() {
     UnregisterRequest(this);
     Dart_CloseNativePort(port_);
-    delete monitor_;
   }
 
   Dart_KernelCompilationResult SendAndWaitForResponse(
@@ -527,7 +526,7 @@
 
       // Wait for reply to arrive.
       VMTagScope tagScope(thread, VMTag::kLoadWaitTagId);
-      MonitorLocker ml(monitor_);
+      MonitorLocker ml(&monitor_);
       while (result_.status == Dart_KernelCompilationStatus_Unknown) {
         ml.Wait();
       }
@@ -715,7 +714,7 @@
 
     // Wait for reply to arrive.
     VMTagScope tagScope(Thread::Current(), VMTag::kLoadWaitTagId);
-    MonitorLocker ml(monitor_);
+    MonitorLocker ml(&monitor_);
     while (result_.status == Dart_KernelCompilationStatus_Unknown) {
       ml.Wait();
     }
@@ -756,7 +755,7 @@
 
     Dart_CObject** response = message->value.as_array.values;
 
-    MonitorLocker ml(monitor_);
+    MonitorLocker ml(&monitor_);
 
     ASSERT(response[0]->type == Dart_CObject_kInt32);
     result_.status = static_cast<Dart_KernelCompilationStatus>(
@@ -822,7 +821,7 @@
   // Guarded by requests_monitor_ lock.
   static KernelCompilationRequest* requests_;
 
-  Monitor* monitor_;
+  Monitor monitor_;
   Dart_Port port_;
 
   // Linked list of active requests. Guarded by requests_monitor_ lock.
diff --git a/runtime/vm/os_thread.cc b/runtime/vm/os_thread.cc
index 2cfe8fb..e3e9ddf 100644
--- a/runtime/vm/os_thread.cc
+++ b/runtime/vm/os_thread.cc
@@ -33,7 +33,7 @@
       trace_id_(OSThread::GetCurrentThreadTraceId()),
 #endif
       name_(NULL),
-      timeline_block_lock_(new Mutex()),
+      timeline_block_lock_(),
       timeline_block_(NULL),
       thread_list_next_(NULL),
       thread_interrupt_disabled_(1),  // Thread interrupts disabled by default.
@@ -85,7 +85,6 @@
   }
 #endif
   timeline_block_ = NULL;
-  delete timeline_block_lock_;
   free(name_);
 }
 
diff --git a/runtime/vm/os_thread.h b/runtime/vm/os_thread.h
index 9a95345..7d27cc8 100644
--- a/runtime/vm/os_thread.h
+++ b/runtime/vm/os_thread.h
@@ -38,6 +38,35 @@
 class ThreadState;
 class TimelineEventBlock;
 
+class Mutex {
+ public:
+  explicit Mutex(NOT_IN_PRODUCT(const char* name = "anonymous mutex"));
+  ~Mutex();
+
+  bool IsOwnedByCurrentThread() const;
+
+ private:
+  void Lock();
+  bool TryLock();  // Returns false if lock is busy and locking failed.
+  void Unlock();
+
+  MutexData data_;
+  NOT_IN_PRODUCT(const char* name_);
+#if defined(DEBUG)
+  ThreadId owner_;
+#endif  // defined(DEBUG)
+
+  friend class MallocLocker;
+  friend class MutexLocker;
+  friend class SafepointMutexLocker;
+  friend class OSThreadIterator;
+  friend class TimelineEventBlockIterator;
+  friend class TimelineEventRecorder;
+  friend class PageSpace;
+  friend void Dart_TestMutex();
+  DISALLOW_COPY_AND_ASSIGN(Mutex);
+};
+
 class BaseThread {
  public:
   bool is_os_thread() const { return is_os_thread_; }
@@ -86,7 +115,7 @@
     name_ = strdup(name);
   }
 
-  Mutex* timeline_block_lock() const { return timeline_block_lock_; }
+  Mutex* timeline_block_lock() const { return &timeline_block_lock_; }
 
   // Only safe to access when holding |timeline_block_lock_|.
   TimelineEventBlock* timeline_block() const { return timeline_block_; }
@@ -259,7 +288,7 @@
 #endif
   char* name_;  // A name for this thread.
 
-  Mutex* timeline_block_lock_;
+  mutable Mutex timeline_block_lock_;
   TimelineEventBlock* timeline_block_;
 
   // All |Thread|s are registered in the thread list.
@@ -308,44 +337,6 @@
   OSThread* next_;
 };
 
-class Mutex {
- public:
-  explicit Mutex(NOT_IN_PRODUCT(const char* name = "anonymous mutex"));
-  ~Mutex();
-
-#if defined(DEBUG)
-  bool IsOwnedByCurrentThread() const {
-    return owner_ == OSThread::GetCurrentThreadId();
-  }
-#else
-  bool IsOwnedByCurrentThread() const {
-    UNREACHABLE();
-    return false;
-  }
-#endif
-
- private:
-  void Lock();
-  bool TryLock();  // Returns false if lock is busy and locking failed.
-  void Unlock();
-
-  MutexData data_;
-  NOT_IN_PRODUCT(const char* name_);
-#if defined(DEBUG)
-  ThreadId owner_;
-#endif  // defined(DEBUG)
-
-  friend class MallocLocker;
-  friend class MutexLocker;
-  friend class SafepointMutexLocker;
-  friend class OSThreadIterator;
-  friend class TimelineEventBlockIterator;
-  friend class TimelineEventRecorder;
-  friend class PageSpace;
-  friend void Dart_TestMutex();
-  DISALLOW_COPY_AND_ASSIGN(Mutex);
-};
-
 class Monitor {
  public:
   enum WaitResult { kNotified, kTimedOut };
@@ -390,6 +381,15 @@
   DISALLOW_COPY_AND_ASSIGN(Monitor);
 };
 
+inline bool Mutex::IsOwnedByCurrentThread() const {
+#if defined(DEBUG)
+  return owner_ == OSThread::GetCurrentThreadId();
+#else
+  UNREACHABLE();
+  return false;
+#endif
+}
+
 }  // namespace dart
 
 #endif  // RUNTIME_VM_OS_THREAD_H_
diff --git a/runtime/vm/profiler.cc b/runtime/vm/profiler.cc
index ed05a74..52cbcf5 100644
--- a/runtime/vm/profiler.cc
+++ b/runtime/vm/profiler.cc
@@ -166,14 +166,13 @@
 }
 
 AllocationSampleBuffer::AllocationSampleBuffer(intptr_t capacity)
-    : SampleBuffer(capacity), mutex_(new Mutex()), free_sample_list_(NULL) {}
+    : SampleBuffer(capacity), mutex_(), free_sample_list_(NULL) {}
 
 SampleBuffer::~SampleBuffer() {
   delete memory_;
 }
 
 AllocationSampleBuffer::~AllocationSampleBuffer() {
-  delete mutex_;
 }
 
 Sample* SampleBuffer::At(intptr_t idx) const {
@@ -208,7 +207,7 @@
 }
 
 void AllocationSampleBuffer::FreeAllocationSample(Sample* sample) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   while (sample != NULL) {
     intptr_t continuation_index = -1;
     if (sample->is_continuation_sample()) {
@@ -243,7 +242,7 @@
 }
 
 Sample* AllocationSampleBuffer::ReserveSampleAndLink(Sample* previous) {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   ASSERT(previous != NULL);
   intptr_t next_index = ReserveSampleSlotLocked();
   if (next_index < 0) {
@@ -262,7 +261,7 @@
 }
 
 Sample* AllocationSampleBuffer::ReserveSample() {
-  MutexLocker ml(mutex_);
+  MutexLocker ml(&mutex_);
   intptr_t index = ReserveSampleSlotLocked();
   if (index < 0) {
     return NULL;
diff --git a/runtime/vm/profiler.h b/runtime/vm/profiler.h
index d5122fc7d..f0280fc 100644
--- a/runtime/vm/profiler.h
+++ b/runtime/vm/profiler.h
@@ -655,7 +655,7 @@
   void FreeAllocationSample(Sample* sample);
 
  private:
-  Mutex* mutex_;
+  Mutex mutex_;
   Sample* free_sample_list_;
 
   DISALLOW_COPY_AND_ASSIGN(AllocationSampleBuffer);
diff --git a/runtime/vm/thread.cc b/runtime/vm/thread.cc
index e4b6658..6b48726 100644
--- a/runtime/vm/thread.cc
+++ b/runtime/vm/thread.cc
@@ -44,8 +44,6 @@
     delete api_reusable_scope_;
     api_reusable_scope_ = NULL;
   }
-  delete thread_lock_;
-  thread_lock_ = NULL;
 }
 
 #if defined(DEBUG)
@@ -80,7 +78,7 @@
       safepoint_state_(0),
       task_kind_(kUnknownTask),
       dart_stream_(NULL),
-      thread_lock_(new Monitor()),
+      thread_lock_(),
       api_reusable_scope_(NULL),
       api_top_scope_(NULL),
       no_callback_scope_depth_(0),
@@ -406,7 +404,7 @@
 void Thread::SetStackLimit(uword limit) {
   // The thread setting the stack limit is not necessarily the thread which
   // the stack limit is being set on.
-  MonitorLocker ml(thread_lock_);
+  MonitorLocker ml(&thread_lock_);
   if (!HasScheduledInterrupts()) {
     // No interrupt pending, set stack_limit_ too.
     stack_limit_ = limit;
@@ -419,12 +417,12 @@
 }
 
 void Thread::ScheduleInterrupts(uword interrupt_bits) {
-  MonitorLocker ml(thread_lock_);
+  MonitorLocker ml(&thread_lock_);
   ScheduleInterruptsLocked(interrupt_bits);
 }
 
 void Thread::ScheduleInterruptsLocked(uword interrupt_bits) {
-  ASSERT(thread_lock_->IsOwnedByCurrentThread());
+  ASSERT(thread_lock_.IsOwnedByCurrentThread());
   ASSERT((interrupt_bits & ~kInterruptsMask) == 0);  // Must fit in mask.
 
   // Check to see if any of the requested interrupts should be deferred.
@@ -444,7 +442,7 @@
 }
 
 uword Thread::GetAndClearInterrupts() {
-  MonitorLocker ml(thread_lock_);
+  MonitorLocker ml(&thread_lock_);
   if (stack_limit_ == saved_stack_limit_) {
     return 0;  // No interrupt was requested.
   }
@@ -454,7 +452,7 @@
 }
 
 void Thread::DeferOOBMessageInterrupts() {
-  MonitorLocker ml(thread_lock_);
+  MonitorLocker ml(&thread_lock_);
   defer_oob_messages_count_++;
   if (defer_oob_messages_count_ > 1) {
     // OOB message interrupts are already deferred.
@@ -482,7 +480,7 @@
 }
 
 void Thread::RestoreOOBMessageInterrupts() {
-  MonitorLocker ml(thread_lock_);
+  MonitorLocker ml(&thread_lock_);
   defer_oob_messages_count_--;
   if (defer_oob_messages_count_ > 0) {
     return;
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index 388cfae..040401e 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -337,7 +337,7 @@
   }
 
   // Monitor corresponding to this thread.
-  Monitor* thread_lock() const { return thread_lock_; }
+  Monitor* thread_lock() const { return &thread_lock_; }
 
   // The reusable api local scope for this thread.
   ApiLocalScope* api_reusable_scope() const { return api_reusable_scope_; }
@@ -869,7 +869,7 @@
 
   TaskKind task_kind_;
   TimelineStream* dart_stream_;
-  Monitor* thread_lock_;
+  mutable Monitor thread_lock_;
   ApiLocalScope* api_reusable_scope_;
   ApiLocalScope* api_top_scope_;
   int32_t no_callback_scope_depth_;
diff --git a/runtime/vm/thread_registry.cc b/runtime/vm/thread_registry.cc
index 2052d4a..2dcabf8 100644
--- a/runtime/vm/thread_registry.cc
+++ b/runtime/vm/thread_registry.cc
@@ -30,9 +30,6 @@
       delete thread;
     }
   }
-
-  // Delete monitor.
-  delete threads_lock_;
 }
 
 // Gets a free Thread structure, we special case the mutator thread
diff --git a/runtime/vm/thread_registry.h b/runtime/vm/thread_registry.h
index fda9474..b6c0ec5 100644
--- a/runtime/vm/thread_registry.h
+++ b/runtime/vm/thread_registry.h
@@ -23,7 +23,7 @@
 class ThreadRegistry {
  public:
   ThreadRegistry()
-      : threads_lock_(new Monitor()),
+      : threads_lock_(),
         active_list_(NULL),
         free_list_(NULL),
         mutator_thread_(NULL) {}
@@ -50,7 +50,7 @@
 
  private:
   Thread* active_list() const { return active_list_; }
-  Monitor* threads_lock() const { return threads_lock_; }
+  Monitor* threads_lock() const { return &threads_lock_; }
 
   Thread* GetFreeThreadLocked(Isolate* isolate, bool is_mutator);
   void ReturnThreadLocked(bool is_mutator, Thread* thread);
@@ -61,7 +61,7 @@
 
   // This monitor protects the threads list for an isolate, it is used whenever
   // we need to iterate over threads (both active and free) in an isolate.
-  Monitor* threads_lock_;
+  mutable Monitor threads_lock_;
   Thread* active_list_;  // List of active threads in the isolate.
   Thread* free_list_;    // Free list of Thread objects that can be reused.