/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageCache.inline.hpp"
#include "gc/z/zSafeDelete.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/debug.hpp"

static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

class ZPageAllocRequest : public StackObj {
  friend class ZList<ZPageAllocRequest>;

private:
  const uint8_t                _type;
  const size_t                 _size;
  const ZAllocationFlags       _flags;
  const unsigned int           _total_collections;
  ZListNode<ZPageAllocRequest> _node;
  ZFuture<ZPage*>              _result;

public:
  ZPageAllocRequest(uint8_t type, size_t size, ZAllocationFlags flags, unsigned int total_collections) :
      _type(type),
      _size(size),
      _flags(flags),
      _total_collections(total_collections) {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

  unsigned int total_collections() const {
    return _total_collections;
  }

  ZPage* wait() {
    return _result.get();
  }

  void satisfy(ZPage* page) {
    _result.set(page);
  }
};

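// Sentinel "page" used to wake a stalled allocation request without
// satisfying it, telling the waiting thread to start another GC cycle
// and wait again.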
ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1;

ZPageAllocator::ZPageAllocator(size_t min_capacity,
                               size_t initial_capacity,
                               size_t max_capacity,
                               size_t max_reserve) :
    _lock(),
    _virtual(),
    _physical(),
    _cache(),
    _min_capacity(min_capacity),
    _max_capacity(max_capacity),
    _max_reserve(max_reserve),
    _current_max_capacity(max_capacity),
    _capacity(0),
    _used_high(0),
    _used_low(0),
    _used(0),
    _allocated(0),
    _reclaimed(0),
    _queue(),
    _safe_delete(),
    _uncommit(false),
    _initialized(false) {

  if (!_virtual.is_initialized() || !_physical.is_initialized()) {
    return;
  }

  log_info(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
  log_info(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
  log_info(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
  log_info(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
  log_info(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");

  // Warn if system limits could stop us from reaching max capacity
  _physical.warn_commit_limits(max_capacity);

  // Commit initial capacity
  _capacity = _physical.commit(initial_capacity);
  if (_capacity != initial_capacity) {
    log_error(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
    return;
  }

  // If uncommit is not explicitly disabled, max capacity is greater than
  // min capacity, and uncommit is supported by the platform, then we will
  // try to uncommit unused memory.
  _uncommit = ZUncommit && (max_capacity > min_capacity) && _physical.supports_uncommit();
  if (_uncommit) {
    log_info(gc, init)("Uncommit: Enabled, Delay: " UINTX_FORMAT "s", ZUncommitDelay);
  } else {
    log_info(gc, init)("Uncommit: Disabled");
  }

  // Pre-map initial capacity
  prime_cache(initial_capacity);

  // Successfully initialized
  _initialized = true;
}

void ZPageAllocator::prime_cache(size_t size) {
  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  guarantee(!pmem.is_null(), "Invalid size");

  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size, true /* alloc_from_front */);
  guarantee(!vmem.is_null(), "Invalid size");

  // Allocate page
  ZPage* const page = new ZPage(vmem, pmem);

  // Map page
  map_page(page);
  page->set_pre_mapped();

  // Add page to cache
  page->set_last_used();
  _cache.free_page(page);
}

bool ZPageAllocator::is_initialized() const {
  return _initialized;
}

size_t ZPageAllocator::min_capacity() const {
  return _min_capacity;
}

size_t ZPageAllocator::max_capacity() const {
  return _max_capacity;
}

size_t ZPageAllocator::current_max_capacity() const {
  return _current_max_capacity;
}

size_t ZPageAllocator::capacity() const {
  return _capacity;
}

size_t ZPageAllocator::max_reserve() const {
  return _max_reserve;
}

size_t ZPageAllocator::used_high() const {
  return _used_high;
}

size_t ZPageAllocator::used_low() const {
  return _used_low;
}

size_t ZPageAllocator::used() const {
  return _used;
}

size_t ZPageAllocator::unused() const {
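  // _used + _max_reserve can temporarily exceed _capacity, so compute
  // with signed arithmetic and clamp the result at zero.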
  const ssize_t unused = (ssize_t)_capacity - (ssize_t)_used - (ssize_t)_max_reserve;
  return unused > 0 ? (size_t)unused : 0;
}

size_t ZPageAllocator::allocated() const {
  return _allocated;
}

size_t ZPageAllocator::reclaimed() const {
  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
}

void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _allocated = 0;
  _reclaimed = 0;
  _used_high = _used_low = _used;
}

void ZPageAllocator::increase_used(size_t size, bool relocation) {
  if (relocation) {
    // Allocating a page for the purpose of relocation has a
    // negative contribution to the number of reclaimed bytes.
    _reclaimed -= size;
  }
  _allocated += size;
  _used += size;
  if (_used > _used_high) {
    _used_high = _used;
  }
}

void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
  if (reclaimed) {
    // Only pages explicitly released with the reclaimed flag set
    // count as reclaimed bytes. This flag is typically true when
    // a worker thread releases a page after relocation, and is
    // typically false when we release a page to undo an allocation.
    _reclaimed += size;
  }
  _used -= size;
  if (_used < _used_low) {
    _used_low = _used;
  }
}

ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size);
  if (vmem.is_null()) {
    // Out of address space
    return NULL;
  }

  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  assert(!pmem.is_null(), "Invalid size");

  // Allocate page
  return new ZPage(type, vmem, pmem);
}

void ZPageAllocator::destroy_page(ZPage* page) {
  const ZVirtualMemory& vmem = page->virtual_memory();
  const ZPhysicalMemory& pmem = page->physical_memory();

  // Unmap memory
  _physical.unmap(pmem, vmem.start());

  // Free physical memory
  _physical.free(pmem);

  // Free virtual memory
  _virtual.free(vmem);

  // Delete page safely
  _safe_delete(page);
}

void ZPageAllocator::map_page(const ZPage* page) const {
  // Map physical memory
  if (!page->is_mapped()) {
    _physical.map(page->physical_memory(), page->start());
  } else if (ZVerifyViews) {
    _physical.debug_map(page->physical_memory(), page->start());
  }
}

size_t ZPageAllocator::max_available(bool no_reserve) const {
  size_t available = _current_max_capacity - _used;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  }

  return available;
}

bool ZPageAllocator::ensure_available(size_t size, bool no_reserve) {
  if (max_available(no_reserve) < size) {
    // Not enough free memory
    return false;
  }

  // We add the max_reserve to the requested size to avoid losing
  // the reserve because of failure to increase capacity before
  // reaching max capacity.
  size += _max_reserve;

  // Don't try to increase capacity if enough unused capacity
  // is available or if current max capacity has been reached.
  const size_t available = _capacity - _used;
  if (available < size && _capacity < _current_max_capacity) {
    // Try to increase capacity
    const size_t commit = MIN2(size - available, _current_max_capacity - _capacity);
    const size_t committed = _physical.commit(commit);
    _capacity += committed;

    log_trace(gc, heap)("Make Available: Size: " SIZE_FORMAT "M, NoReserve: %s, "
                        "Available: " SIZE_FORMAT "M, Commit: " SIZE_FORMAT "M, "
                        "Committed: " SIZE_FORMAT "M, Capacity: " SIZE_FORMAT "M",
                        size / M, no_reserve ? "True" : "False", available / M,
                        commit / M, committed / M, _capacity / M);

    if (committed != commit) {
      // Failed, or partly failed, to increase capacity. Adjust current
      // max capacity to avoid further attempts to increase capacity.
      log_error(gc)("Forced to lower max Java heap size from "
                    SIZE_FORMAT "M(%.0lf%%) to " SIZE_FORMAT "M(%.0lf%%)",
                    _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
                    _capacity / M, percent_of(_capacity, _max_capacity));

      _current_max_capacity = _capacity;
    }
  }

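  // If this allocation is allowed to use the reserve, drop it from the
  // requested size again so the final check below only requires the
  // originally requested amount to be available.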
  if (!no_reserve) {
    size -= _max_reserve;
  }

  const size_t new_available = _capacity - _used;
  return new_available >= size;
}

void ZPageAllocator::ensure_uncached_available(size_t size) {
  assert(_capacity - _used >= size, "Invalid size");
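  // Committed memory that is neither in use nor held by the page cache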
  const size_t uncached_available = _capacity - _used - _cache.available();
  if (size > uncached_available) {
    flush_cache_for_allocation(size - uncached_available);
  }
}

ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve) {
  if (!ensure_available(size, no_reserve)) {
    // Not enough free memory
    return NULL;
  }

  // Try allocate page from the cache
  ZPage* const page = _cache.alloc_page(type, size);
  if (page != NULL) {
    return page;
  }

  // Try flush pages from the cache
  ensure_uncached_available(size);

  // Create new page
  return create_page(type, size);
}

ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = alloc_page_common_inner(type, size, flags.no_reserve());
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Update used statistics
  increase_used(size, flags.relocation());

  // Send trace event
  ZTracer::tracer()->report_page_alloc(size, _used, max_available(flags.no_reserve()), _cache.available(), flags);

  return page;
}

void ZPageAllocator::check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}

ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  // Prepare to block
  ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections());

  _lock.lock();

  // Try non-blocking allocation
  ZPage* page = alloc_page_common(type, size, flags);
  if (page == NULL) {
    // Allocation failed, enqueue request
    _queue.insert_last(&request);
  }

  _lock.unlock();

  if (page == NULL) {
    // Allocation failed
    ZStatTimer timer(ZCriticalPhaseAllocationStall);

    // We can only block if VM is fully initialized
    check_out_of_memory_during_initialization();

    do {
      // Start asynchronous GC
      ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

      // Wait for allocation to complete or fail
      page = request.wait();
    } while (page == gc_marker);

    {
      // Guard deletion of underlying semaphore. This is a workaround for a
      // bug in sem_post() in glibc < 2.21, where it's not safe to destroy
      // the semaphore immediately after returning from sem_wait(). The
      // reason is that sem_post() can touch the semaphore after a waiting
      // thread has returned from sem_wait(). To avoid this race we are
      // forcing the waiting thread to acquire/release the lock held by the
      // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
      ZLocker<ZLock> locker(&_lock);
    }
  }

  return page;
}

ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZLocker<ZLock> locker(&_lock);
  return alloc_page_common(type, size, flags);
}

ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = flags.non_blocking()
                      ? alloc_page_nonblocking(type, size, flags)
                      : alloc_page_blocking(type, size, flags);
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Map page if needed
  map_page(page);

  // Reset page. This updates the page's sequence number and must
  // be done after page allocation, which potentially blocked in
  // a safepoint where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  return page;
}

void ZPageAllocator::satisfy_alloc_queue() {
  for (;;) {
    ZPageAllocRequest* const request = _queue.first();
    if (request == NULL) {
      // Allocation queue is empty
      return;
    }

    ZPage* const page = alloc_page_common(request->type(), request->size(), request->flags());
    if (page == NULL) {
      // Allocation could not be satisfied, give up
      return;
    }

    // Allocation succeeded, dequeue and satisfy request. Note that
    // the dequeue operation must happen first, since the request
    // will immediately be deallocated once it has been satisfied.
    _queue.remove(request);
    request->satisfy(page);
  }
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker<ZLock> locker(&_lock);

  // Update used statistics
  decrease_used(page->size(), reclaimed);

  // Set time when last used
  page->set_last_used();

  // Cache page
  _cache.free_page(page);

  // Try satisfy blocked allocations
  satisfy_alloc_queue();
}

size_t ZPageAllocator::flush_cache(ZPageCacheFlushClosure* cl) {
  ZList<ZPage> list;

  // Flush pages
  _cache.flush(cl, &list);

  const size_t overflushed = cl->overflushed();
  if (overflushed > 0) {
    // Overflushed, keep part of last page
    ZPage* const page = list.last()->split(overflushed);
    _cache.free_page(page);
  }

  // Destroy pages
  size_t flushed = 0;
  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    flushed += page->size();
    destroy_page(page);
  }

  return flushed;
}

class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
public:
  ZPageCacheFlushForAllocationClosure(size_t requested) :
      ZPageCacheFlushClosure(requested) {}

  virtual bool do_page(const ZPage* page) {
    if (_flushed < _requested) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Don't flush page
    return false;
  }
};

void ZPageAllocator::flush_cache_for_allocation(size_t requested) {
  assert(requested <= _cache.available(), "Invalid request");

  // Flush pages
  ZPageCacheFlushForAllocationClosure cl(requested);
  const size_t flushed = flush_cache(&cl);

  assert(requested == flushed, "Failed to flush");

  const size_t cached_after = _cache.available();
  const size_t cached_before = cached_after + flushed;

  log_info(gc, heap)("Page Cache: " SIZE_FORMAT "M(%.0lf%%)->" SIZE_FORMAT "M(%.0lf%%), "
                     "Flushed: " SIZE_FORMAT "M",
                     cached_before / M, percent_of(cached_before, max_capacity()),
                     cached_after / M, percent_of(cached_after, max_capacity()),
                     flushed / M);

  // Update statistics
  ZStatInc(ZCounterPageCacheFlush, flushed);
}

class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
private:
  const uint64_t _now;
  const uint64_t _delay;
  uint64_t       _timeout;

public:
  ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t delay) :
      ZPageCacheFlushClosure(requested),
      _now(os::elapsedTime()),
      _delay(delay),
      _timeout(_delay) {}

  virtual bool do_page(const ZPage* page) {
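    // Time remaining until this page's uncommit delay expires
    // (zero if it has already expired)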
    const uint64_t expires = page->last_used() + _delay;
    const uint64_t timeout = expires - MIN2(expires, _now);

    if (_flushed < _requested && timeout == 0) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Record shortest non-expired timeout
    _timeout = MIN2(_timeout, timeout);

    // Don't flush page
    return false;
  }

  uint64_t timeout() const {
    return _timeout;
  }
};

uint64_t ZPageAllocator::uncommit(uint64_t delay) {
  // Set the default timeout, when no pages are found in the
  // cache or when uncommit is disabled, equal to the delay.
  uint64_t timeout = delay;

  if (!_uncommit) {
    // Disabled
    return timeout;
  }

  size_t capacity_before;
  size_t capacity_after;
  size_t uncommitted;

  {
    SuspendibleThreadSetJoiner joiner;
    ZLocker<ZLock> locker(&_lock);

    // Don't flush more than we will uncommit. Never uncommit
    // the reserve, and never uncommit below min capacity.
    const size_t needed = MIN2(_used + _max_reserve, _current_max_capacity);
    const size_t guarded = MAX2(needed, _min_capacity);
    const size_t uncommittable = _capacity - guarded;
    const size_t uncached_available = _capacity - _used - _cache.available();
    size_t uncommit = MIN2(uncommittable, uncached_available);
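    // Whatever cannot be covered by uncached available memory must first
    // be flushed out of the page cache before it can be uncommitted.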
    const size_t flush = uncommittable - uncommit;

    if (flush > 0) {
      // Flush pages to uncommit
      ZPageCacheFlushForUncommitClosure cl(flush, delay);
      uncommit += flush_cache(&cl);
      timeout = cl.timeout();
    }

    // Uncommit
    uncommitted = _physical.uncommit(uncommit);
    _capacity -= uncommitted;

    capacity_after = _capacity;
    capacity_before = capacity_after + uncommitted;
  }

  if (uncommitted > 0) {
    log_info(gc, heap)("Capacity: " SIZE_FORMAT "M(%.0lf%%)->" SIZE_FORMAT "M(%.0lf%%), "
                       "Uncommitted: " SIZE_FORMAT "M",
                       capacity_before / M, percent_of(capacity_before, max_capacity()),
                       capacity_after / M, percent_of(capacity_after, max_capacity()),
                       uncommitted / M);

    // Update statistics
    ZStatInc(ZCounterUncommit, uncommitted);
  }

  return timeout;
}

void ZPageAllocator::enable_deferred_delete() const {
  _safe_delete.enable_deferred_delete();
}

void ZPageAllocator::disable_deferred_delete() const {
  _safe_delete.disable_deferred_delete();
}

void ZPageAllocator::debug_map_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_map(page->physical_memory(), page->start());
}

class ZPageCacheDebugMapClosure : public StackObj {
private:
  const ZPageAllocator* const _allocator;

public:
  ZPageCacheDebugMapClosure(const ZPageAllocator* allocator) :
      _allocator(allocator) {}

  virtual void do_page(const ZPage* page) {
    _allocator->debug_map_page(page);
  }
};

void ZPageAllocator::debug_map_cached_pages() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  ZPageCacheDebugMapClosure cl(this);
  _cache.pages_do(&cl);
}

void ZPageAllocator::debug_unmap_all_pages() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_unmap(ZPhysicalMemorySegment(0 /* start */, ZAddressOffsetMax), 0 /* offset */);
}

bool ZPageAllocator::is_alloc_stalled() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return !_queue.is_empty();
}

void ZPageAllocator::check_out_of_memory() {
  ZLocker<ZLock> locker(&_lock);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocRequest* request = _queue.first(); request != NULL; request = _queue.first()) {
    if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) {
      // Start a new GC cycle, keep allocation requests enqueued
      request->satisfy(gc_marker);
      return;
    }

    // Out of memory, fail allocation request
    _queue.remove_first();
    request->satisfy(NULL);
  }
}