
src/hotspot/share/gc/epsilon/epsilonHeap.cpp

New version of the file (after the change):

   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/epsilon/epsilonHeap.hpp"
  26 #include "gc/epsilon/epsilonMemoryPool.hpp"
  27 #include "gc/epsilon/epsilonThreadLocalData.hpp"
  28 #include "gc/shared/gcArguments.hpp"
  29 #include "gc/shared/locationPrinter.inline.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/allocation.inline.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "memory/universe.hpp"
  34 #include "runtime/globals.hpp"
  35 
  36 jint EpsilonHeap::initialize() {
  37   size_t align = HeapAlignment;
  38   size_t init_byte_size = align_up(InitialHeapSize, align);
  39   size_t max_byte_size  = align_up(MaxHeapSize, align);
  40 
  41   // Initialize backing storage
  42   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  43   _virtual_space.initialize(heap_rs, init_byte_size);
  44 
  45   MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());
  46   MemRegion  reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());
  47 
  48   initialize_reserved_region(heap_rs);
  49 
  50   _space = new ContiguousSpace();
  51   _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);
  52 
  53   // Precompute hot fields
  54   _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  55   _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  56   _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  57   _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;
  58 
  59   // Enable monitoring
  60   _monitoring_support = new EpsilonMonitoringSupport(this);
  61   _last_counter_update = 0;
  62   _last_heap_print = 0;
  63 
  64   // Install barrier set
  65   BarrierSet::set_barrier_set(new EpsilonBarrierSet());
  66 
  67   // All done, print out the configuration
  68   if (init_byte_size != max_byte_size) {

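Both heap sizes above are rounded up to HeapAlignment before the backing storage is reserved, so -Xms/-Xmx values that are not multiples of the alignment get bumped to the next boundary. A standalone sketch of that rounding with made-up numbers; this align_up is a hypothetical stand-in for HotSpot's, valid for power-of-two alignments:

  #include <cassert>
  #include <cstddef>

  // Hypothetical stand-in for HotSpot's align_up(), for power-of-two alignments.
  static size_t align_up(size_t x, size_t align) {
    return (x + align - 1) & ~(align - 1);
  }

  int main() {
    const size_t M = 1024 * 1024;
    // E.g. -Xms100m under a (made-up) 8M HeapAlignment commits 104M up front:
    assert(align_up(100 * M, 8 * M) == 104 * M);
    // Already-aligned values are unchanged:
    assert(align_up(128 * M, 8 * M) == 128 * M);
    return 0;
  }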

  93 
  94 void EpsilonHeap::initialize_serviceability() {
  95   _pool = new EpsilonMemoryPool(this);
  96   _memory_manager.add_pool(_pool);
  97 }
  98 
  99 GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
 100   GrowableArray<GCMemoryManager*> memory_managers(1);
 101   memory_managers.append(&_memory_manager);
 102   return memory_managers;
 103 }
 104 
 105 GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
 106   GrowableArray<MemoryPool*> memory_pools(1);
 107   memory_pools.append(_pool);
 108   return memory_pools;
 109 }
 110 
 111 size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
 112   // Return max allocatable TLAB size, and let allocation path figure out
 113   // the actual allocation size. Note: result should be in bytes.
 114   return _max_tlab_size * HeapWordSize;
 115 }
 116 
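unsafe_max_tlab_alloc() must report its result in bytes, while _max_tlab_size (precomputed in initialize() above from the byte-sized EpsilonMaxTLABSize flag) is kept in heap words; hence the multiplication by HeapWordSize. A small unit-conversion check with made-up values, assuming an 8-byte heap word as on 64-bit builds:

  #include <cassert>
  #include <cstddef>

  int main() {
    const size_t heap_word_size = 8;                  // assumed 64-bit heap word
    const size_t max_tlab_bytes = 4 * 1024 * 1024;    // e.g. -XX:EpsilonMaxTLABSize=4m
    size_t max_tlab_words = max_tlab_bytes / heap_word_size;    // stored in words
    // What unsafe_max_tlab_alloc() hands back to the TLAB machinery:
    assert(max_tlab_words * heap_word_size == max_tlab_bytes);  // bytes again
    return 0;
  }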
 117 EpsilonHeap* EpsilonHeap::heap() {
 118   CollectedHeap* heap = Universe::heap();
 119   assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
 120   assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
 121   return (EpsilonHeap*)heap;
 122 }
 123 
 124 HeapWord* EpsilonHeap::allocate_work(size_t size) {
 125   assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);
 126 
 127   HeapWord* res = _space->par_allocate(size);
 128 
 129   while (res == NULL) {
 130     // Allocation failed, attempt expansion, and retry:
 131     MutexLocker ml(Heap_lock);
 132 
 133     size_t space_left = max_capacity() - capacity();
 134     size_t want_space = MAX2(size, EpsilonMinHeapExpand);

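allocate_work() first tries the lock-free bump-the-pointer fast path (par_allocate), and only takes Heap_lock to grow the committed region when that fails; the listing is cut off here, but the visible lines show the retry loop computing how much to expand. A self-contained sketch of that shape, using plain C++ stand-ins rather than the real HotSpot types:

  #include <atomic>
  #include <cstddef>
  #include <mutex>

  // Illustrative stand-ins only; none of these are the real HotSpot classes.
  struct SketchHeap {
    std::atomic<size_t> _top{0};   // bump-pointer cursor, in words
    size_t _committed = 0;         // currently committed size, in words
    size_t _reserved  = 1 << 20;   // reservation limit, in words
    std::mutex _heap_lock;

    // Lock-free fast path, analogous to ContiguousSpace::par_allocate().
    bool par_allocate(size_t size, size_t* out) {
      size_t cur = _top.load();
      while (cur + size <= _committed) {
        if (_top.compare_exchange_weak(cur, cur + size)) {
          *out = cur;              // "address" of the new object, as an offset
          return true;
        }
      }
      return false;
    }

    // Slow path mirroring allocate_work(): expand under the lock, then retry.
    bool allocate_work(size_t size, size_t min_expand, size_t* out) {
      while (!par_allocate(size, out)) {
        std::lock_guard<std::mutex> ml(_heap_lock);
        size_t space_left = _reserved - _committed;    // max_capacity() - capacity()
        size_t want_space = size > min_expand ? size : min_expand;
        if (want_space > space_left) {
          if (size > space_left) return false;         // heap exhausted
          want_space = space_left;                     // take what remains
        }
        _committed += want_space;  // stands in for VirtualSpace::expand_by()
      }
      return true;
    }
  };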

 287 }
 288 
 289 void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
 290   collect(gc_cause());
 291 }
 292 
 293 void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
 294   _space->safe_object_iterate(cl);
 295 }
 296 
 297 void EpsilonHeap::print_on(outputStream *st) const {
 298   st->print_cr("Epsilon Heap");
 299 
 300   // Cast away constness:
 301   ((VirtualSpace)_virtual_space).print_on(st);
 302 
 303   st->print_cr("Allocation space:");
 304   _space->print_on(st);
 305 
 306   MetaspaceUtils::print_on(st);
 307 }
 308 
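An aside on the cast above: C-style casting _virtual_space to a VirtualSpace value makes a temporary copy, which is how this const method gets to call print_on() on it, presumably because VirtualSpace::print_on() is not declared const; the copy is printed and discarded.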
 309 bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
 310   return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
 311 }
 312 
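print_location() is one of the additions in this version of the file: it delegates to the shared BlockLocationPrinter from locationPrinter.inline.hpp (included above), which lets generic VM address printouts, such as those in hs_err logs, describe addresses that fall inside the Epsilon heap.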
 313 void EpsilonHeap::print_tracing_info() const {
 314   print_heap_info(used());
 315   print_metaspace_info();
 316 }
 317 
 318 void EpsilonHeap::print_heap_info(size_t used) const {
 319   size_t reserved  = max_capacity();
 320   size_t committed = capacity();
 321 
 322   if (reserved != 0) {
 323     log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
 324                  SIZE_FORMAT "%s (%.2f%%) used",
 325             byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
 326             byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
 327             committed * 100.0 / reserved,
 328             byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
 329             used * 100.0 / reserved);
 330   } else {
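With made-up numbers plugged into the format string above, say 512M reserved, 104M committed, and 52M used, the emitted line would read (modulo unified-logging decorations):

  Heap: 512M reserved, 104M (20.31%) committed, 52M (10.16%) used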

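For reference, the substantive deltas between the two listings on this page, condensed into unified-diff form (the new version is above, the old version follows below):

   #include "gc/shared/gcArguments.hpp"
  +#include "gc/shared/locationPrinter.inline.hpp"

  -  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  +  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);

  -  initialize_reserved_region(reserved_region.start(), reserved_region.end());
  +  initialize_reserved_region(heap_rs);

     // Return max allocatable TLAB size, and let allocation path figure out
  -  // the actual TLAB allocation size.
  -  return _max_tlab_size;
  +  // the actual allocation size. Note: result should be in bytes.
  +  return _max_tlab_size * HeapWordSize;

  +bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
  +  return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
  +}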

Old version of the file (before the change):

   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/epsilon/epsilonHeap.hpp"
  26 #include "gc/epsilon/epsilonMemoryPool.hpp"
  27 #include "gc/epsilon/epsilonThreadLocalData.hpp"
  28 #include "gc/shared/gcArguments.hpp"
  29 #include "memory/allocation.hpp"
  30 #include "memory/allocation.inline.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "memory/universe.hpp"
  33 #include "runtime/globals.hpp"
  34 
  35 jint EpsilonHeap::initialize() {
  36   size_t align = HeapAlignment;
  37   size_t init_byte_size = align_up(InitialHeapSize, align);
  38   size_t max_byte_size  = align_up(MaxHeapSize, align);
  39 
  40   // Initialize backing storage
  41   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  42   _virtual_space.initialize(heap_rs, init_byte_size);
  43 
  44   MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());
  45   MemRegion  reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());
  46 
  47   initialize_reserved_region(reserved_region.start(), reserved_region.end());
  48 
  49   _space = new ContiguousSpace();
  50   _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);
  51 
  52   // Precompute hot fields
  53   _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  54   _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  55   _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  56   _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;
  57 
  58   // Enable monitoring
  59   _monitoring_support = new EpsilonMonitoringSupport(this);
  60   _last_counter_update = 0;
  61   _last_heap_print = 0;
  62 
  63   // Install barrier set
  64   BarrierSet::set_barrier_set(new EpsilonBarrierSet());
  65 
  66   // All done, print out the configuration
  67   if (init_byte_size != max_byte_size) {


  92 
  93 void EpsilonHeap::initialize_serviceability() {
  94   _pool = new EpsilonMemoryPool(this);
  95   _memory_manager.add_pool(_pool);
  96 }
  97 
  98 GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  99   GrowableArray<GCMemoryManager*> memory_managers(1);
 100   memory_managers.append(&_memory_manager);
 101   return memory_managers;
 102 }
 103 
 104 GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
 105   GrowableArray<MemoryPool*> memory_pools(1);
 106   memory_pools.append(_pool);
 107   return memory_pools;
 108 }
 109 
 110 size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
 111   // Return max allocatable TLAB size, and let allocation path figure out
 112   // the actual TLAB allocation size.
 113   return _max_tlab_size;
 114 }
 115 
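Note the discrepancy fixed in the new version above: this older variant returns _max_tlab_size in heap words even though callers of unsafe_max_tlab_alloc() expect bytes, under-reporting the limit by a factor of HeapWordSize.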
 116 EpsilonHeap* EpsilonHeap::heap() {
 117   CollectedHeap* heap = Universe::heap();
 118   assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
 119   assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
 120   return (EpsilonHeap*)heap;
 121 }
 122 
 123 HeapWord* EpsilonHeap::allocate_work(size_t size) {
 124   assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);
 125 
 126   HeapWord* res = _space->par_allocate(size);
 127 
 128   while (res == NULL) {
 129     // Allocation failed, attempt expansion, and retry:
 130     MutexLocker ml(Heap_lock);
 131 
 132     size_t space_left = max_capacity() - capacity();
 133     size_t want_space = MAX2(size, EpsilonMinHeapExpand);


 286 }
 287 
 288 void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
 289   collect(gc_cause());
 290 }
 291 
 292 void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
 293   _space->safe_object_iterate(cl);
 294 }
 295 
 296 void EpsilonHeap::print_on(outputStream *st) const {
 297   st->print_cr("Epsilon Heap");
 298 
 299   // Cast away constness:
 300   ((VirtualSpace)_virtual_space).print_on(st);
 301 
 302   st->print_cr("Allocation space:");
 303   _space->print_on(st);
 304 
 305   MetaspaceUtils::print_on(st);
 306 }
 307 
 308 void EpsilonHeap::print_tracing_info() const {
 309   print_heap_info(used());
 310   print_metaspace_info();
 311 }
 312 
 313 void EpsilonHeap::print_heap_info(size_t used) const {
 314   size_t reserved  = max_capacity();
 315   size_t committed = capacity();
 316 
 317   if (reserved != 0) {
 318     log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
 319                  SIZE_FORMAT "%s (%.2f%%) used",
 320             byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
 321             byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
 322             committed * 100.0 / reserved,
 323             byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
 324             used * 100.0 / reserved);
 325   } else {

