< prev index next >

src/hotspot/share/gc/g1/g1MonitoringSupport.cpp

Print this page




 211   GrowableArray<GCMemoryManager*> memory_managers(2);
 212   memory_managers.append(&_incremental_memory_manager);
 213   memory_managers.append(&_full_gc_memory_manager);
 214   return memory_managers;
 215 }
 216 
 217 GrowableArray<MemoryPool*> G1MonitoringSupport::memory_pools() {
 218   GrowableArray<MemoryPool*> memory_pools(3);
 219   memory_pools.append(_eden_space_pool);
 220   memory_pools.append(_survivor_space_pool);
 221   memory_pools.append(_old_gen_pool);
 222   return memory_pools;
 223 }
 224 
// Recompute the cached used/committed sizes for eden, survivor, old and the
// whole heap, distributing the heap's committed bytes across the three spaces
// so that the per-space committed values always add up to the overall total.
void G1MonitoringSupport::recalculate_sizes() {
  assert_heap_locked_or_at_safepoint(true);

  // Hold MonitoringSupport_lock while the cached values are rewritten so
  // other users of the lock observe a consistent snapshot.
  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
  // Recalculate all the sizes from scratch.

  // This never includes used bytes of current allocating heap region.
  _overall_used = _g1h->used_unlocked();
  _eden_space_used = _g1h->eden_regions_used_bytes();
  _survivor_space_used = _g1h->survivor_regions_used_bytes();

  // _overall_used and _eden_space_used are obtained concurrently so
  // may be inconsistent with each other. To prevent _old_gen_used going
  // negative, use the smaller value to subtract.
  _old_gen_used = _overall_used - MIN2(_overall_used, _eden_space_used + _survivor_space_used);

  uint survivor_list_length = _g1h->survivor_regions_count();

  // Max length includes any potential extensions to the young gen
  // we'll do when the GC locker is active.
  uint young_list_max_length = _g1h->policy()->young_list_max_length();
  assert(young_list_max_length >= survivor_list_length, "invariant");
  uint eden_list_max_length = young_list_max_length - survivor_list_length;

  // First calculate the committed sizes that can be calculated independently.
  // Survivor committed is region-granular (count * region size); old committed
  // is the used size rounded up to whole regions.
  _survivor_space_committed = survivor_list_length * HeapRegion::GrainBytes;
  _old_gen_committed = HeapRegion::align_up_to_region_byte_size(_old_gen_used);

  // Next, start with the overall committed size.
  _overall_committed = _g1h->capacity();
  size_t committed = _overall_committed;

  // Remove the committed size we have calculated so far (for the
  // survivor and old space).
  assert(committed >= (_survivor_space_committed + _old_gen_committed), "sanity");
  committed -= _survivor_space_committed + _old_gen_committed;

  // Next, calculate and remove the committed size for the eden.
  _eden_space_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes;
  // Somewhat defensive: be robust in case there are inaccuracies in
  // the calculations
  _eden_space_committed = MIN2(_eden_space_committed, committed);
  committed -= _eden_space_committed;

  // Finally, give the rest to the old space...
  _old_gen_committed += committed;
  // ..and calculate the young gen committed.
  _young_gen_committed = _eden_space_committed + _survivor_space_committed;

  assert(_overall_committed ==
         (_eden_space_committed + _survivor_space_committed + _old_gen_committed),
         "the committed sizes should add up");
  // Somewhat defensive: cap the eden used size to make sure it
  // never exceeds the committed size.
  _eden_space_used = MIN2(_eden_space_used, _eden_space_committed);
  // _survivor_space_used is calculated during a safepoint and _survivor_space_committed
  // is calculated from survivor region count * heap region size.
  assert(_survivor_space_used <= _survivor_space_committed, "Survivor used bytes(" SIZE_FORMAT
         ") should be less than or equal to survivor committed(" SIZE_FORMAT ")",
         _survivor_space_used, _survivor_space_committed);
  // _old_gen_committed is calculated in terms of _old_gen_used value.
  assert(_old_gen_used <= _old_gen_committed, "Old gen used bytes(" SIZE_FORMAT
         ") should be less than or equal to old gen committed(" SIZE_FORMAT ")",
         _old_gen_used, _old_gen_committed);
}
 289 
 290 void G1MonitoringSupport::update_sizes() {
 291   recalculate_sizes();
 292   if (UsePerfData) {
 293     _eden_space_counters->update_capacity(pad_capacity(_eden_space_committed));
 294     _eden_space_counters->update_used(_eden_space_used);
 295    // only the "to" survivor space is active, so we don't need to
 296     // update the counters for the "from" survivor space
 297     _to_space_counters->update_capacity(pad_capacity(_survivor_space_committed));
 298     _to_space_counters->update_used(_survivor_space_used);
 299     _old_space_counters->update_capacity(pad_capacity(_old_gen_committed));
 300     _old_space_counters->update_used(_old_gen_used);
 301 
 302     _young_gen_counters->update_all();
 303     _old_gen_counters->update_all();
 304 
 305     MetaspaceCounters::update_performance_counters();
 306     CompressedClassSpaceCounters::update_performance_counters();
 307   }




 211   GrowableArray<GCMemoryManager*> memory_managers(2);
 212   memory_managers.append(&_incremental_memory_manager);
 213   memory_managers.append(&_full_gc_memory_manager);
 214   return memory_managers;
 215 }
 216 
 217 GrowableArray<MemoryPool*> G1MonitoringSupport::memory_pools() {
 218   GrowableArray<MemoryPool*> memory_pools(3);
 219   memory_pools.append(_eden_space_pool);
 220   memory_pools.append(_survivor_space_pool);
 221   memory_pools.append(_old_gen_pool);
 222   return memory_pools;
 223 }
 224 
// Recompute the cached used/committed sizes for eden, survivor, old and the
// whole heap from the current region counts, distributing the heap's committed
// bytes so that the per-space committed values add up to the overall total.
void G1MonitoringSupport::recalculate_sizes() {
  assert_heap_locked_or_at_safepoint(true);

  // Hold MonitoringSupport_lock while the cached values are rewritten so
  // other users of the lock observe a consistent snapshot.
  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
  // Recalculate all the sizes from scratch.

  uint young_list_length = _g1h->young_regions_count();
  uint survivor_list_length = _g1h->survivor_regions_count();
  assert(young_list_length >= survivor_list_length, "invariant");
  uint eden_list_length = young_list_length - survivor_list_length;
  // Max length includes any potential extensions to the young gen
  // we'll do when the GC locker is active.
  uint young_list_max_length = _g1h->policy()->young_list_max_length();
  assert(young_list_max_length >= survivor_list_length, "invariant");
  uint eden_list_max_length = young_list_max_length - survivor_list_length;

  _overall_used = _g1h->used_unlocked();
  // Eden/survivor used sizes are approximated at region granularity
  // (region count * region size), so they can exceed the actual used bytes.
  _eden_space_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
  _survivor_space_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
  // subtract_up_to_zero presumably clamps the result at zero (helper defined
  // elsewhere), guarding against the approximations above exceeding
  // _overall_used.
  _old_gen_used = subtract_up_to_zero(_overall_used, _eden_space_used + _survivor_space_used);

  // First calculate the committed sizes that can be calculated independently.
  _survivor_space_committed = _survivor_space_used;
  _old_gen_committed = HeapRegion::align_up_to_region_byte_size(_old_gen_used);

  // Next, start with the overall committed size.
  _overall_committed = _g1h->capacity();
  size_t committed = _overall_committed;

  // Remove the committed size we have calculated so far (for the
  // survivor and old space).
  assert(committed >= (_survivor_space_committed + _old_gen_committed), "sanity");
  committed -= _survivor_space_committed + _old_gen_committed;

  // Next, calculate and remove the committed size for the eden.
  _eden_space_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes;
  // Somewhat defensive: be robust in case there are inaccuracies in
  // the calculations
  _eden_space_committed = MIN2(_eden_space_committed, committed);
  committed -= _eden_space_committed;

  // Finally, give the rest to the old space...
  _old_gen_committed += committed;
  // ..and calculate the young gen committed.
  _young_gen_committed = _eden_space_committed + _survivor_space_committed;

  assert(_overall_committed ==
         (_eden_space_committed + _survivor_space_committed + _old_gen_committed),
         "the committed sizes should add up");
  // Somewhat defensive: cap the eden used size to make sure it
  // never exceeds the committed size.
  _eden_space_used = MIN2(_eden_space_used, _eden_space_committed);
  // _survivor_committed and _old_committed are calculated in terms of
  // the corresponding _*_used value, so the next two conditions
  // should hold.
  assert(_survivor_space_used <= _survivor_space_committed, "post-condition");
  assert(_old_gen_used <= _old_gen_committed, "post-condition");
}
 283 
 284 void G1MonitoringSupport::update_sizes() {
 285   recalculate_sizes();
 286   if (UsePerfData) {
 287     _eden_space_counters->update_capacity(pad_capacity(_eden_space_committed));
 288     _eden_space_counters->update_used(_eden_space_used);
 289    // only the "to" survivor space is active, so we don't need to
 290     // update the counters for the "from" survivor space
 291     _to_space_counters->update_capacity(pad_capacity(_survivor_space_committed));
 292     _to_space_counters->update_used(_survivor_space_used);
 293     _old_space_counters->update_capacity(pad_capacity(_old_gen_committed));
 294     _old_space_counters->update_used(_old_gen_used);
 295 
 296     _young_gen_counters->update_all();
 297     _old_gen_counters->update_all();
 298 
 299     MetaspaceCounters::update_performance_counters();
 300     CompressedClassSpaceCounters::update_performance_counters();
 301   }


< prev index next >