src/hotspot/share/gc/g1/g1ConcurrentMark.cpp (new version)

  39 #include "gc/g1/g1ThreadLocalData.hpp"
  40 #include "gc/g1/heapRegion.inline.hpp"
  41 #include "gc/g1/heapRegionRemSet.hpp"
  42 #include "gc/g1/heapRegionSet.inline.hpp"
  43 #include "gc/shared/gcId.hpp"
  44 #include "gc/shared/gcTimer.hpp"
  45 #include "gc/shared/gcTrace.hpp"
  46 #include "gc/shared/gcTraceTime.inline.hpp"
  47 #include "gc/shared/gcVMOperations.hpp"
  48 #include "gc/shared/genOopClosures.inline.hpp"
  49 #include "gc/shared/referencePolicy.hpp"
  50 #include "gc/shared/strongRootsScope.hpp"
  51 #include "gc/shared/suspendibleThreadSet.hpp"
  52 #include "gc/shared/taskqueue.inline.hpp"
  53 #include "gc/shared/weakProcessor.inline.hpp"
  54 #include "gc/shared/workerPolicy.hpp"
  55 #include "include/jvm.h"
  56 #include "logging/log.hpp"
  57 #include "memory/allocation.hpp"
  58 #include "memory/resourceArea.hpp"
  59 #include "memory/universe.hpp"
  60 #include "oops/access.inline.hpp"
  61 #include "oops/oop.inline.hpp"
  62 #include "runtime/atomic.hpp"
  63 #include "runtime/handles.inline.hpp"
  64 #include "runtime/java.hpp"
  65 #include "runtime/prefetch.inline.hpp"
  66 #include "services/memTracker.hpp"
  67 #include "utilities/align.hpp"
  68 #include "utilities/growableArray.hpp"
  69 
  70 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  71   assert(addr < _cm->finger(), "invariant");
  72   assert(addr >= _task->finger(), "invariant");
  73 
  74   // We move that task's local finger along.
  75   _task->move_finger_to(addr);
  76 
  77   _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  78   // we only partially drain the local queue and global stack
  79   _task->drain_local_queue(true);


 150     log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 151                   old_capacity, new_capacity);
 152   } else {
 153     log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 154                     old_capacity, new_capacity);
 155   }
 156 }
 157 
 158 G1CMMarkStack::~G1CMMarkStack() {
 159   if (_base != NULL) {
 160     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 161   }
 162 }
 163 
 164 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
 165   elem->next = *list;
 166   *list = elem;
 167 }
 168 
 169 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
 170   MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 171   add_chunk_to_list(&_chunk_list, elem);
 172   _chunks_in_chunk_list++;
 173 }
 174 
 175 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
 176   MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 177   add_chunk_to_list(&_free_list, elem);
 178 }
 179 
 180 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
 181   TaskQueueEntryChunk* result = *list;
 182   if (result != NULL) {
 183     *list = (*list)->next;
 184   }
 185   return result;
 186 }
 187 
 188 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
 189   MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 190   TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
 191   if (result != NULL) {
 192     _chunks_in_chunk_list--;
 193   }
 194   return result;
 195 }
 196 
 197 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
 198   MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 199   return remove_chunk_from_list(&_free_list);
 200 }
 201 
 202 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
 203   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
 204   // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
 205   // wraparound of _hwm.
 206   if (_hwm >= _chunk_capacity) {
 207     return NULL;
 208   }
 209 
 210   size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
 211   if (cur_idx >= _chunk_capacity) {
 212     return NULL;
 213   }
 214 
 215   TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
 216   result->next = NULL;
 217   return result;
 218 }


 295     // force the caller to bail out of their loop.
 296     return NULL;
 297   }
 298 
 299   if (_claimed_root_regions >= _num_root_regions) {
 300     return NULL;
 301   }
 302 
 303   size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
 304   if (claimed_index < _num_root_regions) {
 305     return _root_regions[claimed_index];
 306   }
 307   return NULL;
 308 }
 309 
 310 uint G1CMRootRegions::num_root_regions() const {
 311   return (uint)_num_root_regions;
 312 }
 313 
 314 void G1CMRootRegions::notify_scan_done() {
 315   MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 316   _scan_in_progress = false;
 317   RootRegionScan_lock->notify_all();
 318 }
 319 
 320 void G1CMRootRegions::cancel_scan() {
 321   notify_scan_done();
 322 }
 323 
 324 void G1CMRootRegions::scan_finished() {
 325   assert(scan_in_progress(), "pre-condition");
 326 
 327   if (!_should_abort) {
 328     assert(_claimed_root_regions >= num_root_regions(),
 329            "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
 330            _claimed_root_regions, num_root_regions());
 331   }
 332 
 333   notify_scan_done();
 334 }
 335 
 336 bool G1CMRootRegions::wait_until_scan_finished() {
 337   if (!scan_in_progress()) {
 338     return false;
 339   }
 340 
 341   {
 342     MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 343     while (scan_in_progress()) {
 344       ml.wait();
 345     }
 346   }
 347   return true;
 348 }
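
The scan_finished() / wait_until_scan_finished() pair above is a flag-plus-monitor handshake: the scanner clears _scan_in_progress and notifies under RootRegionScan_lock, while waiters block on the monitor until the flag drops. The sketch below re-expresses that handshake with standard C++ primitives instead of HotSpot's MonitorLocker; it is an illustration only, and the RootRegionScanState name is invented.

    // Standalone re-expression of the root-region scan handshake; illustrative only.
    #include <condition_variable>
    #include <mutex>

    class RootRegionScanState {
      std::mutex              _lock;
      std::condition_variable _cv;
      bool                    _scan_in_progress = true;

    public:
      // Called once all root regions have been scanned (or the scan is cancelled).
      void notify_scan_done() {
        std::lock_guard<std::mutex> x(_lock);
        _scan_in_progress = false;
        _cv.notify_all();
      }

      // Blocks callers until root-region scanning is complete; returns false if the
      // scan had already finished, mirroring wait_until_scan_finished() above.
      bool wait_until_scan_finished() {
        std::unique_lock<std::mutex> ml(_lock);
        if (!_scan_in_progress) {
          return false;
        }
        _cv.wait(ml, [this] { return !_scan_in_progress; });
        return true;
      }
    };
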
 349 
 350 // Returns the maximum number of workers to be used in a concurrent
 351 // phase based on the number of GC workers being used in a STW
 352 // phase.
 353 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
 354   return MAX2((num_gc_workers + 2) / 4, 1U);
 355 }
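
To make the ergonomic scaling above concrete: the formula (n + 2) / 4, clamped to at least 1, yields one concurrent marking worker for 1 to 5 STW GC workers, two for 8, and four for 16. The snippet below is a standalone worked example, not part of this change; it uses std::max in place of HotSpot's MAX2.

    // Worked example of scale_concurrent_worker_threads(); illustration only.
    #include <algorithm>
    #include <cstdio>

    static unsigned scale_concurrent_worker_threads(unsigned num_gc_workers) {
      return std::max((num_gc_workers + 2) / 4, 1u);  // same formula as above
    }

    int main() {
      for (unsigned n : {1u, 4u, 8u, 13u, 16u}) {
        std::printf("ParallelGCThreads=%u -> ConcGCThreads=%u\n",
                    n, scale_concurrent_worker_threads(n));
      }
      return 0;
    }

This is how ConcGCThreads is derived from ParallelGCThreads in the constructor below when it has not been set explicitly.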
 356 
 357 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
 358                                    G1RegionToSpaceMapper* prev_bitmap_storage,
 359                                    G1RegionToSpaceMapper* next_bitmap_storage) :
 360   // _cm_thread set inside the constructor
 361   _g1h(g1h),
 362   _completed_initialization(false),
 363 
 364   _mark_bitmap_1(),


 408   _max_concurrent_workers(0),
 409 
 410   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
 411   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 412 {
 413   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 414   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 415 
 416   // Create & start ConcurrentMark thread.
 417   _cm_thread = new G1ConcurrentMarkThread(this);
 418   if (_cm_thread->osthread() == NULL) {
 419     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 420   }
 421 
 422   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 423 
 424   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
 425     // Calculate the number of concurrent worker threads by scaling
 426     // the number of parallel GC threads.
 427     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
 428     FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
 429   }
 430 
 431   assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
 432   if (ConcGCThreads > ParallelGCThreads) {
 433     log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
 434                     ConcGCThreads, ParallelGCThreads);
 435     return;
 436   }
 437 
 438   log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
 439   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 440 
 441   _num_concurrent_workers = ConcGCThreads;
 442   _max_concurrent_workers = _num_concurrent_workers;
 443 
 444   _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
 445   _concurrent_workers->initialize_workers();
 446 
 447   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 448     size_t mark_stack_size =
 449       MIN2(MarkStackSizeMax,
 450           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
 451     // Verify that the calculated value for MarkStackSize is in range.
 452     // It would be nice to use the private utility routine from Arguments.
 453     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 454       log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 455                       "must be between 1 and " SIZE_FORMAT,
 456                       mark_stack_size, MarkStackSizeMax);
 457       return;
 458     }
 459     FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
 460   } else {
 461     // Verify MarkStackSize is in range.
 462     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 463       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 464         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 465           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
 466                           "must be between 1 and " SIZE_FORMAT,
 467                           MarkStackSize, MarkStackSizeMax);
 468           return;
 469         }
 470       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 471         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 472           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
 473                           " or for MarkStackSizeMax (" SIZE_FORMAT ")",
 474                           MarkStackSize, MarkStackSizeMax);
 475           return;
 476         }
 477       }
 478     }
 479   }


1272   FreeRegionList* _cleanup_list;
1273   HeapRegionClaimer _hrclaimer;
1274 
1275 public:
1276   G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1277     AbstractGangTask("G1 Cleanup"),
1278     _g1h(g1h),
1279     _cleanup_list(cleanup_list),
1280     _hrclaimer(n_workers) {
1281   }
1282 
1283   void work(uint worker_id) {
1284     FreeRegionList local_cleanup_list("Local Cleanup List");
1285     G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
1286     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1287     assert(cl.is_complete(), "Shouldn't have aborted!");
1288 
1289     // Now update the old/humongous region sets
1290     _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
1291     {
1292       MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1293       _g1h->decrement_summary_bytes(cl.freed_bytes());
1294 
1295       _cleanup_list->add_ordered(&local_cleanup_list);
1296       assert(local_cleanup_list.is_empty(), "post-condition");
1297     }
1298   }
1299 };
1300 
1301 void G1ConcurrentMark::reclaim_empty_regions() {
1302   WorkGang* workers = _g1h->workers();
1303   FreeRegionList empty_regions_list("Empty Regions After Mark List");
1304 
1305   G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
1306   workers->run_task(&cl);
1307 
1308   if (!empty_regions_list.is_empty()) {
1309     log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1310     // Now print the empty regions list.
1311     G1HRPrinter* hrp = _g1h->hr_printer();
1312     if (hrp->is_active()) {


1924 class VerifyNoCSetOops {
1925   G1CollectedHeap* _g1h;
1926   const char* _phase;
1927   int _info;
1928 
1929 public:
1930   VerifyNoCSetOops(const char* phase, int info = -1) :
1931     _g1h(G1CollectedHeap::heap()),
1932     _phase(phase),
1933     _info(info)
1934   { }
1935 
1936   void operator()(G1TaskQueueEntry task_entry) const {
1937     if (task_entry.is_array_slice()) {
1938       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1939       return;
1940     }
1941     guarantee(oopDesc::is_oop(task_entry.obj()),
1942               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1943               p2i(task_entry.obj()), _phase, _info);
1944     HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
1945     guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
1946               "obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
1947               p2i(task_entry.obj()), _phase, _info, r->hrm_index());
1948   }
1949 };
1950 
1951 void G1ConcurrentMark::verify_no_collection_set_oops() {
1952   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1953   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1954     return;
1955   }
1956 
1957   // Verify entries on the global mark stack
1958   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1959 
1960   // Verify entries on the task queues
1961   for (uint i = 0; i < _max_num_tasks; ++i) {
1962     G1CMTaskQueue* queue = _task_queues->queue(i);
1963     queue->iterate(VerifyNoCSetOops("Queue", i));
1964   }
1965 
1966   // Verify the global finger
1967   HeapWord* global_finger = finger();
1968   if (global_finger != NULL && global_finger < _heap.end()) {
1969     // Since we always iterate over all regions, we might get a NULL HeapRegion
1970     // here.
1971     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1972     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1973               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1974               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1975   }
1976 
1977   // Verify the task fingers
1978   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1979   for (uint i = 0; i < _num_concurrent_workers; ++i) {
1980     G1CMTask* task = _tasks[i];
1981     HeapWord* task_finger = task->finger();
1982     if (task_finger != NULL && task_finger < _heap.end()) {
1983       // See above note on the global finger verification.
1984       HeapRegion* r = _g1h->heap_region_containing(task_finger);
1985       guarantee(r == NULL || task_finger == r->bottom() ||
1986                 !r->in_collection_set() || !r->has_index_in_opt_cset(),
1987                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
1988                 p2i(task_finger), HR_FORMAT_PARAMS(r));
1989     }
1990   }
1991 }
1992 #endif // PRODUCT
1993 
1994 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
1995   _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
1996 }
1997 
1998 void G1ConcurrentMark::print_stats() {
1999   if (!log_is_enabled(Debug, gc, stats)) {
2000     return;
2001   }
2002   log_debug(gc, stats)("---------------------------------------------------------------------");
2003   for (size_t i = 0; i < _num_active_tasks; ++i) {
2004     _tasks[i]->print_stats();
2005     log_debug(gc, stats)("---------------------------------------------------------------------");
2006   }
2007 }
2008 

src/hotspot/share/gc/g1/g1ConcurrentMark.cpp (old version)

  39 #include "gc/g1/g1ThreadLocalData.hpp"
  40 #include "gc/g1/heapRegion.inline.hpp"
  41 #include "gc/g1/heapRegionRemSet.hpp"
  42 #include "gc/g1/heapRegionSet.inline.hpp"
  43 #include "gc/shared/gcId.hpp"
  44 #include "gc/shared/gcTimer.hpp"
  45 #include "gc/shared/gcTrace.hpp"
  46 #include "gc/shared/gcTraceTime.inline.hpp"
  47 #include "gc/shared/gcVMOperations.hpp"
  48 #include "gc/shared/genOopClosures.inline.hpp"
  49 #include "gc/shared/referencePolicy.hpp"
  50 #include "gc/shared/strongRootsScope.hpp"
  51 #include "gc/shared/suspendibleThreadSet.hpp"
  52 #include "gc/shared/taskqueue.inline.hpp"
  53 #include "gc/shared/weakProcessor.inline.hpp"
  54 #include "gc/shared/workerPolicy.hpp"
  55 #include "include/jvm.h"
  56 #include "logging/log.hpp"
  57 #include "memory/allocation.hpp"
  58 #include "memory/resourceArea.hpp"

  59 #include "oops/access.inline.hpp"
  60 #include "oops/oop.inline.hpp"
  61 #include "runtime/atomic.hpp"
  62 #include "runtime/handles.inline.hpp"
  63 #include "runtime/java.hpp"
  64 #include "runtime/prefetch.inline.hpp"
  65 #include "services/memTracker.hpp"
  66 #include "utilities/align.hpp"
  67 #include "utilities/growableArray.hpp"
  68 
  69 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  70   assert(addr < _cm->finger(), "invariant");
  71   assert(addr >= _task->finger(), "invariant");
  72 
  73   // We move that task's local finger along.
  74   _task->move_finger_to(addr);
  75 
  76   _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  77   // we only partially drain the local queue and global stack
  78   _task->drain_local_queue(true);


 149     log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 150                   old_capacity, new_capacity);
 151   } else {
 152     log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 153                     old_capacity, new_capacity);
 154   }
 155 }
 156 
 157 G1CMMarkStack::~G1CMMarkStack() {
 158   if (_base != NULL) {
 159     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 160   }
 161 }
 162 
 163 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
 164   elem->next = *list;
 165   *list = elem;
 166 }
 167 
 168 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
 169   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 170   add_chunk_to_list(&_chunk_list, elem);
 171   _chunks_in_chunk_list++;
 172 }
 173 
 174 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
 175   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 176   add_chunk_to_list(&_free_list, elem);
 177 }
 178 
 179 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
 180   TaskQueueEntryChunk* result = *list;
 181   if (result != NULL) {
 182     *list = (*list)->next;
 183   }
 184   return result;
 185 }
 186 
 187 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
 188   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 189   TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
 190   if (result != NULL) {
 191     _chunks_in_chunk_list--;
 192   }
 193   return result;
 194 }
 195 
 196 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
 197   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 198   return remove_chunk_from_list(&_free_list);
 199 }
 200 
 201 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
 202   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
 203   // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
 204   // wraparound of _hwm.
 205   if (_hwm >= _chunk_capacity) {
 206     return NULL;
 207   }
 208 
 209   size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
 210   if (cur_idx >= _chunk_capacity) {
 211     return NULL;
 212   }
 213 
 214   TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
 215   result->next = NULL;
 216   return result;
 217 }


 294     // force the caller to bail out of their loop.
 295     return NULL;
 296   }
 297 
 298   if (_claimed_root_regions >= _num_root_regions) {
 299     return NULL;
 300   }
 301 
 302   size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
 303   if (claimed_index < _num_root_regions) {
 304     return _root_regions[claimed_index];
 305   }
 306   return NULL;
 307 }
 308 
 309 uint G1CMRootRegions::num_root_regions() const {
 310   return (uint)_num_root_regions;
 311 }
 312 
 313 void G1CMRootRegions::notify_scan_done() {
 314   MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 315   _scan_in_progress = false;
 316   RootRegionScan_lock->notify_all();
 317 }
 318 
 319 void G1CMRootRegions::cancel_scan() {
 320   notify_scan_done();
 321 }
 322 
 323 void G1CMRootRegions::scan_finished() {
 324   assert(scan_in_progress(), "pre-condition");
 325 
 326   if (!_should_abort) {
 327     assert(_claimed_root_regions >= num_root_regions(),
 328            "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
 329            _claimed_root_regions, num_root_regions());
 330   }
 331 
 332   notify_scan_done();
 333 }
 334 
 335 bool G1CMRootRegions::wait_until_scan_finished() {
 336   if (!scan_in_progress()) {
 337     return false;
 338   }
 339 
 340   {
 341     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 342     while (scan_in_progress()) {
 343       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 344     }
 345   }
 346   return true;
 347 }
 348 
 349 // Returns the maximum number of workers to be used in a concurrent
 350 // phase based on the number of GC workers being used in a STW
 351 // phase.
 352 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
 353   return MAX2((num_gc_workers + 2) / 4, 1U);
 354 }
 355 
 356 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
 357                                    G1RegionToSpaceMapper* prev_bitmap_storage,
 358                                    G1RegionToSpaceMapper* next_bitmap_storage) :
 359   // _cm_thread set inside the constructor
 360   _g1h(g1h),
 361   _completed_initialization(false),
 362 
 363   _mark_bitmap_1(),


 407   _max_concurrent_workers(0),
 408 
 409   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
 410   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 411 {
 412   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 413   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 414 
 415   // Create & start ConcurrentMark thread.
 416   _cm_thread = new G1ConcurrentMarkThread(this);
 417   if (_cm_thread->osthread() == NULL) {
 418     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 419   }
 420 
 421   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 422 
 423   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
 424     // Calculate the number of concurrent worker threads by scaling
 425     // the number of parallel GC threads.
 426     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
 427     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 428   }
 429 
 430   assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
 431   if (ConcGCThreads > ParallelGCThreads) {
 432     log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
 433                     ConcGCThreads, ParallelGCThreads);
 434     return;
 435   }
 436 
 437   log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
 438   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 439 
 440   _num_concurrent_workers = ConcGCThreads;
 441   _max_concurrent_workers = _num_concurrent_workers;
 442 
 443   _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
 444   _concurrent_workers->initialize_workers();
 445 
 446   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 447     size_t mark_stack_size =
 448       MIN2(MarkStackSizeMax,
 449           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
 450     // Verify that the calculated value for MarkStackSize is in range.
 451     // It would be nice to use the private utility routine from Arguments.
 452     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 453       log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 454                       "must be between 1 and " SIZE_FORMAT,
 455                       mark_stack_size, MarkStackSizeMax);
 456       return;
 457     }
 458     FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
 459   } else {
 460     // Verify MarkStackSize is in range.
 461     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 462       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 463         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 464           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
 465                           "must be between 1 and " SIZE_FORMAT,
 466                           MarkStackSize, MarkStackSizeMax);
 467           return;
 468         }
 469       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 470         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 471           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
 472                           " or for MarkStackSizeMax (" SIZE_FORMAT ")",
 473                           MarkStackSize, MarkStackSizeMax);
 474           return;
 475         }
 476       }
 477     }
 478   }


1271   FreeRegionList* _cleanup_list;
1272   HeapRegionClaimer _hrclaimer;
1273 
1274 public:
1275   G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1276     AbstractGangTask("G1 Cleanup"),
1277     _g1h(g1h),
1278     _cleanup_list(cleanup_list),
1279     _hrclaimer(n_workers) {
1280   }
1281 
1282   void work(uint worker_id) {
1283     FreeRegionList local_cleanup_list("Local Cleanup List");
1284     G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
1285     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1286     assert(cl.is_complete(), "Shouldn't have aborted!");
1287 
1288     // Now update the old/humongous region sets
1289     _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
1290     {
1291       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1292       _g1h->decrement_summary_bytes(cl.freed_bytes());
1293 
1294       _cleanup_list->add_ordered(&local_cleanup_list);
1295       assert(local_cleanup_list.is_empty(), "post-condition");
1296     }
1297   }
1298 };
1299 
1300 void G1ConcurrentMark::reclaim_empty_regions() {
1301   WorkGang* workers = _g1h->workers();
1302   FreeRegionList empty_regions_list("Empty Regions After Mark List");
1303 
1304   G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
1305   workers->run_task(&cl);
1306 
1307   if (!empty_regions_list.is_empty()) {
1308     log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1309     // Now print the empty regions list.
1310     G1HRPrinter* hrp = _g1h->hr_printer();
1311     if (hrp->is_active()) {


1923 class VerifyNoCSetOops {
1924   G1CollectedHeap* _g1h;
1925   const char* _phase;
1926   int _info;
1927 
1928 public:
1929   VerifyNoCSetOops(const char* phase, int info = -1) :
1930     _g1h(G1CollectedHeap::heap()),
1931     _phase(phase),
1932     _info(info)
1933   { }
1934 
1935   void operator()(G1TaskQueueEntry task_entry) const {
1936     if (task_entry.is_array_slice()) {
1937       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1938       return;
1939     }
1940     guarantee(oopDesc::is_oop(task_entry.obj()),
1941               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1942               p2i(task_entry.obj()), _phase, _info);
1943     guarantee(!_g1h->is_in_cset(task_entry.obj()),
1944               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
1945               p2i(task_entry.obj()), _phase, _info);

1946   }
1947 };
1948 
1949 void G1ConcurrentMark::verify_no_collection_set_oops() {
1950   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1951   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1952     return;
1953   }
1954 
1955   // Verify entries on the global mark stack
1956   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1957 
1958   // Verify entries on the task queues
1959   for (uint i = 0; i < _max_num_tasks; ++i) {
1960     G1CMTaskQueue* queue = _task_queues->queue(i);
1961     queue->iterate(VerifyNoCSetOops("Queue", i));
1962   }
1963 
1964   // Verify the global finger
1965   HeapWord* global_finger = finger();
1966   if (global_finger != NULL && global_finger < _heap.end()) {
1967     // Since we always iterate over all regions, we might get a NULL HeapRegion
1968     // here.
1969     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1970     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1971               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1972               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1973   }
1974 
1975   // Verify the task fingers
1976   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1977   for (uint i = 0; i < _num_concurrent_workers; ++i) {
1978     G1CMTask* task = _tasks[i];
1979     HeapWord* task_finger = task->finger();
1980     if (task_finger != NULL && task_finger < _heap.end()) {
1981       // See above note on the global finger verification.
1982       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
1983       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
1984                 !task_hr->in_collection_set(),
1985                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
1986                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
1987     }
1988   }
1989 }
1990 #endif // PRODUCT
1991 
1992 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
1993   _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
1994 }
1995 
1996 void G1ConcurrentMark::print_stats() {
1997   if (!log_is_enabled(Debug, gc, stats)) {
1998     return;
1999   }
2000   log_debug(gc, stats)("---------------------------------------------------------------------");
2001   for (size_t i = 0; i < _num_active_tasks; ++i) {
2002     _tasks[i]->print_stats();
2003     log_debug(gc, stats)("---------------------------------------------------------------------");
2004   }
2005 }
2006 

