src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

New version:

 240 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
 241   TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
 242 
 243   if (cur == NULL) {
 244     return false;
 245   }
 246 
 247   Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 248 
 249   add_chunk_to_free_list(cur);
 250   return true;
 251 }
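
A minimal standalone sketch (plain C++, not JDK code) of the pop-a-chunk pattern above: detach one node from a shared list of full chunks, copy its fixed-size payload into the caller's buffer, then recycle the node on a free list. The JDK manipulates both lists lock-free; this sketch uses a mutex for brevity, and kEntriesPerChunk is a placeholder capacity.

    #include <cstring>
    #include <mutex>

    static const int kEntriesPerChunk = 1024;  // placeholder capacity

    struct Chunk {
      Chunk* next;
      void*  data[kEntriesPerChunk];
    };

    struct ChunkedStack {
      Chunk*     _chunk_list = nullptr;  // list of full chunks
      Chunk*     _free_list  = nullptr;  // recycled, empty chunks
      std::mutex _lock;                  // stand-in for the JDK's lock-free lists

      // Pop one full chunk: copy its entries out, then recycle the node.
      bool pop_chunk(void** ptr_arr) {
        std::lock_guard<std::mutex> g(_lock);
        Chunk* cur = _chunk_list;
        if (cur == nullptr) {
          return false;                  // nothing to pop
        }
        _chunk_list = cur->next;
        std::memcpy(ptr_arr, cur->data, sizeof(cur->data));
        cur->next  = _free_list;         // node goes back on the free list
        _free_list = cur;
        return true;
      }
    };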
 252 
 253 void G1CMMarkStack::set_empty() {
 254   _chunks_in_chunk_list = 0;
 255   _hwm = 0;
 256   _chunk_list = NULL;
 257   _free_list = NULL;
 258 }
 259 
 260 G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
 261     _root_regions(NULL),
 262     _max_regions(max_regions),
 263     _num_root_regions(0),
 264     _claimed_root_regions(0),
 265     _scan_in_progress(false),
 266     _should_abort(false) {
 267   _root_regions = new MemRegion[_max_regions];
 268   if (_root_regions == NULL) {
 269     vm_exit_during_initialization("Could not allocate root MemRegion set.");
 270   }
 271 }
 272 
 273 G1CMRootMemRegions::~G1CMRootMemRegions() {
 274   delete[] _root_regions;
 275 }
 276 
 277 void G1CMRootMemRegions::reset() {
 278   _num_root_regions = 0;
 279 }
 280 
 281 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
 282   assert_at_safepoint();
 283   size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
 284   assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
 285   assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less than or equal to "
 286          "end (" PTR_FORMAT ")", p2i(start), p2i(end));
 287   _root_regions[idx].set_start(start);
 288   _root_regions[idx].set_end(end);
 289 }
 290 
 291 void G1CMRootMemRegions::prepare_for_scan() {
 292   assert(!scan_in_progress(), "pre-condition");
 293 
 294   _scan_in_progress = _num_root_regions > 0;
 295 
 296   _claimed_root_regions = 0;
 297   _should_abort = false;
 298 }
 299 
 300 const MemRegion* G1CMRootMemRegions::claim_next() {
 301   if (_should_abort) {
 302     // If someone has set the should_abort flag, we return NULL to
 303     // force the caller to bail out of their loop.
 304     return NULL;
 305   }
 306 
 307   if (_claimed_root_regions >= _num_root_regions) {
 308     return NULL;
 309   }
 310 
 311   size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
 312   if (claimed_index < _num_root_regions) {
 313     return &_root_regions[claimed_index];
 314   }
 315   return NULL;
 316 }
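
The claiming protocol above deserves a note: the early bounds check is only a cheap fast path, and since several threads can pass it before any of them increments the counter, the index produced by the atomic add must be validated a second time. A standalone sketch of the pattern (plain C++ atomics, not JDK code; note std::atomic's fetch_add returns the pre-increment value, so no "- 1" is needed):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct Claimer {
      std::atomic<size_t> _claimed{0};
      size_t              _num_items;

      explicit Claimer(size_t n) : _num_items(n) {}

      // Returns an exclusively claimed index, or SIZE_MAX when exhausted.
      size_t claim_next() {
        if (_claimed.load(std::memory_order_relaxed) >= _num_items) {
          return SIZE_MAX;                   // fast path: all claimed
        }
        size_t idx = _claimed.fetch_add(1);  // may overshoot _num_items
        return idx < _num_items ? idx : SIZE_MAX;
      }
    };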
 317 
 318 uint G1CMRootMemRegions::num_root_regions() const {
 319   return (uint)_num_root_regions;
 320 }
 321 
 322 void G1CMRootMemRegions::notify_scan_done() {
 323   MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 324   _scan_in_progress = false;
 325   RootRegionScan_lock->notify_all();
 326 }
 327 
 328 void G1CMRootMemRegions::cancel_scan() {
 329   notify_scan_done();
 330 }
 331 
 332 void G1CMRootMemRegions::scan_finished() {
 333   assert(scan_in_progress(), "pre-condition");
 334 
 335   if (!_should_abort) {
 336     assert(_claimed_root_regions >= num_root_regions(),
 337            "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
 338            _claimed_root_regions, num_root_regions());
 339   }
 340 
 341   notify_scan_done();
 342 }
 343 
 344 bool G1CMRootMemRegions::wait_until_scan_finished() {
 345   if (!scan_in_progress()) {
 346     return false;
 347   }
 348 
 349   {
 350     MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 351     while (scan_in_progress()) {
 352       ml.wait();
 353     }
 354   }
 355   return true;
 356 }
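
notify_scan_done() and wait_until_scan_finished() form a standard monitor handshake: the flag is cleared while holding the lock, and waiters re-check it in a loop so spurious wakeups are harmless. A minimal sketch with standard C++ primitives (not the JDK's Mutex/MonitorLocker):

    #include <condition_variable>
    #include <mutex>

    struct ScanState {
      std::mutex              _lock;
      std::condition_variable _cv;
      bool                    _scan_in_progress = false;

      void notify_scan_done() {
        std::lock_guard<std::mutex> g(_lock);
        _scan_in_progress = false;
        _cv.notify_all();                 // wake every waiter
      }

      // Returns true only if the caller actually had to wait.
      bool wait_until_scan_finished() {
        std::unique_lock<std::mutex> g(_lock);
        if (!_scan_in_progress) {
          return false;
        }
        _cv.wait(g, [this] { return !_scan_in_progress; });
        return true;
      }
    };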
 357 
 358 // Returns the maximum number of workers to be used in a concurrent
 359 // phase based on the number of GC workers being used in a STW
 360 // phase.
 361 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
 362   return MAX2((num_gc_workers + 2) / 4, 1U);
 363 }
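
Concretely, the formula computes (n + 2) / 4 with integer division and never returns less than one, so 1 to 5 STW workers map to 1 concurrent worker, 6 to 9 map to 2, 10 to 13 map to 3, and so on. A tiny standalone check of that arithmetic (plain C++):

    #include <algorithm>
    #include <cstdio>

    static unsigned scale_concurrent_worker_threads(unsigned num_gc_workers) {
      return std::max((num_gc_workers + 2) / 4, 1u);
    }

    int main() {
      const unsigned counts[] = {1, 5, 6, 9, 10, 13, 16};
      for (unsigned n : counts) {
        std::printf("%2u STW workers -> %u concurrent workers\n",
                    n, scale_concurrent_worker_threads(n));
      }
      return 0;
    }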
 364 


 866   uint result = 0;
 867   if (!UseDynamicNumberOfGCThreads ||
 868       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 869        !ForceDynamicNumberOfGCThreads)) {
 870     result = _max_concurrent_workers;
 871   } else {
 872     result =
 873       WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
 874                                                 1, /* Minimum workers */
 875                                                 _num_concurrent_workers,
 876                                                 Threads::number_of_non_daemon_threads());
 877     // Don't scale the result down by scale_concurrent_workers() because
 878     // that scaling has already gone into "_max_concurrent_workers".
 879   }
 880   assert(result > 0 && result <= _max_concurrent_workers,
 881          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
 882          _max_concurrent_workers, result);
 883   return result;
 884 }
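
The selection logic reduces to: use the fixed maximum when dynamic thread sizing is off, or when ConcGCThreads was set explicitly (and dynamic sizing isn't being forced); otherwise let the policy pick a value in [1, max]. A simplified standalone sketch; pick_workers is a hypothetical stand-in for WorkerPolicy::calc_default_active_workers, whose real heuristic also weighs the number of non-daemon threads:

    #include <algorithm>

    struct Flags {
      bool use_dynamic;       // UseDynamicNumberOfGCThreads
      bool conc_threads_set;  // !FLAG_IS_DEFAULT(ConcGCThreads)
      bool force_dynamic;     // ForceDynamicNumberOfGCThreads
    };

    // Hypothetical stand-in for WorkerPolicy::calc_default_active_workers.
    static unsigned pick_workers(unsigned max_w, unsigned min_w, unsigned prev_w) {
      return std::max(min_w, std::min(prev_w, max_w));
    }

    static unsigned calc_active_marking_workers(const Flags& f,
                                                unsigned max_workers,
                                                unsigned prev_workers) {
      if (!f.use_dynamic || (f.conc_threads_set && !f.force_dynamic)) {
        return max_workers;  // fixed sizing: always run the maximum
      }
      return pick_workers(max_workers, 1u /* minimum */, prev_workers);
    }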
 885 
 886 void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) {
 887 #ifdef ASSERT
 888   HeapWord* last = region->last();
 889   HeapRegion* hr = _g1h->heap_region_containing(last);
 890   assert(hr->is_old() || hr->next_top_at_mark_start() == hr->bottom(),
 891          "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
 892   assert(hr->next_top_at_mark_start() == region->start(),
 893          "MemRegion start should be equal to nTAMS");
 894 #endif
 895 
 896   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 897 
 898   const uintx interval = PrefetchScanIntervalInBytes;
 899   HeapWord* curr = region->start();
 900   const HeapWord* end = region->end();
 901   while (curr < end) {
 902     Prefetch::read(curr, interval);
 903     oop obj = oop(curr);
 904     int size = obj->oop_iterate_size(&cl);
 905     assert(size == obj->size(), "sanity");
 906     curr += size;
 907   }
 908 }
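
The scan loop depends on the range being parsable: objects sit back to back, so adding each object's size to the cursor lands exactly on the next object's header. A standalone sketch of that bump-pointer walk over a contiguous buffer (plain C++, not JDK oop iteration):

    #include <cstddef>

    // Each object starts with its own total size, so a walker can
    // hop from header to header through the range.
    struct FakeObject {
      size_t size_in_bytes;  // total size, payload included
    };

    template <typename Visitor>
    void walk_range(char* start, char* end, Visitor visit) {
      for (char* curr = start; curr < end; ) {
        FakeObject* obj = reinterpret_cast<FakeObject*>(curr);
        visit(obj);                   // e.g. iterate the object's references
        curr += obj->size_in_bytes;   // next object's header
      }
    }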
 909 
 910 class G1CMRootRegionScanTask : public AbstractGangTask {
 911   G1ConcurrentMark* _cm;
 912 public:
 913   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 914     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
 915 
 916   void work(uint worker_id) {
 917     assert(Thread::current()->is_ConcurrentGC_thread(),
 918            "this should only be done by a conc GC thread");
 919 
 920     G1CMRootMemRegions* root_regions = _cm->root_regions();
 921     const MemRegion* region = root_regions->claim_next();
 922     while (region != NULL) {
 923       _cm->scan_root_region(region, worker_id);
 924       region = root_regions->claim_next();
 925     }
 926   }
 927 };
 928 
 929 void G1ConcurrentMark::scan_root_regions() {
 930   // scan_in_progress() will have been set to true only if there was
 931   // at least one root region to scan. So, if it's false, we
 932   // should not attempt to do any further work.
 933   if (root_regions()->scan_in_progress()) {
 934     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
 935 
 936     _num_concurrent_workers = MIN2(calc_active_marking_workers(),
 937                                    // We distribute work on a per-region basis, so starting
 938                                    // more threads than that is useless.
 939                                    root_regions()->num_root_regions());
 940     assert(_num_concurrent_workers <= _max_concurrent_workers,
 941            "Maximum number of marking threads exceeded");
 942 
 943     G1CMRootRegionScanTask task(this);
 944     log_debug(gc, ergo)("Running %s using %u workers for %u work units.",

Old version:

 240 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
 241   TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
 242 
 243   if (cur == NULL) {
 244     return false;
 245   }
 246 
 247   Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 248 
 249   add_chunk_to_free_list(cur);
 250   return true;
 251 }
 252 
 253 void G1CMMarkStack::set_empty() {
 254   _chunks_in_chunk_list = 0;
 255   _hwm = 0;
 256   _chunk_list = NULL;
 257   _free_list = NULL;
 258 }
 259 
 260 G1CMRootRegions::G1CMRootRegions(uint const max_regions) :
 261   _root_regions(NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC)),
 262   _max_regions(max_regions),
 263   _num_root_regions(0),
 264   _claimed_root_regions(0),
 265   _scan_in_progress(false),
 266   _should_abort(false) { }
 267 
 268 G1CMRootRegions::~G1CMRootRegions() {
 269   FREE_C_HEAP_ARRAY(HeapRegion*, _max_regions);
 270 }
 271 
 272 void G1CMRootRegions::reset() {
 273   _num_root_regions = 0;
 274 }
 275 
 276 void G1CMRootRegions::add(HeapRegion* hr) {
 277   assert_at_safepoint();
 278   size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
 279   assert(idx < _max_regions, "Trying to add more root regions than there is space " SIZE_FORMAT, _max_regions);
 280   _root_regions[idx] = hr;
 281 }
 282 
 283 void G1CMRootRegions::prepare_for_scan() {
 284   assert(!scan_in_progress(), "pre-condition");
 285 
 286   _scan_in_progress = _num_root_regions > 0;
 287 
 288   _claimed_root_regions = 0;
 289   _should_abort = false;
 290 }
 291 
 292 HeapRegion* G1CMRootRegions::claim_next() {
 293   if (_should_abort) {
 294     // If someone has set the should_abort flag, we return NULL to
 295     // force the caller to bail out of their loop.
 296     return NULL;
 297   }
 298 
 299   if (_claimed_root_regions >= _num_root_regions) {
 300     return NULL;
 301   }
 302 
 303   size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
 304   if (claimed_index < _num_root_regions) {
 305     return _root_regions[claimed_index];
 306   }
 307   return NULL;
 308 }
 309 
 310 uint G1CMRootRegions::num_root_regions() const {
 311   return (uint)_num_root_regions;
 312 }
 313 
 314 void G1CMRootRegions::notify_scan_done() {
 315   MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 316   _scan_in_progress = false;
 317   RootRegionScan_lock->notify_all();
 318 }
 319 
 320 void G1CMRootRegions::cancel_scan() {
 321   notify_scan_done();
 322 }
 323 
 324 void G1CMRootRegions::scan_finished() {
 325   assert(scan_in_progress(), "pre-condition");
 326 
 327   if (!_should_abort) {
 328     assert(_claimed_root_regions >= num_root_regions(),
 329            "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
 330            _claimed_root_regions, num_root_regions());
 331   }
 332 
 333   notify_scan_done();
 334 }
 335 
 336 bool G1CMRootRegions::wait_until_scan_finished() {
 337   if (!scan_in_progress()) {
 338     return false;
 339   }
 340 
 341   {
 342     MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 343     while (scan_in_progress()) {
 344       ml.wait();
 345     }
 346   }
 347   return true;
 348 }
 349 
 350 // Returns the maximum number of workers to be used in a concurrent
 351 // phase based on the number of GC workers being used in a STW
 352 // phase.
 353 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
 354   return MAX2((num_gc_workers + 2) / 4, 1U);
 355 }
 356 


 858   uint result = 0;
 859   if (!UseDynamicNumberOfGCThreads ||
 860       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 861        !ForceDynamicNumberOfGCThreads)) {
 862     result = _max_concurrent_workers;
 863   } else {
 864     result =
 865       WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
 866                                                 1, /* Minimum workers */
 867                                                 _num_concurrent_workers,
 868                                                 Threads::number_of_non_daemon_threads());
 869     // Don't scale the result down by scale_concurrent_workers() because
 870     // that scaling has already gone into "_max_concurrent_workers".
 871   }
 872   assert(result > 0 && result <= _max_concurrent_workers,
 873          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
 874          _max_concurrent_workers, result);
 875   return result;
 876 }
 877 
 878 void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
 879   assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
 880          "Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
 881   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 882 
 883   const uintx interval = PrefetchScanIntervalInBytes;
 884   HeapWord* curr = hr->next_top_at_mark_start();
 885   const HeapWord* end = hr->top();
 886   while (curr < end) {
 887     Prefetch::read(curr, interval);
 888     oop obj = oop(curr);
 889     int size = obj->oop_iterate_size(&cl);
 890     assert(size == obj->size(), "sanity");
 891     curr += size;
 892   }
 893 }
 894 
 895 class G1CMRootRegionScanTask : public AbstractGangTask {
 896   G1ConcurrentMark* _cm;
 897 public:
 898   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 899     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
 900 
 901   void work(uint worker_id) {
 902     assert(Thread::current()->is_ConcurrentGC_thread(),
 903            "this should only be done by a conc GC thread");
 904 
 905     G1CMRootRegions* root_regions = _cm->root_regions();
 906     HeapRegion* hr = root_regions->claim_next();
 907     while (hr != NULL) {
 908       _cm->scan_root_region(hr, worker_id);
 909       hr = root_regions->claim_next();
 910     }
 911   }
 912 };
 913 
 914 void G1ConcurrentMark::scan_root_regions() {
 915   // scan_in_progress() will have been set to true only if there was
 916   // at least one root region to scan. So, if it's false, we
 917   // should not attempt to do any further work.
 918   if (root_regions()->scan_in_progress()) {
 919     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
 920 
 921     _num_concurrent_workers = MIN2(calc_active_marking_workers(),
 922                                    // We distribute work on a per-region basis, so starting
 923                                    // more threads than that is useless.
 924                                    root_regions()->num_root_regions());
 925     assert(_num_concurrent_workers <= _max_concurrent_workers,
 926            "Maximum number of marking threads exceeded");
 927 
 928     G1CMRootRegionScanTask task(this);
 929     log_debug(gc, ergo)("Running %s using %u workers for %u work units.",