1 /*
   2  * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/g1BarrierSet.hpp"
  30 #include "gc/g1/g1BatchedTask.hpp"
  31 #include "gc/g1/g1CardSetMemory.hpp"
  32 #include "gc/g1/g1CollectedHeap.inline.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  35 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  36 #include "gc/g1/g1DirtyCardQueue.hpp"
  37 #include "gc/g1/g1HeapVerifier.hpp"
  38 #include "gc/g1/g1OopClosures.inline.hpp"
  39 #include "gc/g1/g1Policy.hpp"
  40 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
  41 #include "gc/g1/g1ThreadLocalData.hpp"
  42 #include "gc/g1/g1Trace.hpp"
  43 #include "gc/g1/heapRegion.inline.hpp"
  44 #include "gc/g1/heapRegionManager.hpp"
  45 #include "gc/g1/heapRegionRemSet.inline.hpp"
  46 #include "gc/g1/heapRegionSet.inline.hpp"
  47 #include "gc/shared/gcId.hpp"
  48 #include "gc/shared/gcTimer.hpp"
  49 #include "gc/shared/gcTraceTime.inline.hpp"
  50 #include "gc/shared/gcVMOperations.hpp"
  51 #include "gc/shared/genOopClosures.inline.hpp"
  52 #include "gc/shared/referencePolicy.hpp"
  53 #include "gc/shared/strongRootsScope.hpp"
  54 #include "gc/shared/suspendibleThreadSet.hpp"
  55 #include "gc/shared/taskTerminator.hpp"
  56 #include "gc/shared/taskqueue.inline.hpp"
  57 #include "gc/shared/weakProcessor.inline.hpp"
  58 #include "gc/shared/workerPolicy.hpp"
  59 #include "include/jvm.h"
  60 #include "logging/log.hpp"
  61 #include "memory/allocation.hpp"
  62 #include "memory/iterator.hpp"
  63 #include "memory/metaspaceUtils.hpp"
  64 #include "memory/resourceArea.hpp"
  65 #include "memory/universe.hpp"
  66 #include "oops/access.inline.hpp"
  67 #include "oops/oop.inline.hpp"
  68 #include "runtime/atomic.hpp"
  69 #include "runtime/globals_extension.hpp"
  70 #include "runtime/handles.inline.hpp"
  71 #include "runtime/java.hpp"
  72 #include "runtime/orderAccess.hpp"
  73 #include "runtime/prefetch.inline.hpp"
  74 #include "services/memTracker.hpp"
  75 #include "utilities/align.hpp"
  76 #include "utilities/formatBuffer.hpp"
  77 #include "utilities/growableArray.hpp"
  78 
  79 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  80   assert(addr < _cm->finger(), "invariant");
  81   assert(addr >= _task->finger(), "invariant");
  82 
  83   // We move that task's local finger along.
  84   _task->move_finger_to(addr);
  85 
  86   _task->scan_task_entry(G1TaskQueueEntry::from_oop(cast_to_oop(addr)));
  87   // we only partially drain the local queue and global stack
  88   _task->drain_local_queue(true);
  89   _task->drain_global_stack(true);
  90 
  91   // if the has_aborted flag has been raised, we need to bail out of
  92   // the iteration
  93   return !_task->has_aborted();
  94 }
  95 
  96 G1CMMarkStack::G1CMMarkStack() :
  97   _max_chunk_capacity(0),
  98   _base(NULL),
  99   _chunk_capacity(0) {
 100   set_empty();
 101 }
 102 
 103 bool G1CMMarkStack::resize(size_t new_capacity) {
 104   assert(is_empty(), "Only resize when stack is empty.");
 105   assert(new_capacity <= _max_chunk_capacity,
 106          "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
 107 
 108   TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);
 109 
 110   if (new_base == NULL) {
 111     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
 112     return false;
 113   }
 114   // Release old mapping.
 115   if (_base != NULL) {
 116     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 117   }
 118 
 119   _base = new_base;
 120   _chunk_capacity = new_capacity;
 121   set_empty();
 122 
 123   return true;
 124 }
 125 
 126 size_t G1CMMarkStack::capacity_alignment() {
 127   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
 128 }
 129 
 130 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 131   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 132 
 133   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 134 
 135   _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 136   size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 137 
 138   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
 139             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
 140             _max_chunk_capacity,
 141             initial_chunk_capacity);
 142 
 143   log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
 144                 initial_chunk_capacity, _max_chunk_capacity);
 145 
 146   return resize(initial_chunk_capacity);
 147 }
 148 
 149 void G1CMMarkStack::expand() {
 150   if (_chunk_capacity == _max_chunk_capacity) {
 151     log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
 152     return;
 153   }
 154   size_t old_capacity = _chunk_capacity;
 155   // Double capacity if possible
 156   size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
 157 
 158   if (resize(new_capacity)) {
 159     log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 160                   old_capacity, new_capacity);
 161   } else {
 162     log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 163                     old_capacity, new_capacity);
 164   }
 165 }
 166 
 167 G1CMMarkStack::~G1CMMarkStack() {
 168   if (_base != NULL) {
 169     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 170   }
 171 }
 172 
 173 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
 174   elem->next = *list;
 175   *list = elem;
 176 }
 177 
 178 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
 179   MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 180   add_chunk_to_list(&_chunk_list, elem);
 181   _chunks_in_chunk_list++;
 182 }
 183 
 184 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
 185   MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 186   add_chunk_to_list(&_free_list, elem);
 187 }
 188 
 189 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
 190   TaskQueueEntryChunk* result = *list;
 191   if (result != NULL) {
 192     *list = (*list)->next;
 193   }
 194   return result;
 195 }
 196 
 197 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
 198   MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 199   TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
 200   if (result != NULL) {
 201     _chunks_in_chunk_list--;
 202   }
 203   return result;
 204 }
 205 
 206 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
 207   MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 208   return remove_chunk_from_list(&_free_list);
 209 }
 210 
 211 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
 212   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
 213   // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
 214   // wraparound of _hwm.
 215   if (_hwm >= _chunk_capacity) {
 216     return NULL;
 217   }
 218 
 219   size_t cur_idx = Atomic::fetch_and_add(&_hwm, 1u);
 220   if (cur_idx >= _chunk_capacity) {
 221     return NULL;
 222   }
 223 
 224   TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
 225   result->next = NULL;
 226   return result;
 227 }
 228 
 229 bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
 230   // Get a new chunk.
 231   TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();
 232 
 233   if (new_chunk == NULL) {
 234     // Did not get a chunk from the free list. Allocate from backing memory.
 235     new_chunk = allocate_new_chunk();
 236 
 237     if (new_chunk == NULL) {
 238       return false;
 239     }
 240   }
 241 
 242   Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 243 
 244   add_chunk_to_chunk_list(new_chunk);
 245 
 246   return true;
 247 }
 248 
 249 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
 250   TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
 251 
 252   if (cur == NULL) {
 253     return false;
 254   }
 255 
 256   Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 257 
 258   add_chunk_to_free_list(cur);
 259   return true;
 260 }
 261 
 262 void G1CMMarkStack::set_empty() {
 263   _chunks_in_chunk_list = 0;
 264   _hwm = 0;
 265   _chunk_list = NULL;
 266   _free_list = NULL;
 267 }
 268 
 269 G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
 270     _root_regions(MemRegion::create_array(max_regions, mtGC)),
 271     _max_regions(max_regions),
 272     _num_root_regions(0),
 273     _claimed_root_regions(0),
 274     _scan_in_progress(false),
 275     _should_abort(false) { }
 276 
 277 G1CMRootMemRegions::~G1CMRootMemRegions() {
 278   MemRegion::destroy_array(_root_regions, _max_regions);
 279 }
 280 
 281 void G1CMRootMemRegions::reset() {
 282   _num_root_regions = 0;
 283 }
 284 
 285 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
 286   assert_at_safepoint();
 287   size_t idx = Atomic::fetch_and_add(&_num_root_regions, 1u);
 288   assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
 289   assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
 290          "end (" PTR_FORMAT ")", p2i(start), p2i(end));
 291   _root_regions[idx].set_start(start);
 292   _root_regions[idx].set_end(end);
 293 }
 294 
 295 void G1CMRootMemRegions::prepare_for_scan() {
 296   assert(!scan_in_progress(), "pre-condition");
 297 
 298   _scan_in_progress = _num_root_regions > 0;
 299 
 300   _claimed_root_regions = 0;
 301   _should_abort = false;
 302 }
 303 
 304 const MemRegion* G1CMRootMemRegions::claim_next() {
 305   if (_should_abort) {
 306     // If someone has set the should_abort flag, we return NULL to
 307     // force the caller to bail out of their loop.
 308     return NULL;
 309   }
 310 
 311   if (_claimed_root_regions >= _num_root_regions) {
 312     return NULL;
 313   }
 314 
 315   size_t claimed_index = Atomic::fetch_and_add(&_claimed_root_regions, 1u);
 316   if (claimed_index < _num_root_regions) {
 317     return &_root_regions[claimed_index];
 318   }
 319   return NULL;
 320 }
 321 
 322 uint G1CMRootMemRegions::num_root_regions() const {
 323   return (uint)_num_root_regions;
 324 }
 325 
 326 void G1CMRootMemRegions::notify_scan_done() {
 327   MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 328   _scan_in_progress = false;
 329   RootRegionScan_lock->notify_all();
 330 }
 331 
 332 void G1CMRootMemRegions::cancel_scan() {
 333   notify_scan_done();
 334 }
 335 
 336 void G1CMRootMemRegions::scan_finished() {
 337   assert(scan_in_progress(), "pre-condition");
 338 
 339   if (!_should_abort) {
 340     assert(_claimed_root_regions >= num_root_regions(),
 341            "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
 342            _claimed_root_regions, num_root_regions());
 343   }
 344 
 345   notify_scan_done();
 346 }
 347 
 348 bool G1CMRootMemRegions::wait_until_scan_finished() {
 349   if (!scan_in_progress()) {
 350     return false;
 351   }
 352 
 353   {
 354     MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 355     while (scan_in_progress()) {
 356       ml.wait();
 357     }
 358   }
 359   return true;
 360 }
 361 
 362 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
 363                                    G1RegionToSpaceMapper* prev_bitmap_storage,
 364                                    G1RegionToSpaceMapper* next_bitmap_storage) :
 365   // _cm_thread set inside the constructor
 366   _g1h(g1h),
 367 
 368   _mark_bitmap_1(),
 369   _mark_bitmap_2(),
 370   _prev_mark_bitmap(&_mark_bitmap_1),
 371   _next_mark_bitmap(&_mark_bitmap_2),
 372 
 373   _heap(_g1h->reserved()),
 374 
 375   _root_regions(_g1h->max_regions()),
 376 
 377   _global_mark_stack(),
 378 
 379   // _finger set in set_non_marking_state
 380 
 381   _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
 382   _max_num_tasks(MAX2(ConcGCThreads, ParallelGCThreads)),
 383   // _num_active_tasks set in set_non_marking_state()
 384   // _tasks set inside the constructor
 385 
 386   _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
 387   _terminator((int) _max_num_tasks, _task_queues),
 388 
 389   _first_overflow_barrier_sync(),
 390   _second_overflow_barrier_sync(),
 391 
 392   _has_overflown(false),
 393   _concurrent(false),
 394   _has_aborted(false),
 395   _restart_for_overflow(false),
 396   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 397   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
 398 
 399   // _verbose_level set below
 400 
 401   _init_times(),
 402   _remark_times(),
 403   _remark_mark_times(),
 404   _remark_weak_ref_times(),
 405   _cleanup_times(),
 406   _total_cleanup_time(0.0),
 407 
 408   _accum_task_vtime(NULL),
 409 
 410   _concurrent_workers(NULL),
 411   _num_concurrent_workers(0),
 412   _max_concurrent_workers(0),
 413 
 414   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_reserved_regions(), mtGC)),
 415   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_reserved_regions(), mtGC)),
 416   _needs_remembered_set_rebuild(false)
 417 {
 418   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 419 
 420   _mark_bitmap_1.initialize(g1h->reserved(), prev_bitmap_storage);
 421   _mark_bitmap_2.initialize(g1h->reserved(), next_bitmap_storage);
 422 
 423   // Create & start ConcurrentMark thread.
 424   _cm_thread = new G1ConcurrentMarkThread(this);
 425   if (_cm_thread->osthread() == NULL) {
 426     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 427   }
 428 
 429   log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
 430   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 431 
 432   _num_concurrent_workers = ConcGCThreads;
 433   _max_concurrent_workers = _num_concurrent_workers;
 434 
 435   _concurrent_workers = new WorkerThreads("G1 Conc", _max_concurrent_workers);
 436   _concurrent_workers->initialize_workers();
 437 
 438   if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
 439     vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
 440   }
 441 
 442   _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
 443   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);
 444 
 445   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 446   _num_active_tasks = _max_num_tasks;
 447 
 448   for (uint i = 0; i < _max_num_tasks; ++i) {
 449     G1CMTaskQueue* task_queue = new G1CMTaskQueue();
 450     _task_queues->register_queue(i, task_queue);
 451 
 452     _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats);
 453 
 454     _accum_task_vtime[i] = 0.0;
 455   }
 456 
 457   reset_at_marking_complete();
 458 }
 459 
 460 void G1ConcurrentMark::reset() {
 461   _has_aborted = false;
 462 
 463   reset_marking_for_restart();
 464 
 465   // Reset all tasks, since different phases will use different number of active
 466   // threads. So, it's easiest to have all of them ready.
 467   for (uint i = 0; i < _max_num_tasks; ++i) {
 468     _tasks[i]->reset(_next_mark_bitmap);
 469   }
 470 
 471   uint max_reserved_regions = _g1h->max_reserved_regions();
 472   for (uint i = 0; i < max_reserved_regions; i++) {
 473     _top_at_rebuild_starts[i] = NULL;
 474     _region_mark_stats[i].clear();
 475   }
 476 
 477   _root_regions.reset();
 478 }
 479 
 480 void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
 481   for (uint j = 0; j < _max_num_tasks; ++j) {
 482     _tasks[j]->clear_mark_stats_cache(region_idx);
 483   }
 484   _top_at_rebuild_starts[region_idx] = NULL;
 485   _region_mark_stats[region_idx].clear();
 486 }
 487 
 488 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
 489   uint const region_idx = r->hrm_index();
 490   if (r->is_humongous()) {
 491     assert(r->is_starts_humongous(), "Got humongous continues region here");
 492     uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(cast_to_oop(r->humongous_start_region()->bottom())->size());
 493     for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
 494       clear_statistics_in_region(j);
 495     }
 496   } else {
 497     clear_statistics_in_region(region_idx);
 498   }
 499 }
 500 
 501 static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
 502   if (bitmap->is_marked(addr)) {
 503     bitmap->clear(addr);
 504   }
 505 }
 506 
 507 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
 508   assert_at_safepoint();
 509 
 510   // Need to clear all mark bits of the humongous object.
 511   clear_mark_if_set(_prev_mark_bitmap, r->bottom());
 512   clear_mark_if_set(_next_mark_bitmap, r->bottom());
 513 
 514   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
 515     return;
 516   }
 517 
 518   // Clear any statistics about the region gathered so far.
 519   clear_statistics(r);
 520 }
 521 
 522 void G1ConcurrentMark::reset_marking_for_restart() {
 523   _global_mark_stack.set_empty();
 524 
 525   // Expand the marking stack, if we have to and if we can.
 526   if (has_overflown()) {
 527     _global_mark_stack.expand();
 528 
 529     uint max_reserved_regions = _g1h->max_reserved_regions();
 530     for (uint i = 0; i < max_reserved_regions; i++) {
 531       _region_mark_stats[i].clear_during_overflow();
 532     }
 533   }
 534 
 535   clear_has_overflown();
 536   _finger = _heap.start();
 537 
 538   for (uint i = 0; i < _max_num_tasks; ++i) {
 539     G1CMTaskQueue* queue = _task_queues->queue(i);
 540     queue->set_empty();
 541   }
 542 }
 543 
 544 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
 545   assert(active_tasks <= _max_num_tasks, "we should not have more");
 546 
 547   _num_active_tasks = active_tasks;
 548   // Need to update the three data structures below according to the
 549   // number of active threads for this phase.
 550   _terminator.reset_for_reuse(active_tasks);
 551   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 552   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 553 }
 554 
 555 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 556   set_concurrency(active_tasks);
 557 
 558   _concurrent = concurrent;
 559 
 560   if (!concurrent) {
 561     // At this point we should be in a STW phase, and completed marking.
 562     assert_at_safepoint_on_vm_thread();
 563     assert(out_of_regions(),
 564            "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
 565            p2i(_finger), p2i(_heap.end()));
 566   }
 567 }
 568 
 569 void G1ConcurrentMark::reset_at_marking_complete() {
 570   // We set the global marking state to some default values when we're
 571   // not doing marking.
 572   reset_marking_for_restart();
 573   _num_active_tasks = 0;
 574 }
 575 
 576 G1ConcurrentMark::~G1ConcurrentMark() {
 577   FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
 578   FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
 579   // The G1ConcurrentMark instance is never freed.
 580   ShouldNotReachHere();
 581 }
 582 
 583 class G1ClearBitMapTask : public WorkerTask {
 584 public:
 585   static size_t chunk_size() { return M; }
 586 
 587 private:
 588   // Heap region closure used for clearing the _next_mark_bitmap.
 589   class G1ClearBitmapHRClosure : public HeapRegionClosure {
 590   private:
 591     G1ConcurrentMark* _cm;
 592     G1CMBitMap* _bitmap;
 593     bool _suspendible; // If suspendible, do yield checks.
 594 
 595     bool suspendible() {
 596       return _suspendible;
 597     }
 598 
 599     bool is_clear_concurrent_undo() {
 600       return suspendible() && _cm->cm_thread()->in_undo_mark();
 601     }
 602 
 603     bool has_aborted() {
 604       if (suspendible()) {
 605         _cm->do_yield_check();
 606         return _cm->has_aborted();
 607       }
 608       return false;
 609     }
 610 
 611     HeapWord* region_clear_limit(HeapRegion* r) {
 612       // During a Concurrent Undo Mark cycle, the _next_mark_bitmap is  cleared
 613       // without swapping with the _prev_mark_bitmap. Therefore, the per region
 614       // next_top_at_mark_start and live_words data are current wrt
 615       // _next_mark_bitmap. We use this information to only clear ranges of the
 616       // bitmap that require clearing.
 617       if (is_clear_concurrent_undo()) {
 618         // No need to clear bitmaps for empty regions.
 619         if (_cm->live_words(r->hrm_index()) == 0) {
 620           assert(_bitmap->get_next_marked_addr(r->bottom(), r->end()) == r->end(), "Should not have marked bits");
 621           return r->bottom();
 622         }
 623         assert(_bitmap->get_next_marked_addr(r->next_top_at_mark_start(), r->end()) == r->end(), "Should not have marked bits above ntams");
 624       }
 625       return r->end();
 626     }
 627 
 628   public:
 629     G1ClearBitmapHRClosure(G1ConcurrentMark* cm, bool suspendible) :
 630       HeapRegionClosure(),
 631       _cm(cm),
 632       _bitmap(cm->next_mark_bitmap()),
 633       _suspendible(suspendible)
 634     { }
 635 
 636     virtual bool do_heap_region(HeapRegion* r) {
 637       if (has_aborted()) {
 638         return true;
 639       }
 640 
 641       HeapWord* cur = r->bottom();
 642       HeapWord* const end = region_clear_limit(r);
 643 
 644       size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
 645 
 646       while (cur < end) {
 647 
 648         MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 649         _bitmap->clear_range(mr);
 650 
 651         cur += chunk_size_in_words;
 652 
 653         // Repeat the asserts from before the start of the closure. We will do them
 654         // as asserts here to minimize their overhead on the product. However, we
 655         // will have them as guarantees at the beginning / end of the bitmap
 656         // clearing to get some checking in the product.
 657         assert(!suspendible() || _cm->cm_thread()->in_progress(), "invariant");
 658         assert(!suspendible() || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 659 
 660         // Abort iteration if necessary.
 661         if (has_aborted()) {
 662           return true;
 663         }
 664       }
 665       assert(cur >= end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
 666 
 667       return false;
 668     }
 669   };
 670 
 671   G1ClearBitmapHRClosure _cl;
 672   HeapRegionClaimer _hr_claimer;
 673   bool _suspendible; // If the task is suspendible, workers must join the STS.
 674 
 675 public:
 676   G1ClearBitMapTask(G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
 677     WorkerTask("G1 Clear Bitmap"),
 678     _cl(cm, suspendible),
 679     _hr_claimer(n_workers),
 680     _suspendible(suspendible)
 681   { }
 682 
 683   void work(uint worker_id) {
 684     SuspendibleThreadSetJoiner sts_join(_suspendible);
 685     G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
 686   }
 687 
 688   bool is_complete() {
 689     return _cl.is_complete();
 690   }
 691 };
 692 
 693 void G1ConcurrentMark::clear_next_bitmap(WorkerThreads* workers, bool may_yield) {
 694   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 695 
 696   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
 697   size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 698 
 699   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 700 
 701   G1ClearBitMapTask cl(this, num_workers, may_yield);
 702 
 703   log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
 704   workers->run_task(&cl, num_workers);
 705   guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
 706 }
 707 
 708 void G1ConcurrentMark::cleanup_for_next_mark() {
 709   // Make sure that the concurrent mark thread looks to still be in
 710   // the current cycle.
 711   guarantee(cm_thread()->in_progress(), "invariant");
 712 
 713   // We are finishing up the current cycle by clearing the next
 714   // marking bitmap and getting it ready for the next cycle. During
 715   // this time no other cycle can start. So, let's make sure that this
 716   // is the case.
 717   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 718 
 719   clear_next_bitmap(_concurrent_workers, true);
 720 
 721   // Repeat the asserts from above.
 722   guarantee(cm_thread()->in_progress(), "invariant");
 723   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 724 }
 725 
 726 void G1ConcurrentMark::clear_next_bitmap(WorkerThreads* workers) {
 727   assert_at_safepoint_on_vm_thread();
 728   // To avoid fragmentation the full collection requesting to clear the bitmap
 729   // might use fewer workers than available. To ensure the bitmap is cleared
 730   // as efficiently as possible the number of active workers are temporarily
 731   // increased to include all currently created workers.
 732   WithActiveWorkers update(workers, workers->created_workers());
 733   clear_next_bitmap(workers, false);
 734 }
 735 
 736 class G1PreConcurrentStartTask : public G1BatchedTask {
 737   // Concurrent start needs claim bits to keep track of the marked-through CLDs.
 738   class CLDClearClaimedMarksTask;
 739   // Reset marking state.
 740   class ResetMarkingStateTask;
 741   // For each region note start of marking.
 742   class NoteStartOfMarkTask;
 743 
 744 public:
 745   G1PreConcurrentStartTask(GCCause::Cause cause, G1ConcurrentMark* cm);
 746 };
 747 
 748 class G1PreConcurrentStartTask::CLDClearClaimedMarksTask : public G1AbstractSubTask {
 749 public:
 750   CLDClearClaimedMarksTask() : G1AbstractSubTask(G1GCPhaseTimes::CLDClearClaimedMarks) { }
 751 
 752   double worker_cost() const override { return 1.0; }
 753   void do_work(uint worker_id) override;
 754 };
 755 
 756 class G1PreConcurrentStartTask::ResetMarkingStateTask : public G1AbstractSubTask {
 757   G1ConcurrentMark* _cm;
 758 public:
 759   ResetMarkingStateTask(G1ConcurrentMark* cm) : G1AbstractSubTask(G1GCPhaseTimes::ResetMarkingState), _cm(cm) { }
 760 
 761   double worker_cost() const override { return 1.0; }
 762   void do_work(uint worker_id) override;
 763 };
 764 
 765 class G1PreConcurrentStartTask::NoteStartOfMarkTask : public G1AbstractSubTask {
 766   HeapRegionClaimer _claimer;
 767 public:
 768   NoteStartOfMarkTask() : G1AbstractSubTask(G1GCPhaseTimes::NoteStartOfMark), _claimer(0) { }
 769 
 770   double worker_cost() const override {
 771     // The work done per region is very small, therefore we choose this magic number to cap the number
 772     // of threads used when there are few regions.
 773     const double regions_per_thread = 1000;
 774     return _claimer.n_regions() / regions_per_thread;
 775   }
 776 
 777   void set_max_workers(uint max_workers) override;
 778   void do_work(uint worker_id) override;
 779 };
 780 
 781 void G1PreConcurrentStartTask::CLDClearClaimedMarksTask::do_work(uint worker_id) {
 782   ClassLoaderDataGraph::clear_claimed_marks();
 783 }
 784 
 785 void G1PreConcurrentStartTask::ResetMarkingStateTask::do_work(uint worker_id) {
 786   // Reset marking state.
 787   _cm->reset();
 788 }
 789 
 790 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 791 public:
 792   bool do_heap_region(HeapRegion* r) override {
 793     r->note_start_of_marking();
 794     return false;
 795   }
 796 };
 797 
 798 void G1PreConcurrentStartTask::NoteStartOfMarkTask::do_work(uint worker_id) {
 799   NoteStartOfMarkHRClosure start_cl;
 800   G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&start_cl, &_claimer, worker_id);
 801 }
 802 
 803 void G1PreConcurrentStartTask::NoteStartOfMarkTask::set_max_workers(uint max_workers) {
 804   _claimer.set_n_workers(max_workers);
 805 }
 806 
 807 G1PreConcurrentStartTask::G1PreConcurrentStartTask(GCCause::Cause cause, G1ConcurrentMark* cm) :
 808   G1BatchedTask("Pre Concurrent Start", G1CollectedHeap::heap()->phase_times()) {
 809   add_serial_task(new CLDClearClaimedMarksTask());
 810   add_serial_task(new ResetMarkingStateTask(cm));
 811   add_parallel_task(new NoteStartOfMarkTask());
 812 };
 813 
 814 void G1ConcurrentMark::pre_concurrent_start(GCCause::Cause cause) {
 815   assert_at_safepoint_on_vm_thread();
 816     
 817   CodeCache::increment_marking_cycle();
 818 
 819   G1PreConcurrentStartTask cl(cause, this);
 820   G1CollectedHeap::heap()->run_batch_task(&cl);
 821 
 822   _gc_tracer_cm->set_gc_cause(cause);
 823 }
 824 
 825 
 826 void G1ConcurrentMark::post_concurrent_mark_start() {
 827   // Start Concurrent Marking weak-reference discovery.
 828   ReferenceProcessor* rp = _g1h->ref_processor_cm();
 829   rp->start_discovery(false /* always_clear */);
 830 
 831   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
 832   // This is the start of  the marking cycle, we're expected all
 833   // threads to have SATB queues with active set to false.
 834   satb_mq_set.set_active_all_threads(true, /* new active value */
 835                                      false /* expected_active */);
 836 
 837   _root_regions.prepare_for_scan();
 838 
 839   // update_g1_committed() will be called at the end of an evac pause
 840   // when marking is on. So, it's also called at the end of the
 841   // concurrent start pause to update the heap end, if the heap expands
 842   // during it. No need to call it here.
 843 }
 844 
 845 void G1ConcurrentMark::post_concurrent_undo_start() {
 846   root_regions()->cancel_scan();
 847 }
 848 
 849 /*
 850  * Notice that in the next two methods, we actually leave the STS
 851  * during the barrier sync and join it immediately afterwards. If we
 852  * do not do this, the following deadlock can occur: one thread could
 853  * be in the barrier sync code, waiting for the other thread to also
 854  * sync up, whereas another one could be trying to yield, while also
 855  * waiting for the other threads to sync up too.
 856  *
 857  * Note, however, that this code is also used during remark and in
 858  * this case we should not attempt to leave / enter the STS, otherwise
 859  * we'll either hit an assert (debug / fastdebug) or deadlock
 860  * (product). So we should only leave / enter the STS if we are
 861  * operating concurrently.
 862  *
 863  * Because the thread that does the sync barrier has left the STS, it
 864  * is possible to be suspended for a Full GC or an evacuation pause
 865  * could occur. This is actually safe, since the entering the sync
 866  * barrier is one of the last things do_marking_step() does, and it
 867  * doesn't manipulate any data structures afterwards.
 868  */
 869 
 870 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
 871   bool barrier_aborted;
 872   {
 873     SuspendibleThreadSetLeaver sts_leave(concurrent());
 874     barrier_aborted = !_first_overflow_barrier_sync.enter();
 875   }
 876 
 877   // at this point everyone should have synced up and not be doing any
 878   // more work
 879 
 880   if (barrier_aborted) {
 881     // If the barrier aborted we ignore the overflow condition and
 882     // just abort the whole marking phase as quickly as possible.
 883     return;
 884   }
 885 }
 886 
 887 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 888   SuspendibleThreadSetLeaver sts_leave(concurrent());
 889   _second_overflow_barrier_sync.enter();
 890 
 891   // at this point everything should be re-initialized and ready to go
 892 }
 893 
 894 class G1CMConcurrentMarkingTask : public WorkerTask {
 895   G1ConcurrentMark*     _cm;
 896 
 897 public:
 898   void work(uint worker_id) {
 899     ResourceMark rm;
 900 
 901     double start_vtime = os::elapsedVTime();
 902 
 903     {
 904       SuspendibleThreadSetJoiner sts_join;
 905 
 906       assert(worker_id < _cm->active_tasks(), "invariant");
 907 
 908       G1CMTask* task = _cm->task(worker_id);
 909       task->record_start_time();
 910       if (!_cm->has_aborted()) {
 911         do {
 912           task->do_marking_step(G1ConcMarkStepDurationMillis,
 913                                 true  /* do_termination */,
 914                                 false /* is_serial*/);
 915 
 916           _cm->do_yield_check();
 917         } while (!_cm->has_aborted() && task->has_aborted());
 918       }
 919       task->record_end_time();
 920       guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
 921     }
 922 
 923     double end_vtime = os::elapsedVTime();
 924     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
 925   }
 926 
 927   G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
 928       WorkerTask("Concurrent Mark"), _cm(cm) { }
 929 
 930   ~G1CMConcurrentMarkingTask() { }
 931 };
 932 
 933 uint G1ConcurrentMark::calc_active_marking_workers() {
 934   uint result = 0;
 935   if (!UseDynamicNumberOfGCThreads || !FLAG_IS_DEFAULT(ConcGCThreads)) {
 936     result = _max_concurrent_workers;
 937   } else {
 938     result =
 939       WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
 940                                                 1, /* Minimum workers */
 941                                                 _num_concurrent_workers,
 942                                                 Threads::number_of_non_daemon_threads());
 943     // Don't scale the result down by scale_concurrent_workers() because
 944     // that scaling has already gone into "_max_concurrent_workers".
 945   }
 946   assert(result > 0 && result <= _max_concurrent_workers,
 947          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
 948          _max_concurrent_workers, result);
 949   return result;
 950 }
 951 
 952 void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) {
 953 #ifdef ASSERT
 954   HeapWord* last = region->last();
 955   HeapRegion* hr = _g1h->heap_region_containing(last);
 956   assert(hr->is_old() || hr->next_top_at_mark_start() == hr->bottom(),
 957          "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
 958   assert(hr->next_top_at_mark_start() == region->start(),
 959          "MemRegion start should be equal to nTAMS");
 960 #endif
 961 
 962   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 963 
 964   const uintx interval = PrefetchScanIntervalInBytes;
 965   HeapWord* curr = region->start();
 966   const HeapWord* end = region->end();
 967   while (curr < end) {
 968     Prefetch::read(curr, interval);
 969     oop obj = cast_to_oop(curr);
 970     size_t size = obj->oop_iterate_size(&cl);
 971     assert(size == obj->size(), "sanity");
 972     curr += size;
 973   }
 974 }
 975 
 976 class G1CMRootRegionScanTask : public WorkerTask {
 977   G1ConcurrentMark* _cm;
 978 public:
 979   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 980     WorkerTask("G1 Root Region Scan"), _cm(cm) { }
 981 
 982   void work(uint worker_id) {
 983     G1CMRootMemRegions* root_regions = _cm->root_regions();
 984     const MemRegion* region = root_regions->claim_next();
 985     while (region != NULL) {
 986       _cm->scan_root_region(region, worker_id);
 987       region = root_regions->claim_next();
 988     }
 989   }
 990 };
 991 
 992 void G1ConcurrentMark::scan_root_regions() {
 993   // scan_in_progress() will have been set to true only if there was
 994   // at least one root region to scan. So, if it's false, we
 995   // should not attempt to do any further work.
 996   if (root_regions()->scan_in_progress()) {
 997     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
 998 
 999     _num_concurrent_workers = MIN2(calc_active_marking_workers(),
1000                                    // We distribute work on a per-region basis, so starting
1001                                    // more threads than that is useless.
1002                                    root_regions()->num_root_regions());
1003     assert(_num_concurrent_workers <= _max_concurrent_workers,
1004            "Maximum number of marking threads exceeded");
1005 
1006     G1CMRootRegionScanTask task(this);
1007     log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
1008                         task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
1009     _concurrent_workers->run_task(&task, _num_concurrent_workers);
1010 
1011     // It's possible that has_aborted() is true here without actually
1012     // aborting the survivor scan earlier. This is OK as it's
1013     // mainly used for sanity checking.
1014     root_regions()->scan_finished();
1015   }
1016 }
1017 
1018 void G1ConcurrentMark::concurrent_cycle_start() {
1019   _gc_timer_cm->register_gc_start();
1020 
1021   _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
1022 
1023   _g1h->trace_heap_before_gc(_gc_tracer_cm);
1024 }
1025 
1026 void G1ConcurrentMark::concurrent_cycle_end() {
1027   _g1h->collector_state()->set_clearing_next_bitmap(false);
1028 
1029   _g1h->trace_heap_after_gc(_gc_tracer_cm);
1030 
1031   if (has_aborted()) {
1032     log_info(gc, marking)("Concurrent Mark Abort");
1033     _gc_tracer_cm->report_concurrent_mode_failure();
1034   }
1035 
1036   _gc_timer_cm->register_gc_end();
1037 
1038   _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1039 }
1040 
1041 void G1ConcurrentMark::mark_from_roots() {
1042   _restart_for_overflow = false;
1043 
1044   _num_concurrent_workers = calc_active_marking_workers();
1045 
1046   uint active_workers = MAX2(1U, _num_concurrent_workers);
1047 
1048   // Setting active workers is not guaranteed since fewer
1049   // worker threads may currently exist and more may not be
1050   // available.
1051   active_workers = _concurrent_workers->set_active_workers(active_workers);
1052   log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->max_workers());
1053 
1054   // Parallel task terminator is set in "set_concurrency_and_phase()"
1055   set_concurrency_and_phase(active_workers, true /* concurrent */);
1056 
1057   G1CMConcurrentMarkingTask marking_task(this);
1058   _concurrent_workers->run_task(&marking_task);
1059   print_stats();
1060 }
1061 
1062 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
1063   G1HeapVerifier* verifier = _g1h->verifier();
1064 
1065   verifier->verify_region_sets_optional();
1066 
1067   if (VerifyDuringGC) {
1068     GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);
1069 
1070     size_t const BufLen = 512;
1071     char buffer[BufLen];
1072 
1073     jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
1074     verifier->verify(type, vo, buffer);
1075   }
1076 
1077   verifier->check_bitmaps(caller);
1078 }
1079 
1080 class G1UpdateRemSetTrackingBeforeRebuildTask : public WorkerTask {
1081   G1CollectedHeap* _g1h;
1082   G1ConcurrentMark* _cm;
1083   HeapRegionClaimer _hrclaimer;
1084   uint volatile _total_selected_for_rebuild;
1085 
1086   G1PrintRegionLivenessInfoClosure _cl;
1087 
1088   class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1089     G1CollectedHeap* _g1h;
1090     G1ConcurrentMark* _cm;
1091 
1092     G1PrintRegionLivenessInfoClosure* _cl;
1093 
1094     uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1095 
1096     void update_remset_before_rebuild(HeapRegion* hr) {
1097       G1RemSetTrackingPolicy* tracking_policy = _g1h->policy()->remset_tracker();
1098 
1099       bool selected_for_rebuild;
1100       if (hr->is_humongous()) {
1101         bool const is_live = _cm->live_words(hr->humongous_start_region()->hrm_index()) > 0;
1102         selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
1103       } else {
1104         size_t const live_bytes = _cm->live_bytes(hr->hrm_index());
1105         selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1106       }
1107       if (selected_for_rebuild) {
1108         _num_regions_selected_for_rebuild++;
1109       }
1110       _cm->update_top_at_rebuild_start(hr);
1111     }
1112 
1113     // Distribute the given words across the humongous object starting with hr and
1114     // note end of marking.
1115     void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1116       uint const region_idx = hr->hrm_index();
1117       size_t const obj_size_in_words = (size_t)cast_to_oop(hr->bottom())->size();
1118       uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);
1119 
1120       // "Distributing" zero words means that we only note end of marking for these
1121       // regions.
1122       assert(marked_words == 0 || obj_size_in_words == marked_words,
1123              "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
1124              obj_size_in_words, marked_words);
1125 
1126       for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
1127         HeapRegion* const r = _g1h->region_at(i);
1128         size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
1129 
1130         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
1131                                words_to_add, i, r->get_type_str());
1132         add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
1133         marked_words -= words_to_add;
1134       }
1135       assert(marked_words == 0,
1136              SIZE_FORMAT " words left after distributing space across %u regions",
1137              marked_words, num_regions_in_humongous);
1138     }
1139 
1140     void update_marked_bytes(HeapRegion* hr) {
1141       uint const region_idx = hr->hrm_index();
1142       size_t const marked_words = _cm->live_words(region_idx);
1143       // The marking attributes the object's size completely to the humongous starts
1144       // region. We need to distribute this value across the entire set of regions a
1145       // humongous object spans.
1146       if (hr->is_humongous()) {
1147         assert(hr->is_starts_humongous() || marked_words == 0,
1148                "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
1149                marked_words, region_idx, hr->get_type_str());
1150         if (hr->is_starts_humongous()) {
1151           distribute_marked_bytes(hr, marked_words);
1152         }
1153       } else {
1154         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
1155         add_marked_bytes_and_note_end(hr, _cm->live_bytes(region_idx));
1156       }
1157     }
1158 
1159     void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
1160       hr->add_to_marked_bytes(marked_bytes);
1161       _cl->do_heap_region(hr);
1162       hr->note_end_of_marking();
1163     }
1164 
1165   public:
1166     G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
1167       _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }
1168 
1169     virtual bool do_heap_region(HeapRegion* r) {
1170       update_remset_before_rebuild(r);
1171       update_marked_bytes(r);
1172 
1173       return false;
1174     }
1175 
1176     uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1177   };
1178 
1179 public:
1180   G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
1181     WorkerTask("G1 Update RemSet Tracking Before Rebuild"),
1182     _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }
1183 
1184   virtual void work(uint worker_id) {
1185     G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
1186     _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
1187     Atomic::add(&_total_selected_for_rebuild, update_cl.num_selected_for_rebuild());
1188   }
1189 
1190   uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
1191 
1192   // Number of regions for which roughly one thread should be spawned for this work.
1193   static const uint RegionsPerThread = 384;
1194 };
1195 
1196 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1197   G1CollectedHeap* _g1h;
1198 public:
1199   G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1200 
1201   virtual bool do_heap_region(HeapRegion* r) {
1202     _g1h->policy()->remset_tracker()->update_after_rebuild(r);
1203     return false;
1204   }
1205 };
1206 
1207 void G1ConcurrentMark::remark() {
1208   assert_at_safepoint_on_vm_thread();
1209 
1210   // If a full collection has happened, we should not continue. However we might
1211   // have ended up here as the Remark VM operation has been scheduled already.
1212   if (has_aborted()) {
1213     return;
1214   }
1215 
1216   G1Policy* policy = _g1h->policy();
1217   policy->record_concurrent_mark_remark_start();
1218 
1219   double start = os::elapsedTime();
1220 
1221   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1222 
1223   {
1224     GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
1225     finalize_marking();
1226   }
1227 
1228   double mark_work_end = os::elapsedTime();
1229 
1230   bool const mark_finished = !has_overflown();
1231   if (mark_finished) {
1232     weak_refs_work();
1233 
1234     SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1235     // We're done with marking.
1236     // This is the end of the marking cycle, we're expected all
1237     // threads to have SATB queues with active set to true.
1238     satb_mq_set.set_active_all_threads(false, /* new active value */
1239                                        true /* expected_active */);
1240 
1241     {
1242       GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
1243       flush_all_task_caches();
1244     }
1245 
1246     // Install newly created mark bitmap as "prev".
1247     swap_mark_bitmaps();
1248 
1249     _g1h->collector_state()->set_clearing_next_bitmap(true);
1250     {
1251       GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
1252 
1253       uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
1254                                        G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
1255       uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
1256 
1257       G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
1258       log_debug(gc,ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
1259       _g1h->workers()->run_task(&cl, num_workers);
1260 
1261       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1262                                       _g1h->num_regions(), cl.total_selected_for_rebuild());
1263 
1264       _needs_remembered_set_rebuild = (cl.total_selected_for_rebuild() > 0);
1265     }
1266     {
1267       GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
1268       reclaim_empty_regions();
1269     }
1270 
1271     // Clean out dead classes
1272     if (ClassUnloadingWithConcurrentMark) {
1273       GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
1274       ClassLoaderDataGraph::purge(/*at_safepoint*/true);
1275     }
1276 
1277     _g1h->resize_heap_if_necessary();
1278     _g1h->uncommit_regions_if_necessary();
1279 
1280     compute_new_sizes();
1281 
1282     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1283 
1284     assert(!restart_for_overflow(), "sanity");
1285     // Completely reset the marking state since marking completed
1286     reset_at_marking_complete();
1287   } else {
1288     // We overflowed.  Restart concurrent marking.
1289     _restart_for_overflow = true;
1290 
1291     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1292 
1293     // Clear the marking state because we will be restarting
1294     // marking due to overflowing the global mark stack.
1295     reset_marking_for_restart();
1296   }
1297 
1298   {
1299     GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
1300     report_object_count(mark_finished);
1301   }
1302 
1303   // Statistics
1304   double now = os::elapsedTime();
1305   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1306   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1307   _remark_times.add((now - start) * 1000.0);
1308 
1309   policy->record_concurrent_mark_remark_end();
1310   CodeCache::increment_marking_cycle();
1311 }
1312 
1313 class G1ReclaimEmptyRegionsTask : public WorkerTask {
1314   // Per-region work during the Cleanup pause.
1315   class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
1316     G1CollectedHeap* _g1h;
1317     size_t _freed_bytes;
1318     FreeRegionList* _local_cleanup_list;
1319     uint _old_regions_removed;
1320     uint _archive_regions_removed;
1321     uint _humongous_regions_removed;
1322 
1323   public:
1324     G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
1325                                  FreeRegionList* local_cleanup_list) :
1326       _g1h(g1h),
1327       _freed_bytes(0),
1328       _local_cleanup_list(local_cleanup_list),
1329       _old_regions_removed(0),
1330       _archive_regions_removed(0),
1331       _humongous_regions_removed(0) { }
1332 
1333     size_t freed_bytes() { return _freed_bytes; }
1334     const uint old_regions_removed() { return _old_regions_removed; }
1335     const uint archive_regions_removed() { return _archive_regions_removed; }
1336     const uint humongous_regions_removed() { return _humongous_regions_removed; }
1337 
1338     bool do_heap_region(HeapRegion *hr) {
1339       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_closed_archive()) {
1340         log_trace(gc)("Reclaimed empty old gen region %u (%s) bot " PTR_FORMAT,
1341                       hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1342         _freed_bytes += hr->used();
1343         hr->set_containing_set(NULL);
1344         if (hr->is_humongous()) {
1345           _humongous_regions_removed++;
1346           _g1h->free_humongous_region(hr, _local_cleanup_list);
1347         } else if (hr->is_open_archive()) {
1348           _archive_regions_removed++;
1349           _g1h->free_region(hr, _local_cleanup_list);
1350         } else {
1351           _old_regions_removed++;
1352           _g1h->free_region(hr, _local_cleanup_list);
1353         }
1354         hr->clear_cardtable();
1355         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1356       }
1357 
1358       return false;
1359     }
1360   };
1361 
1362   G1CollectedHeap* _g1h;
1363   FreeRegionList* _cleanup_list;
1364   HeapRegionClaimer _hrclaimer;
1365 
1366 public:
1367   G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1368     WorkerTask("G1 Cleanup"),
1369     _g1h(g1h),
1370     _cleanup_list(cleanup_list),
1371     _hrclaimer(n_workers) {
1372   }
1373 
1374   void work(uint worker_id) {
1375     FreeRegionList local_cleanup_list("Local Cleanup List");
1376     G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
1377     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1378     assert(cl.is_complete(), "Shouldn't have aborted!");
1379 
1380     // Now update the old/archive/humongous region sets
1381     _g1h->remove_from_old_gen_sets(cl.old_regions_removed(),
1382                                    cl.archive_regions_removed(),
1383                                    cl.humongous_regions_removed());
1384     {
1385       MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1386       _g1h->decrement_summary_bytes(cl.freed_bytes());
1387 
1388       _cleanup_list->add_ordered(&local_cleanup_list);
1389       assert(local_cleanup_list.is_empty(), "post-condition");
1390     }
1391   }
1392 };
1393 
1394 void G1ConcurrentMark::reclaim_empty_regions() {
1395   WorkerThreads* workers = _g1h->workers();
1396   FreeRegionList empty_regions_list("Empty Regions After Mark List");
1397 
1398   G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
1399   workers->run_task(&cl);
1400 
1401   if (!empty_regions_list.is_empty()) {
1402     log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1403     // Now print the empty regions list.
1404     _g1h->hr_printer()->cleanup(&empty_regions_list);
1405     // And actually make them available.
1406     _g1h->prepend_to_freelist(&empty_regions_list);
1407   }
1408 }
1409 
1410 void G1ConcurrentMark::compute_new_sizes() {
1411   MetaspaceGC::compute_new_size();
1412 
1413   // Cleanup will have freed any regions completely full of garbage.
1414   // Update the soft reference policy with the new heap occupancy.
1415   Universe::heap()->update_capacity_and_used_at_gc();
1416 
1417   // We reclaimed old regions so we should calculate the sizes to make
1418   // sure we update the old gen/space data.
1419   _g1h->monitoring_support()->update_sizes();
1420 }
1421 
1422 void G1ConcurrentMark::cleanup() {
1423   assert_at_safepoint_on_vm_thread();
1424 
1425   // If a full collection has happened, we shouldn't do this.
1426   if (has_aborted()) {
1427     return;
1428   }
1429 
1430   G1Policy* policy = _g1h->policy();
1431   policy->record_concurrent_mark_cleanup_start();
1432 
1433   double start = os::elapsedTime();
1434 
1435   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1436 
1437   if (needs_remembered_set_rebuild()) {
1438     GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1439     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1440     _g1h->heap_region_iterate(&cl);
1441   } else {
1442     log_debug(gc, phases)("No Remembered Sets to update after rebuild");
1443   }
1444 
1445   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1446 
1447   // We need to make this be a "collection" so any collection pause that
1448   // races with it goes around and waits for Cleanup to finish.
1449   _g1h->increment_total_collections();
1450 
1451   // Local statistics
1452   double recent_cleanup_time = (os::elapsedTime() - start);
1453   _total_cleanup_time += recent_cleanup_time;
1454   _cleanup_times.add(recent_cleanup_time);
1455 
1456   {
1457     GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1458     policy->record_concurrent_mark_cleanup_end(needs_remembered_set_rebuild());
1459   }
1460 }
1461 
1462 // 'Keep Alive' oop closure used by both serial parallel reference processing.
1463 // Uses the G1CMTask associated with a worker thread (for serial reference
1464 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1465 // trace referent objects.
1466 //
1467 // Using the G1CMTask and embedded local queues avoids having the worker
1468 // threads operating on the global mark stack. This reduces the risk
1469 // of overflowing the stack - which we would rather avoid at this late
1470 // state. Also using the tasks' local queues removes the potential
1471 // of the workers interfering with each other that could occur if
1472 // operating on the global stack.
1473 
1474 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1475   G1ConcurrentMark* _cm;
1476   G1CMTask*         _task;
1477   uint              _ref_counter_limit;
1478   uint              _ref_counter;
1479   bool              _is_serial;
1480 public:
1481   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1482     _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
1483     _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
1484     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1485   }
1486 
1487   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1488   virtual void do_oop(      oop* p) { do_oop_work(p); }
1489 
1490   template <class T> void do_oop_work(T* p) {
1491     if (_cm->has_overflown()) {
1492       return;
1493     }
1494     if (!_task->deal_with_reference(p)) {
1495       // We did not add anything to the mark bitmap (or mark stack), so there is
1496       // no point trying to drain it.
1497       return;
1498     }
1499     _ref_counter--;
1500 
1501     if (_ref_counter == 0) {
1502       // We have dealt with _ref_counter_limit references, pushing them
1503       // and objects reachable from them on to the local stack (and
1504       // possibly the global stack). Call G1CMTask::do_marking_step() to
1505       // process these entries.
1506       //
1507       // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1508       // there's nothing more to do (i.e. we're done with the entries that
1509       // were pushed as a result of the G1CMTask::deal_with_reference() calls
1510       // above) or we overflow.
1511       //
1512       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1513       // flag while there may still be some work to do. (See the comment at
1514       // the beginning of G1CMTask::do_marking_step() for those conditions -
1515       // one of which is reaching the specified time target.) It is only
1516       // when G1CMTask::do_marking_step() returns without setting the
1517       // has_aborted() flag that the marking step has completed.
1518       do {
1519         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1520         _task->do_marking_step(mark_step_duration_ms,
1521                                false      /* do_termination */,
1522                                _is_serial);
1523       } while (_task->has_aborted() && !_cm->has_overflown());
1524       _ref_counter = _ref_counter_limit;
1525     }
1526   }
1527 };
1528 
1529 // 'Drain' oop closure used by both serial and parallel reference processing.
1530 // Uses the G1CMTask associated with a given worker thread (for serial
1531 // reference processing the G1CMtask for worker 0 is used). Calls the
1532 // do_marking_step routine, with an unbelievably large timeout value,
1533 // to drain the marking data structures of the remaining entries
1534 // added by the 'keep alive' oop closure above.
1535 
1536 class G1CMDrainMarkingStackClosure : public VoidClosure {
1537   G1ConcurrentMark* _cm;
1538   G1CMTask*         _task;
1539   bool              _is_serial;
1540  public:
1541   G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1542     _cm(cm), _task(task), _is_serial(is_serial) {
1543     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1544   }
1545 
1546   void do_void() {
1547     do {
1548       // We call G1CMTask::do_marking_step() to completely drain the local
1549       // and global marking stacks of entries pushed by the 'keep alive'
1550       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1551       //
1552       // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1553       // if there's nothing more to do (i.e. we've completely drained the
1554       // entries that were pushed as a a result of applying the 'keep alive'
1555       // closure to the entries on the discovered ref lists) or we overflow
1556       // the global marking stack.
1557       //
1558       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1559       // flag while there may still be some work to do. (See the comment at
1560       // the beginning of G1CMTask::do_marking_step() for those conditions -
1561       // one of which is reaching the specified time target.) It is only
1562       // when G1CMTask::do_marking_step() returns without setting the
1563       // has_aborted() flag that the marking step has completed.
1564 
1565       _task->do_marking_step(1000000000.0 /* something very large */,
1566                              true         /* do_termination */,
1567                              _is_serial);
1568     } while (_task->has_aborted() && !_cm->has_overflown());
1569   }
1570 };
1571 
1572 class G1CMRefProcProxyTask : public RefProcProxyTask {
1573   G1CollectedHeap& _g1h;
1574   G1ConcurrentMark& _cm;
1575 
1576 public:
1577   G1CMRefProcProxyTask(uint max_workers, G1CollectedHeap& g1h, G1ConcurrentMark &cm)
1578     : RefProcProxyTask("G1CMRefProcProxyTask", max_workers),
1579       _g1h(g1h),
1580       _cm(cm) {}
1581 
1582   void work(uint worker_id) override {
1583     assert(worker_id < _max_workers, "sanity");
1584     G1CMIsAliveClosure is_alive(&_g1h);
1585     uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
1586     G1CMKeepAliveAndDrainClosure keep_alive(&_cm, _cm.task(index), _tm == RefProcThreadModel::Single);
1587     BarrierEnqueueDiscoveredFieldClosure enqueue;
1588     G1CMDrainMarkingStackClosure complete_gc(&_cm, _cm.task(index), _tm == RefProcThreadModel::Single);
1589     _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
1590   }
1591 
1592   void prepare_run_task_hook() override {
1593     // We need to reset the concurrency level before each
1594     // proxy task execution, so that the termination protocol
1595     // and overflow handling in G1CMTask::do_marking_step() knows
1596     // how many workers to wait for.
1597     _cm.set_concurrency(_queue_count);
1598   }
1599 };
1600 
1601 void G1ConcurrentMark::weak_refs_work() {
1602   ResourceMark rm;
1603 
1604   // Is alive closure.
1605   G1CMIsAliveClosure g1_is_alive(_g1h);
1606 
1607   {
1608     GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1609 
1610     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1611 
1612     // See the comment in G1CollectedHeap::ref_processing_init()
1613     // about how reference processing currently works in G1.
1614 
1615     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1616 
1617     // We need at least one active thread. If reference processing
1618     // is not multi-threaded we use the current (VMThread) thread,
1619     // otherwise we use the workers from the G1CollectedHeap and
1620     // we utilize all the worker threads we can.
1621     uint active_workers = (ParallelRefProcEnabled ? _g1h->workers()->active_workers() : 1U);
1622     active_workers = clamp(active_workers, 1u, _max_num_tasks);
1623 
1624     // Set the degree of MT processing here.  If the discovery was done MT,
1625     // the number of threads involved during discovery could differ from
1626     // the number of active workers.  This is OK as long as the discovered
1627     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1628     rp->set_active_mt_degree(active_workers);
1629 
1630     // Parallel processing task executor.
1631     G1CMRefProcProxyTask task(rp->max_num_queues(), *_g1h, *this);
1632     ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
1633 
1634     // Process the weak references.
1635     const ReferenceProcessorStats& stats = rp->process_discovered_references(task, pt);
1636     _gc_tracer_cm->report_gc_reference_stats(stats);
1637     pt.print_all_references();
1638 
1639     // The do_oop work routines of the keep_alive and drain_marking_stack
1640     // oop closures will set the has_overflown flag if we overflow the
1641     // global marking stack.
1642 
1643     assert(has_overflown() || _global_mark_stack.is_empty(),
1644            "Mark stack should be empty (unless it has overflown)");
1645 
1646     assert(rp->num_queues() == active_workers, "why not");
1647   }
1648 
1649   if (has_overflown()) {
1650     // We can not trust g1_is_alive and the contents of the heap if the marking stack
1651     // overflowed while processing references. Exit the VM.
1652     fatal("Overflow during reference processing, can not continue. Please "
1653           "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
1654           "restart.", MarkStackSizeMax);
1655     return;
1656   }
1657 
1658   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1659 
1660   {
1661     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1662     WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
1663   }
1664 
1665   // Unload Klasses, String, Code Cache, etc.
1666   if (ClassUnloadingWithConcurrentMark) {
1667     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1668     bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm);
1669     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1670   }
1671 }
1672 
1673 class G1PrecleanYieldClosure : public YieldClosure {
1674   G1ConcurrentMark* _cm;
1675 
1676 public:
1677   G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }
1678 
1679   virtual bool should_return() {
1680     return _cm->has_aborted();
1681   }
1682 
1683   virtual bool should_return_fine_grain() {
1684     _cm->do_yield_check();
1685     return _cm->has_aborted();
1686   }
1687 };
1688 
1689 void G1ConcurrentMark::preclean() {
1690   assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");
1691 
1692   SuspendibleThreadSetJoiner joiner;
1693 
1694   BarrierEnqueueDiscoveredFieldClosure enqueue;
1695 
1696   set_concurrency_and_phase(1, true);
1697 
1698   G1PrecleanYieldClosure yield_cl(this);
1699 
1700   ReferenceProcessor* rp = _g1h->ref_processor_cm();
1701   // Precleaning is single threaded. Temporarily disable MT discovery.
1702   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
1703   rp->preclean_discovered_references(rp->is_alive_non_header(),
1704                                      &enqueue,
1705                                      &yield_cl,
1706                                      _gc_timer_cm);
1707 }
1708 
1709 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1710 // the prev bitmap determining liveness.
1711 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1712   G1CollectedHeap* _g1h;
1713 public:
1714   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1715 
1716   bool do_object_b(oop obj) {
1717     return obj != NULL &&
1718            (!_g1h->is_in_reserved(obj) || !_g1h->is_obj_dead(obj));
1719   }
1720 };
1721 
1722 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1723   // Depending on the completion of the marking liveness needs to be determined
1724   // using either the next or prev bitmap.
1725   if (mark_completed) {
1726     G1ObjectCountIsAliveClosure is_alive(_g1h);
1727     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1728   } else {
1729     G1CMIsAliveClosure is_alive(_g1h);
1730     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1731   }
1732 }
1733 
1734 
1735 void G1ConcurrentMark::swap_mark_bitmaps() {
1736   G1CMBitMap* temp = _prev_mark_bitmap;
1737   _prev_mark_bitmap = _next_mark_bitmap;
1738   _next_mark_bitmap = temp;
1739 }
1740 
1741 // Closure for marking entries in SATB buffers.
1742 class G1CMSATBBufferClosure : public SATBBufferClosure {
1743 private:
1744   G1CMTask* _task;
1745   G1CollectedHeap* _g1h;
1746 
1747   // This is very similar to G1CMTask::deal_with_reference, but with
1748   // more relaxed requirements for the argument, so this must be more
1749   // circumspect about treating the argument as an object.
1750   void do_entry(void* entry) const {
1751     _task->increment_refs_reached();
1752     oop const obj = cast_to_oop(entry);
1753     _task->make_reference_grey(obj);
1754   }
1755 
1756 public:
1757   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1758     : _task(task), _g1h(g1h) { }
1759 
1760   virtual void do_buffer(void** buffer, size_t size) {
1761     for (size_t i = 0; i < size; ++i) {
1762       do_entry(buffer[i]);
1763     }
1764   }
1765 };
1766 
1767 class G1RemarkThreadsClosure : public ThreadClosure {
1768   G1SATBMarkQueueSet& _qset;
1769   G1CMOopClosure _cm_cl;
1770   MarkingCodeBlobClosure _code_cl;
1771   uintx _claim_token;
1772 
1773  public:
1774   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1775     _qset(G1BarrierSet::satb_mark_queue_set()),
1776     _cm_cl(g1h, task),
1777     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations, true /* keepalive nmethods */),
1778     _claim_token(Threads::thread_claim_token()) {}
1779 
1780   void do_thread(Thread* thread) {
1781     if (thread->claim_threads_do(true, _claim_token)) {
1782       // Transfer any partial buffer to the qset for completed buffer processing.
1783       _qset.flush_queue(G1ThreadLocalData::satb_mark_queue(thread));
1784       if (thread->is_Java_thread()) {
1785         // In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking
1786         // however the liveness of oops reachable from nmethods have very complex lifecycles:
1787         // * Alive if on the stack of an executing method
1788         // * Weakly reachable otherwise
1789         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
1790         // live by the SATB invariant but other oops recorded in nmethods may behave differently.
1791         JavaThread::cast(thread)->nmethods_do(&_code_cl);
1792       }
1793     }
1794   }
1795 };
1796 
1797 class G1CMRemarkTask : public WorkerTask {
1798   G1ConcurrentMark* _cm;
1799 public:
1800   void work(uint worker_id) {
1801     G1CMTask* task = _cm->task(worker_id);
1802     task->record_start_time();
1803     {
1804       ResourceMark rm;
1805 
1806       G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1807       Threads::threads_do(&threads_f);
1808     }
1809 
1810     do {
1811       task->do_marking_step(1000000000.0 /* something very large */,
1812                             true         /* do_termination       */,
1813                             false        /* is_serial            */);
1814     } while (task->has_aborted() && !_cm->has_overflown());
1815     // If we overflow, then we do not want to restart. We instead
1816     // want to abort remark and do concurrent marking again.
1817     task->record_end_time();
1818   }
1819 
1820   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1821     WorkerTask("Par Remark"), _cm(cm) {
1822     _cm->terminator()->reset_for_reuse(active_workers);
1823   }
1824 };
1825 
1826 void G1ConcurrentMark::finalize_marking() {
1827   ResourceMark rm;
1828 
1829   _g1h->ensure_parsability(false);
1830 
1831   // this is remark, so we'll use up all active threads
1832   uint active_workers = _g1h->workers()->active_workers();
1833   set_concurrency_and_phase(active_workers, false /* concurrent */);
1834   // Leave _parallel_marking_threads at it's
1835   // value originally calculated in the G1ConcurrentMark
1836   // constructor and pass values of the active workers
1837   // through the task.
1838 
1839   {
1840     StrongRootsScope srs(active_workers);
1841 
1842     G1CMRemarkTask remarkTask(this, active_workers);
1843     // We will start all available threads, even if we decide that the
1844     // active_workers will be fewer. The extra ones will just bail out
1845     // immediately.
1846     _g1h->workers()->run_task(&remarkTask);
1847   }
1848 
1849   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1850   guarantee(has_overflown() ||
1851             satb_mq_set.completed_buffers_num() == 0,
1852             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1853             BOOL_TO_STR(has_overflown()),
1854             satb_mq_set.completed_buffers_num());
1855 
1856   print_stats();
1857 }
1858 
1859 void G1ConcurrentMark::flush_all_task_caches() {
1860   size_t hits = 0;
1861   size_t misses = 0;
1862   for (uint i = 0; i < _max_num_tasks; i++) {
1863     Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1864     hits += stats.first;
1865     misses += stats.second;
1866   }
1867   size_t sum = hits + misses;
1868   log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1869                        hits, misses, percent_of(hits, sum));
1870 }
1871 
1872 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1873   _prev_mark_bitmap->clear_range(mr);
1874 }
1875 
1876 HeapRegion*
1877 G1ConcurrentMark::claim_region(uint worker_id) {
1878   // "checkpoint" the finger
1879   HeapWord* finger = _finger;
1880 
1881   while (finger < _heap.end()) {
1882     assert(_g1h->is_in_reserved(finger), "invariant");
1883 
1884     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1885     // Make sure that the reads below do not float before loading curr_region.
1886     OrderAccess::loadload();
1887     // Above heap_region_containing may return NULL as we always scan claim
1888     // until the end of the heap. In this case, just jump to the next region.
1889     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1890 
1891     // Is the gap between reading the finger and doing the CAS too long?
1892     HeapWord* res = Atomic::cmpxchg(&_finger, finger, end);
1893     if (res == finger && curr_region != NULL) {
1894       // we succeeded
1895       HeapWord*   bottom        = curr_region->bottom();
1896       HeapWord*   limit         = curr_region->next_top_at_mark_start();
1897 
1898       // notice that _finger == end cannot be guaranteed here since,
1899       // someone else might have moved the finger even further
1900       assert(_finger >= end, "the finger should have moved forward");
1901 
1902       if (limit > bottom) {
1903         return curr_region;
1904       } else {
1905         assert(limit == bottom,
1906                "the region limit should be at bottom");
1907         // we return NULL and the caller should try calling
1908         // claim_region() again.
1909         return NULL;
1910       }
1911     } else {
1912       assert(_finger > finger, "the finger should have moved forward");
1913       // read it again
1914       finger = _finger;
1915     }
1916   }
1917 
1918   return NULL;
1919 }
1920 
1921 #ifndef PRODUCT
1922 class VerifyNoCSetOops {
1923   G1CollectedHeap* _g1h;
1924   const char* _phase;
1925   int _info;
1926 
1927 public:
1928   VerifyNoCSetOops(const char* phase, int info = -1) :
1929     _g1h(G1CollectedHeap::heap()),
1930     _phase(phase),
1931     _info(info)
1932   { }
1933 
1934   void operator()(G1TaskQueueEntry task_entry) const {
1935     if (task_entry.is_array_slice()) {
1936       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1937       return;
1938     }
1939     guarantee(oopDesc::is_oop(task_entry.obj()),
1940               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1941               p2i(task_entry.obj()), _phase, _info);
1942     HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
1943     guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
1944               "obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
1945               p2i(task_entry.obj()), _phase, _info, r->hrm_index());
1946   }
1947 };
1948 
1949 void G1ConcurrentMark::verify_no_collection_set_oops() {
1950   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1951   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1952     return;
1953   }
1954 
1955   // Verify entries on the global mark stack
1956   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1957 
1958   // Verify entries on the task queues
1959   for (uint i = 0; i < _max_num_tasks; ++i) {
1960     G1CMTaskQueue* queue = _task_queues->queue(i);
1961     queue->iterate(VerifyNoCSetOops("Queue", i));
1962   }
1963 
1964   // Verify the global finger
1965   HeapWord* global_finger = finger();
1966   if (global_finger != NULL && global_finger < _heap.end()) {
1967     // Since we always iterate over all regions, we might get a NULL HeapRegion
1968     // here.
1969     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1970     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1971               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1972               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1973   }
1974 
1975   // Verify the task fingers
1976   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1977   for (uint i = 0; i < _num_concurrent_workers; ++i) {
1978     G1CMTask* task = _tasks[i];
1979     HeapWord* task_finger = task->finger();
1980     if (task_finger != NULL && task_finger < _heap.end()) {
1981       // See above note on the global finger verification.
1982       HeapRegion* r = _g1h->heap_region_containing(task_finger);
1983       guarantee(r == NULL || task_finger == r->bottom() ||
1984                 !r->in_collection_set() || !r->has_index_in_opt_cset(),
1985                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
1986                 p2i(task_finger), HR_FORMAT_PARAMS(r));
1987     }
1988   }
1989 }
1990 #endif // PRODUCT
1991 
1992 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
1993   // If Remark did not select any regions for RemSet rebuild,
1994   // skip the rebuild remembered set phase
1995   if (!needs_remembered_set_rebuild()) {
1996     log_debug(gc, marking)("Skipping Remembered Set Rebuild. No regions selected for rebuild");
1997     return;
1998   }
1999   _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
2000 }
2001 
2002 void G1ConcurrentMark::print_stats() {
2003   if (!log_is_enabled(Debug, gc, stats)) {
2004     return;
2005   }
2006   log_debug(gc, stats)("---------------------------------------------------------------------");
2007   for (size_t i = 0; i < _num_active_tasks; ++i) {
2008     _tasks[i]->print_stats();
2009     log_debug(gc, stats)("---------------------------------------------------------------------");
2010   }
2011 }
2012 
2013 void G1ConcurrentMark::concurrent_cycle_abort() {
2014   if (!cm_thread()->in_progress() || _has_aborted) {
2015     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2016     return;
2017   }
2018 
2019   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2020   // concurrent bitmap clearing.
2021   {
2022     GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2023     clear_next_bitmap(_g1h->workers());
2024   }
2025   // Note we cannot clear the previous marking bitmap here
2026   // since VerifyDuringGC verifies the objects marked during
2027   // a full GC against the previous bitmap.
2028 
2029   // Empty mark stack
2030   reset_marking_for_restart();
2031   for (uint i = 0; i < _max_num_tasks; ++i) {
2032     _tasks[i]->clear_region_fields();
2033   }
2034   _first_overflow_barrier_sync.abort();
2035   _second_overflow_barrier_sync.abort();
2036   _has_aborted = true;
2037 
2038   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2039   satb_mq_set.abandon_partial_marking();
2040   // This can be called either during or outside marking, we'll read
2041   // the expected_active value from the SATB queue set.
2042   satb_mq_set.set_active_all_threads(
2043                                  false, /* new active value */
2044                                  satb_mq_set.is_active() /* expected_active */);
2045 }
2046 
2047 static void print_ms_time_info(const char* prefix, const char* name,
2048                                NumberSeq& ns) {
2049   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2050                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2051   if (ns.num() > 0) {
2052     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2053                            prefix, ns.sd(), ns.maximum());
2054   }
2055 }
2056 
2057 void G1ConcurrentMark::print_summary_info() {
2058   Log(gc, marking) log;
2059   if (!log.is_trace()) {
2060     return;
2061   }
2062 
2063   log.trace(" Concurrent marking:");
2064   print_ms_time_info("  ", "init marks", _init_times);
2065   print_ms_time_info("  ", "remarks", _remark_times);
2066   {
2067     print_ms_time_info("     ", "final marks", _remark_mark_times);
2068     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2069 
2070   }
2071   print_ms_time_info("  ", "cleanups", _cleanup_times);
2072   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2073             _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2074   log.trace("  Total stop_world time = %8.2f s.",
2075             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2076   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2077             cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2078 }
2079 
2080 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2081   _concurrent_workers->threads_do(tc);
2082 }
2083 
2084 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2085   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2086                p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2087   _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2088   _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2089 }
2090 
2091 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2092   ReferenceProcessor* result = g1h->ref_processor_cm();
2093   assert(result != NULL, "CM reference processor should not be NULL");
2094   return result;
2095 }
2096 
2097 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2098                                G1CMTask* task)
2099   : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
2100     _g1h(g1h), _task(task)
2101 { }
2102 
2103 void G1CMTask::setup_for_region(HeapRegion* hr) {
2104   assert(hr != NULL,
2105         "claim_region() should have filtered out NULL regions");
2106   _curr_region  = hr;
2107   _finger       = hr->bottom();
2108   update_region_limit();
2109 }
2110 
2111 void G1CMTask::update_region_limit() {
2112   HeapRegion* hr            = _curr_region;
2113   HeapWord* bottom          = hr->bottom();
2114   HeapWord* limit           = hr->next_top_at_mark_start();
2115 
2116   if (limit == bottom) {
2117     // The region was collected underneath our feet.
2118     // We set the finger to bottom to ensure that the bitmap
2119     // iteration that will follow this will not do anything.
2120     // (this is not a condition that holds when we set the region up,
2121     // as the region is not supposed to be empty in the first place)
2122     _finger = bottom;
2123   } else if (limit >= _region_limit) {
2124     assert(limit >= _finger, "peace of mind");
2125   } else {
2126     assert(limit < _region_limit, "only way to get here");
2127     // This can happen under some pretty unusual circumstances.  An
2128     // evacuation pause empties the region underneath our feet (NTAMS
2129     // at bottom). We then do some allocation in the region (NTAMS
2130     // stays at bottom), followed by the region being used as a GC
2131     // alloc region (NTAMS will move to top() and the objects
2132     // originally below it will be grayed). All objects now marked in
2133     // the region are explicitly grayed, if below the global finger,
2134     // and we do not need in fact to scan anything else. So, we simply
2135     // set _finger to be limit to ensure that the bitmap iteration
2136     // doesn't do anything.
2137     _finger = limit;
2138   }
2139 
2140   _region_limit = limit;
2141 }
2142 
2143 void G1CMTask::giveup_current_region() {
2144   assert(_curr_region != NULL, "invariant");
2145   clear_region_fields();
2146 }
2147 
2148 void G1CMTask::clear_region_fields() {
2149   // Values for these three fields that indicate that we're not
2150   // holding on to a region.
2151   _curr_region   = NULL;
2152   _finger        = NULL;
2153   _region_limit  = NULL;
2154 }
2155 
2156 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2157   if (cm_oop_closure == NULL) {
2158     assert(_cm_oop_closure != NULL, "invariant");
2159   } else {
2160     assert(_cm_oop_closure == NULL, "invariant");
2161   }
2162   _cm_oop_closure = cm_oop_closure;
2163 }
2164 
2165 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2166   guarantee(next_mark_bitmap != NULL, "invariant");
2167   _next_mark_bitmap              = next_mark_bitmap;
2168   clear_region_fields();
2169 
2170   _calls                         = 0;
2171   _elapsed_time_ms               = 0.0;
2172   _termination_time_ms           = 0.0;
2173   _termination_start_time_ms     = 0.0;
2174 
2175   _mark_stats_cache.reset();
2176 }
2177 
2178 bool G1CMTask::should_exit_termination() {
2179   if (!regular_clock_call()) {
2180     return true;
2181   }
2182 
2183   // This is called when we are in the termination protocol. We should
2184   // quit if, for some reason, this task wants to abort or the global
2185   // stack is not empty (this means that we can get work from it).
2186   return !_cm->mark_stack_empty() || has_aborted();
2187 }
2188 
2189 void G1CMTask::reached_limit() {
2190   assert(_words_scanned >= _words_scanned_limit ||
2191          _refs_reached >= _refs_reached_limit ,
2192          "shouldn't have been called otherwise");
2193   abort_marking_if_regular_check_fail();
2194 }
2195 
2196 bool G1CMTask::regular_clock_call() {
2197   if (has_aborted()) {
2198     return false;
2199   }
2200 
2201   // First, we need to recalculate the words scanned and refs reached
2202   // limits for the next clock call.
2203   recalculate_limits();
2204 
2205   // During the regular clock call we do the following
2206 
2207   // (1) If an overflow has been flagged, then we abort.
2208   if (_cm->has_overflown()) {
2209     return false;
2210   }
2211 
2212   // If we are not concurrent (i.e. we're doing remark) we don't need
2213   // to check anything else. The other steps are only needed during
2214   // the concurrent marking phase.
2215   if (!_cm->concurrent()) {
2216     return true;
2217   }
2218 
2219   // (2) If marking has been aborted for Full GC, then we also abort.
2220   if (_cm->has_aborted()) {
2221     return false;
2222   }
2223 
2224   double curr_time_ms = os::elapsedVTime() * 1000.0;
2225 
2226   // (4) We check whether we should yield. If we have to, then we abort.
2227   if (SuspendibleThreadSet::should_yield()) {
2228     // We should yield. To do this we abort the task. The caller is
2229     // responsible for yielding.
2230     return false;
2231   }
2232 
2233   // (5) We check whether we've reached our time quota. If we have,
2234   // then we abort.
2235   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2236   if (elapsed_time_ms > _time_target_ms) {
2237     _has_timed_out = true;
2238     return false;
2239   }
2240 
2241   // (6) Finally, we check whether there are enough completed STAB
2242   // buffers available for processing. If there are, we abort.
2243   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2244   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2245     // we do need to process SATB buffers, we'll abort and restart
2246     // the marking task to do so
2247     return false;
2248   }
2249   return true;
2250 }
2251 
2252 void G1CMTask::recalculate_limits() {
2253   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2254   _words_scanned_limit      = _real_words_scanned_limit;
2255 
2256   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2257   _refs_reached_limit       = _real_refs_reached_limit;
2258 }
2259 
2260 void G1CMTask::decrease_limits() {
2261   // This is called when we believe that we're going to do an infrequent
2262   // operation which will increase the per byte scanned cost (i.e. move
2263   // entries to/from the global stack). It basically tries to decrease the
2264   // scanning limit so that the clock is called earlier.
2265 
2266   _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2267   _refs_reached_limit  = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2268 }
2269 
2270 void G1CMTask::move_entries_to_global_stack() {
2271   // Local array where we'll store the entries that will be popped
2272   // from the local queue.
2273   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2274 
2275   size_t n = 0;
2276   G1TaskQueueEntry task_entry;
2277   while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2278     buffer[n] = task_entry;
2279     ++n;
2280   }
2281   if (n < G1CMMarkStack::EntriesPerChunk) {
2282     buffer[n] = G1TaskQueueEntry();
2283   }
2284 
2285   if (n > 0) {
2286     if (!_cm->mark_stack_push(buffer)) {
2287       set_has_aborted();
2288     }
2289   }
2290 
2291   // This operation was quite expensive, so decrease the limits.
2292   decrease_limits();
2293 }
2294 
2295 bool G1CMTask::get_entries_from_global_stack() {
2296   // Local array where we'll store the entries that will be popped
2297   // from the global stack.
2298   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2299 
2300   if (!_cm->mark_stack_pop(buffer)) {
2301     return false;
2302   }
2303 
2304   // We did actually pop at least one entry.
2305   for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2306     G1TaskQueueEntry task_entry = buffer[i];
2307     if (task_entry.is_null()) {
2308       break;
2309     }
2310     assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2311     bool success = _task_queue->push(task_entry);
2312     // We only call this when the local queue is empty or under a
2313     // given target limit. So, we do not expect this push to fail.
2314     assert(success, "invariant");
2315   }
2316 
2317   // This operation was quite expensive, so decrease the limits
2318   decrease_limits();
2319   return true;
2320 }
2321 
2322 void G1CMTask::drain_local_queue(bool partially) {
2323   if (has_aborted()) {
2324     return;
2325   }
2326 
2327   // Decide what the target size is, depending whether we're going to
2328   // drain it partially (so that other tasks can steal if they run out
2329   // of things to do) or totally (at the very end).
2330   size_t target_size;
2331   if (partially) {
2332     target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2333   } else {
2334     target_size = 0;
2335   }
2336 
2337   if (_task_queue->size() > target_size) {
2338     G1TaskQueueEntry entry;
2339     bool ret = _task_queue->pop_local(entry);
2340     while (ret) {
2341       scan_task_entry(entry);
2342       if (_task_queue->size() <= target_size || has_aborted()) {
2343         ret = false;
2344       } else {
2345         ret = _task_queue->pop_local(entry);
2346       }
2347     }
2348   }
2349 }
2350 
2351 void G1CMTask::drain_global_stack(bool partially) {
2352   if (has_aborted()) {
2353     return;
2354   }
2355 
2356   // We have a policy to drain the local queue before we attempt to
2357   // drain the global stack.
2358   assert(partially || _task_queue->size() == 0, "invariant");
2359 
2360   // Decide what the target size is, depending whether we're going to
2361   // drain it partially (so that other tasks can steal if they run out
2362   // of things to do) or totally (at the very end).
2363   // Notice that when draining the global mark stack partially, due to the racyness
2364   // of the mark stack size update we might in fact drop below the target. But,
2365   // this is not a problem.
2366   // In case of total draining, we simply process until the global mark stack is
2367   // totally empty, disregarding the size counter.
2368   if (partially) {
2369     size_t const target_size = _cm->partial_mark_stack_size_target();
2370     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2371       if (get_entries_from_global_stack()) {
2372         drain_local_queue(partially);
2373       }
2374     }
2375   } else {
2376     while (!has_aborted() && get_entries_from_global_stack()) {
2377       drain_local_queue(partially);
2378     }
2379   }
2380 }
2381 
2382 // SATB Queue has several assumptions on whether to call the par or
2383 // non-par versions of the methods. this is why some of the code is
2384 // replicated. We should really get rid of the single-threaded version
2385 // of the code to simplify things.
2386 void G1CMTask::drain_satb_buffers() {
2387   if (has_aborted()) {
2388     return;
2389   }
2390 
2391   // We set this so that the regular clock knows that we're in the
2392   // middle of draining buffers and doesn't set the abort flag when it
2393   // notices that SATB buffers are available for draining. It'd be
2394   // very counter productive if it did that. :-)
2395   _draining_satb_buffers = true;
2396 
2397   G1CMSATBBufferClosure satb_cl(this, _g1h);
2398   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2399 
2400   // This keeps claiming and applying the closure to completed buffers
2401   // until we run out of buffers or we need to abort.
2402   while (!has_aborted() &&
2403          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2404     abort_marking_if_regular_check_fail();
2405   }
2406 
2407   // Can't assert qset is empty here, even if not aborted.  If concurrent,
2408   // some other thread might be adding to the queue.  If not concurrent,
2409   // some other thread might have won the race for the last buffer, but
2410   // has not yet decremented the count.
2411 
2412   _draining_satb_buffers = false;
2413 
2414   // again, this was a potentially expensive operation, decrease the
2415   // limits to get the regular clock call early
2416   decrease_limits();
2417 }
2418 
2419 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2420   _mark_stats_cache.reset(region_idx);
2421 }
2422 
2423 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2424   return _mark_stats_cache.evict_all();
2425 }
2426 
2427 void G1CMTask::print_stats() {
2428   log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2429   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2430                        _elapsed_time_ms, _termination_time_ms);
2431   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
2432                        _step_times_ms.num(),
2433                        _step_times_ms.avg(),
2434                        _step_times_ms.sd(),
2435                        _step_times_ms.maximum(),
2436                        _step_times_ms.sum());
2437   size_t const hits = _mark_stats_cache.hits();
2438   size_t const misses = _mark_stats_cache.misses();
2439   log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2440                        hits, misses, percent_of(hits, hits + misses));
2441 }
2442 
2443 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
2444   return _task_queues->steal(worker_id, task_entry);
2445 }
2446 
2447 /*****************************************************************************
2448 
2449     The do_marking_step(time_target_ms, ...) method is the building
2450     block of the parallel marking framework. It can be called in parallel
2451     with other invocations of do_marking_step() on different tasks
2452     (but only one per task, obviously) and concurrently with the
2453     mutator threads, or during remark, hence it eliminates the need
2454     for two versions of the code. When called during remark, it will
2455     pick up from where the task left off during the concurrent marking
2456     phase. Interestingly, tasks are also claimable during evacuation
2457     pauses too, since do_marking_step() ensures that it aborts before
2458     it needs to yield.
2459 
2460     The data structures that it uses to do marking work are the
2461     following:
2462 
2463       (1) Marking Bitmap. If there are gray objects that appear only
2464       on the bitmap (this happens either when dealing with an overflow
2465       or when the concurrent start pause has simply marked the roots
2466       and didn't push them on the stack), then tasks claim heap
2467       regions whose bitmap they then scan to find gray objects. A
2468       global finger indicates where the end of the last claimed region
2469       is. A local finger indicates how far into the region a task has
2470       scanned. The two fingers are used to determine how to gray an
2471       object (i.e. whether simply marking it is OK, as it will be
2472       visited by a task in the future, or whether it needs to be also
2473       pushed on a stack).
2474 
2475       (2) Local Queue. The local queue of the task which is accessed
2476       reasonably efficiently by the task. Other tasks can steal from
2477       it when they run out of work. Throughout the marking phase, a
2478       task attempts to keep its local queue short but not totally
2479       empty, so that entries are available for stealing by other
2480       tasks. Only when there is no more work, a task will totally
2481       drain its local queue.
2482 
2483       (3) Global Mark Stack. This handles local queue overflow. During
2484       marking only sets of entries are moved between it and the local
2485       queues, as access to it requires a mutex and more fine-grain
2486       interaction with it which might cause contention. If it
2487       overflows, then the marking phase should restart and iterate
2488       over the bitmap to identify gray objects. Throughout the marking
2489       phase, tasks attempt to keep the global mark stack at a small
2490       length but not totally empty, so that entries are available for
2491       popping by other tasks. Only when there is no more work, tasks
2492       will totally drain the global mark stack.
2493 
2494       (4) SATB Buffer Queue. This is where completed SATB buffers are
2495       made available. Buffers are regularly removed from this queue
2496       and scanned for roots, so that the queue doesn't get too
2497       long. During remark, all completed buffers are processed, as
2498       well as the filled in parts of any uncompleted buffers.
2499 
2500     The do_marking_step() method tries to abort when the time target
2501     has been reached. There are a few other cases when the
2502     do_marking_step() method also aborts:
2503 
2504       (1) When the marking phase has been aborted (after a Full GC).
2505 
2506       (2) When a global overflow (on the global stack) has been
2507       triggered. Before the task aborts, it will actually sync up with
2508       the other tasks to ensure that all the marking data structures
2509       (local queues, stacks, fingers etc.)  are re-initialized so that
2510       when do_marking_step() completes, the marking phase can
2511       immediately restart.
2512 
2513       (3) When enough completed SATB buffers are available. The
2514       do_marking_step() method only tries to drain SATB buffers right
2515       at the beginning. So, if enough buffers are available, the
2516       marking step aborts and the SATB buffers are processed at
2517       the beginning of the next invocation.
2518 
2519       (4) To yield. when we have to yield then we abort and yield
2520       right at the end of do_marking_step(). This saves us from a lot
2521       of hassle as, by yielding we might allow a Full GC. If this
2522       happens then objects will be compacted underneath our feet, the
2523       heap might shrink, etc. We save checking for this by just
2524       aborting and doing the yield right at the end.
2525 
2526     From the above it follows that the do_marking_step() method should
2527     be called in a loop (or, otherwise, regularly) until it completes.
2528 
2529     If a marking step completes without its has_aborted() flag being
2530     true, it means it has completed the current marking phase (and
2531     also all other marking tasks have done so and have all synced up).
2532 
2533     A method called regular_clock_call() is invoked "regularly" (in
2534     sub ms intervals) throughout marking. It is this clock method that
2535     checks all the abort conditions which were mentioned above and
2536     decides when the task should abort. A work-based scheme is used to
2537     trigger this clock method: when the number of object words the
2538     marking phase has scanned or the number of references the marking
2539     phase has visited reach a given limit. Additional invocations to
2540     the method clock have been planted in a few other strategic places
2541     too. The initial reason for the clock method was to avoid calling
2542     vtime too regularly, as it is quite expensive. So, once it was in
2543     place, it was natural to piggy-back all the other conditions on it
2544     too and not constantly check them throughout the code.
2545 
2546     If do_termination is true then do_marking_step will enter its
2547     termination protocol.
2548 
2549     The value of is_serial must be true when do_marking_step is being
2550     called serially (i.e. by the VMThread) and do_marking_step should
2551     skip any synchronization in the termination and overflow code.
2552     Examples include the serial remark code and the serial reference
2553     processing closures.
2554 
2555     The value of is_serial must be false when do_marking_step is
2556     being called by any of the worker threads.
2557     Examples include the concurrent marking code (CMMarkingTask),
2558     the MT remark code, and the MT reference processing closures.
2559 
2560  *****************************************************************************/
2561 
2562 void G1CMTask::do_marking_step(double time_target_ms,
2563                                bool do_termination,
2564                                bool is_serial) {
2565   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2566 
2567   _start_time_ms = os::elapsedVTime() * 1000.0;
2568 
2569   // If do_stealing is true then do_marking_step will attempt to
2570   // steal work from the other G1CMTasks. It only makes sense to
2571   // enable stealing when the termination protocol is enabled
2572   // and do_marking_step() is not being called serially.
2573   bool do_stealing = do_termination && !is_serial;
2574 
2575   G1Predictions const& predictor = _g1h->policy()->predictor();
2576   double diff_prediction_ms = predictor.predict_zero_bounded(&_marking_step_diff_ms);
2577   _time_target_ms = time_target_ms - diff_prediction_ms;
2578 
2579   // set up the variables that are used in the work-based scheme to
2580   // call the regular clock method
2581   _words_scanned = 0;
2582   _refs_reached  = 0;
2583   recalculate_limits();
2584 
2585   // clear all flags
2586   clear_has_aborted();
2587   _has_timed_out = false;
2588   _draining_satb_buffers = false;
2589 
2590   ++_calls;
2591 
2592   // Set up the bitmap and oop closures. Anything that uses them is
2593   // eventually called from this method, so it is OK to allocate these
2594   // statically.
2595   G1CMBitMapClosure bitmap_closure(this, _cm);
2596   G1CMOopClosure cm_oop_closure(_g1h, this);
2597   set_cm_oop_closure(&cm_oop_closure);
2598 
2599   if (_cm->has_overflown()) {
2600     // This can happen if the mark stack overflows during a GC pause
2601     // and this task, after a yield point, restarts. We have to abort
2602     // as we need to get into the overflow protocol which happens
2603     // right at the end of this task.
2604     set_has_aborted();
2605   }
2606 
2607   // First drain any available SATB buffers. After this, we will not
2608   // look at SATB buffers before the next invocation of this method.
2609   // If enough completed SATB buffers are queued up, the regular clock
2610   // will abort this task so that it restarts.
2611   drain_satb_buffers();
2612   // ...then partially drain the local queue and the global stack
2613   drain_local_queue(true);
2614   drain_global_stack(true);
2615 
2616   do {
2617     if (!has_aborted() && _curr_region != NULL) {
2618       // This means that we're already holding on to a region.
2619       assert(_finger != NULL, "if region is not NULL, then the finger "
2620              "should not be NULL either");
2621 
2622       // We might have restarted this task after an evacuation pause
2623       // which might have evacuated the region we're holding on to
2624       // underneath our feet. Let's read its limit again to make sure
2625       // that we do not iterate over a region of the heap that
2626       // contains garbage (update_region_limit() will also move
2627       // _finger to the start of the region if it is found empty).
2628       update_region_limit();
2629       // We will start from _finger not from the start of the region,
2630       // as we might be restarting this task after aborting half-way
2631       // through scanning this region. In this case, _finger points to
2632       // the address where we last found a marked object. If this is a
2633       // fresh region, _finger points to start().
2634       MemRegion mr = MemRegion(_finger, _region_limit);
2635 
2636       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2637              "humongous regions should go around loop once only");
2638 
2639       // Some special cases:
2640       // If the memory region is empty, we can just give up the region.
2641       // If the current region is humongous then we only need to check
2642       // the bitmap for the bit associated with the start of the object,
2643       // scan the object if it's live, and give up the region.
2644       // Otherwise, let's iterate over the bitmap of the part of the region
2645       // that is left.
2646       // If the iteration is successful, give up the region.
2647       if (mr.is_empty()) {
2648         giveup_current_region();
2649         abort_marking_if_regular_check_fail();
2650       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2651         if (_next_mark_bitmap->is_marked(mr.start())) {
2652           // The object is marked - apply the closure
2653           bitmap_closure.do_addr(mr.start());
2654         }
2655         // Even if this task aborted while scanning the humongous object
2656         // we can (and should) give up the current region.
2657         giveup_current_region();
2658         abort_marking_if_regular_check_fail();
2659       } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2660         giveup_current_region();
2661         abort_marking_if_regular_check_fail();
2662       } else {
2663         assert(has_aborted(), "currently the only way to do so");
2664         // The only way to abort the bitmap iteration is to return
2665         // false from the do_bit() method. However, inside the
2666         // do_bit() method we move the _finger to point to the
2667         // object currently being looked at. So, if we bail out, we
2668         // have definitely set _finger to something non-null.
2669         assert(_finger != NULL, "invariant");
2670 
2671         // Region iteration was actually aborted. So now _finger
2672         // points to the address of the object we last scanned. If we
2673         // leave it there, when we restart this task, we will rescan
2674         // the object. It is easy to avoid this. We move the finger by
2675         // enough to point to the next possible object header.
2676         assert(_finger < _region_limit, "invariant");
2677         HeapWord* const new_finger = _finger + cast_to_oop(_finger)->size();
2678         // Check if bitmap iteration was aborted while scanning the last object
2679         if (new_finger >= _region_limit) {
2680           giveup_current_region();
2681         } else {
2682           move_finger_to(new_finger);
2683         }
2684       }
2685     }
2686     // At this point we have either completed iterating over the
2687     // region we were holding on to, or we have aborted.
2688 
2689     // We then partially drain the local queue and the global stack.
2690     // (Do we really need this?)
2691     drain_local_queue(true);
2692     drain_global_stack(true);
2693 
2694     // Read the note on the claim_region() method on why it might
2695     // return NULL with potentially more regions available for
2696     // claiming and why we have to check out_of_regions() to determine
2697     // whether we're done or not.
2698     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2699       // We are going to try to claim a new region. We should have
2700       // given up on the previous one.
2701       // Separated the asserts so that we know which one fires.
2702       assert(_curr_region  == NULL, "invariant");
2703       assert(_finger       == NULL, "invariant");
2704       assert(_region_limit == NULL, "invariant");
2705       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2706       if (claimed_region != NULL) {
2707         // Yes, we managed to claim one
2708         setup_for_region(claimed_region);
2709         assert(_curr_region == claimed_region, "invariant");
2710       }
2711       // It is important to call the regular clock here. It might take
2712       // a while to claim a region if, for example, we hit a large
2713       // block of empty regions. So we need to call the regular clock
2714       // method once round the loop to make sure it's called
2715       // frequently enough.
2716       abort_marking_if_regular_check_fail();
2717     }
2718 
2719     if (!has_aborted() && _curr_region == NULL) {
2720       assert(_cm->out_of_regions(),
2721              "at this point we should be out of regions");
2722     }
2723   } while ( _curr_region != NULL && !has_aborted());
2724 
2725   if (!has_aborted()) {
2726     // We cannot check whether the global stack is empty, since other
2727     // tasks might be pushing objects to it concurrently.
2728     assert(_cm->out_of_regions(),
2729            "at this point we should be out of regions");
2730     // Try to reduce the number of available SATB buffers so that
2731     // remark has less work to do.
2732     drain_satb_buffers();
2733   }
2734 
2735   // Since we've done everything else, we can now totally drain the
2736   // local queue and global stack.
2737   drain_local_queue(false);
2738   drain_global_stack(false);
2739 
2740   // Attempt at work stealing from other task's queues.
2741   if (do_stealing && !has_aborted()) {
2742     // We have not aborted. This means that we have finished all that
2743     // we could. Let's try to do some stealing...
2744 
2745     // We cannot check whether the global stack is empty, since other
2746     // tasks might be pushing objects to it concurrently.
2747     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2748            "only way to reach here");
2749     while (!has_aborted()) {
2750       G1TaskQueueEntry entry;
2751       if (_cm->try_stealing(_worker_id, entry)) {
2752         scan_task_entry(entry);
2753 
2754         // And since we're towards the end, let's totally drain the
2755         // local queue and global stack.
2756         drain_local_queue(false);
2757         drain_global_stack(false);
2758       } else {
2759         break;
2760       }
2761     }
2762   }
2763 
2764   // We still haven't aborted. Now, let's try to get into the
2765   // termination protocol.
2766   if (do_termination && !has_aborted()) {
2767     // We cannot check whether the global stack is empty, since other
2768     // tasks might be concurrently pushing objects on it.
2769     // Separated the asserts so that we know which one fires.
2770     assert(_cm->out_of_regions(), "only way to reach here");
2771     assert(_task_queue->size() == 0, "only way to reach here");
2772     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2773 
2774     // The G1CMTask class also extends the TerminatorTerminator class,
2775     // hence its should_exit_termination() method will also decide
2776     // whether to exit the termination protocol or not.
2777     bool finished = (is_serial ||
2778                      _cm->terminator()->offer_termination(this));
2779     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2780     _termination_time_ms +=
2781       termination_end_time_ms - _termination_start_time_ms;
2782 
2783     if (finished) {
2784       // We're all done.
2785 
2786       // We can now guarantee that the global stack is empty, since
2787       // all other tasks have finished. We separated the guarantees so
2788       // that, if a condition is false, we can immediately find out
2789       // which one.
2790       guarantee(_cm->out_of_regions(), "only way to reach here");
2791       guarantee(_cm->mark_stack_empty(), "only way to reach here");
2792       guarantee(_task_queue->size() == 0, "only way to reach here");
2793       guarantee(!_cm->has_overflown(), "only way to reach here");
2794       guarantee(!has_aborted(), "should never happen if termination has completed");
2795     } else {
2796       // Apparently there's more work to do. Let's abort this task. It
2797       // will restart it and we can hopefully find more things to do.
2798       set_has_aborted();
2799     }
2800   }
2801 
2802   // Mainly for debugging purposes to make sure that a pointer to the
2803   // closure which was statically allocated in this frame doesn't
2804   // escape it by accident.
2805   set_cm_oop_closure(NULL);
2806   double end_time_ms = os::elapsedVTime() * 1000.0;
2807   double elapsed_time_ms = end_time_ms - _start_time_ms;
2808   // Update the step history.
2809   _step_times_ms.add(elapsed_time_ms);
2810 
2811   if (has_aborted()) {
2812     // The task was aborted for some reason.
2813     if (_has_timed_out) {
2814       double diff_ms = elapsed_time_ms - _time_target_ms;
2815       // Keep statistics of how well we did with respect to hitting
2816       // our target only if we actually timed out (if we aborted for
2817       // other reasons, then the results might get skewed).
2818       _marking_step_diff_ms.add(diff_ms);
2819     }
2820 
2821     if (_cm->has_overflown()) {
2822       // This is the interesting one. We aborted because a global
2823       // overflow was raised. This means we have to restart the
2824       // marking phase and start iterating over regions. However, in
2825       // order to do this we have to make sure that all tasks stop
2826       // what they are doing and re-initialize in a safe manner. We
2827       // will achieve this with the use of two barrier sync points.
2828 
2829       if (!is_serial) {
2830         // We only need to enter the sync barrier if being called
2831         // from a parallel context
2832         _cm->enter_first_sync_barrier(_worker_id);
2833 
2834         // When we exit this sync barrier we know that all tasks have
2835         // stopped doing marking work. So, it's now safe to
2836         // re-initialize our data structures.
2837       }
2838 
2839       clear_region_fields();
2840       flush_mark_stats_cache();
2841 
2842       if (!is_serial) {
2843         // If we're executing the concurrent phase of marking, reset the marking
2844         // state; otherwise the marking state is reset after reference processing,
2845         // during the remark pause.
2846         // If we reset here as a result of an overflow during the remark we will
2847         // see assertion failures from any subsequent set_concurrency_and_phase()
2848         // calls.
2849         if (_cm->concurrent() && _worker_id == 0) {
2850           // Worker 0 is responsible for clearing the global data structures because
2851           // of an overflow. During STW we should not clear the overflow flag (in
2852           // G1ConcurrentMark::reset_marking_state()) since we rely on it being true when we exit
2853           // method to abort the pause and restart concurrent marking.
2854           _cm->reset_marking_for_restart();
2855 
2856           log_info(gc, marking)("Concurrent Mark reset for overflow");
2857         }
2858 
2859         // ...and enter the second barrier.
2860         _cm->enter_second_sync_barrier(_worker_id);
2861       }
2862       // At this point, if we're during the concurrent phase of
2863       // marking, everything has been re-initialized and we're
2864       // ready to restart.
2865     }
2866   }
2867 }
2868 
2869 G1CMTask::G1CMTask(uint worker_id,
2870                    G1ConcurrentMark* cm,
2871                    G1CMTaskQueue* task_queue,
2872                    G1RegionMarkStats* mark_stats) :
2873   _objArray_processor(this),
2874   _worker_id(worker_id),
2875   _g1h(G1CollectedHeap::heap()),
2876   _cm(cm),
2877   _next_mark_bitmap(NULL),
2878   _task_queue(task_queue),
2879   _mark_stats_cache(mark_stats, G1RegionMarkStatsCache::RegionMarkStatsCacheSize),
2880   _calls(0),
2881   _time_target_ms(0.0),
2882   _start_time_ms(0.0),
2883   _cm_oop_closure(NULL),
2884   _curr_region(NULL),
2885   _finger(NULL),
2886   _region_limit(NULL),
2887   _words_scanned(0),
2888   _words_scanned_limit(0),
2889   _real_words_scanned_limit(0),
2890   _refs_reached(0),
2891   _refs_reached_limit(0),
2892   _real_refs_reached_limit(0),
2893   _has_aborted(false),
2894   _has_timed_out(false),
2895   _draining_satb_buffers(false),
2896   _step_times_ms(),
2897   _elapsed_time_ms(0.0),
2898   _termination_time_ms(0.0),
2899   _termination_start_time_ms(0.0),
2900   _marking_step_diff_ms()
2901 {
2902   guarantee(task_queue != NULL, "invariant");
2903 
2904   _marking_step_diff_ms.add(0.5);
2905 }
2906 
2907 // These are formatting macros that are used below to ensure
2908 // consistent formatting. The *_H_* versions are used to format the
2909 // header for a particular value and they should be kept consistent
2910 // with the corresponding macro. Also note that most of the macros add
2911 // the necessary white space (as a prefix) which makes them a bit
2912 // easier to compose.
2913 
2914 // All the output lines are prefixed with this string to be able to
2915 // identify them easily in a large log file.
2916 #define G1PPRL_LINE_PREFIX            "###"
2917 
2918 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
2919 #ifdef _LP64
2920 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
2921 #else // _LP64
2922 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
2923 #endif // _LP64
2924 
2925 // For per-region info
2926 #define G1PPRL_TYPE_FORMAT            "   %-4s"
2927 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
2928 #define G1PPRL_STATE_FORMAT           "   %-5s"
2929 #define G1PPRL_STATE_H_FORMAT         "   %5s"
2930 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
2931 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
2932 #define G1PPRL_DOUBLE_FORMAT          "%14.1f"
2933 #define G1PPRL_GCEFF_FORMAT           "  %14s"
2934 #define G1PPRL_GCEFF_H_FORMAT         "  %14s"
2935 
2936 // For summary info
2937 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
2938 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
2939 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
2940 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
2941 
2942 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
2943   _total_used_bytes(0), _total_capacity_bytes(0),
2944   _total_prev_live_bytes(0), _total_next_live_bytes(0),
2945   _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
2946 {
2947   if (!log_is_enabled(Trace, gc, liveness)) {
2948     return;
2949   }
2950 
2951   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2952   MemRegion reserved = g1h->reserved();
2953   double now = os::elapsedTime();
2954 
2955   // Print the header of the output.
2956   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
2957   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
2958                           G1PPRL_SUM_ADDR_FORMAT("reserved")
2959                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
2960                           p2i(reserved.start()), p2i(reserved.end()),
2961                           HeapRegion::GrainBytes);
2962   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
2963   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2964                           G1PPRL_TYPE_H_FORMAT
2965                           G1PPRL_ADDR_BASE_H_FORMAT
2966                           G1PPRL_BYTE_H_FORMAT
2967                           G1PPRL_BYTE_H_FORMAT
2968                           G1PPRL_BYTE_H_FORMAT
2969                           G1PPRL_GCEFF_H_FORMAT
2970                           G1PPRL_BYTE_H_FORMAT
2971                           G1PPRL_STATE_H_FORMAT
2972                           G1PPRL_BYTE_H_FORMAT,
2973                           "type", "address-range",
2974                           "used", "prev-live", "next-live", "gc-eff",
2975                           "remset", "state", "code-roots");
2976   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2977                           G1PPRL_TYPE_H_FORMAT
2978                           G1PPRL_ADDR_BASE_H_FORMAT
2979                           G1PPRL_BYTE_H_FORMAT
2980                           G1PPRL_BYTE_H_FORMAT
2981                           G1PPRL_BYTE_H_FORMAT
2982                           G1PPRL_GCEFF_H_FORMAT
2983                           G1PPRL_BYTE_H_FORMAT
2984                           G1PPRL_STATE_H_FORMAT
2985                           G1PPRL_BYTE_H_FORMAT,
2986                           "", "",
2987                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
2988                           "(bytes)", "", "(bytes)");
2989 }
2990 
2991 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
2992   if (!log_is_enabled(Trace, gc, liveness)) {
2993     return false;
2994   }
2995 
2996   const char* type       = r->get_type_str();
2997   HeapWord* bottom       = r->bottom();
2998   HeapWord* end          = r->end();
2999   size_t capacity_bytes  = r->capacity();
3000   size_t used_bytes      = r->used();
3001   size_t prev_live_bytes = r->live_bytes();
3002   size_t next_live_bytes = r->next_live_bytes();
3003   double gc_eff          = r->gc_efficiency();
3004   size_t remset_bytes    = r->rem_set()->mem_size();
3005   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3006   const char* remset_type = r->rem_set()->get_short_state_str();
3007   FormatBuffer<16> gc_efficiency("");
3008 
3009   _total_used_bytes      += used_bytes;
3010   _total_capacity_bytes  += capacity_bytes;
3011   _total_prev_live_bytes += prev_live_bytes;
3012   _total_next_live_bytes += next_live_bytes;
3013   _total_remset_bytes    += remset_bytes;
3014   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3015 
3016   if(gc_eff < 0) {
3017     gc_efficiency.append("-");
3018   } else {
3019     gc_efficiency.append(G1PPRL_DOUBLE_FORMAT, gc_eff);
3020   }
3021 
3022   // Print a line for this particular region.
3023   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3024                         G1PPRL_TYPE_FORMAT
3025                         G1PPRL_ADDR_BASE_FORMAT
3026                         G1PPRL_BYTE_FORMAT
3027                         G1PPRL_BYTE_FORMAT
3028                         G1PPRL_BYTE_FORMAT
3029                         G1PPRL_GCEFF_FORMAT
3030                         G1PPRL_BYTE_FORMAT
3031                         G1PPRL_STATE_FORMAT
3032                         G1PPRL_BYTE_FORMAT,
3033                         type, p2i(bottom), p2i(end),
3034                         used_bytes, prev_live_bytes, next_live_bytes, gc_efficiency.buffer(),
3035                         remset_bytes, remset_type, strong_code_roots_bytes);
3036 
3037   return false;
3038 }
3039 
3040 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3041   if (!log_is_enabled(Trace, gc, liveness)) {
3042     return;
3043   }
3044 
3045   // add static memory usages to remembered set sizes
3046   _total_remset_bytes += G1CardSetFreePool::free_list_pool()->mem_size() + HeapRegionRemSet::static_mem_size();
3047   // Print the footer of the output.
3048   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3049   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3050                          " SUMMARY"
3051                          G1PPRL_SUM_MB_FORMAT("capacity")
3052                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3053                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3054                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3055                          G1PPRL_SUM_MB_FORMAT("remset")
3056                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3057                          bytes_to_mb(_total_capacity_bytes),
3058                          bytes_to_mb(_total_used_bytes),
3059                          percent_of(_total_used_bytes, _total_capacity_bytes),
3060                          bytes_to_mb(_total_prev_live_bytes),
3061                          percent_of(_total_prev_live_bytes, _total_capacity_bytes),
3062                          bytes_to_mb(_total_next_live_bytes),
3063                          percent_of(_total_next_live_bytes, _total_capacity_bytes),
3064                          bytes_to_mb(_total_remset_bytes),
3065                          bytes_to_mb(_total_strong_code_roots_bytes));
3066 }
--- EOF ---