1 /*
   2  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "gc/g1/g1BarrierSet.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorState.hpp"
  31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1DirtyCardQueue.hpp"
  34 #include "gc/g1/g1HeapVerifier.hpp"
  35 #include "gc/g1/g1OopClosures.inline.hpp"
  36 #include "gc/g1/g1Policy.hpp"
  37 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
  38 #include "gc/g1/g1StringDedup.hpp"
  39 #include "gc/g1/g1ThreadLocalData.hpp"
  40 #include "gc/g1/heapRegion.inline.hpp"
  41 #include "gc/g1/heapRegionRemSet.hpp"
  42 #include "gc/g1/heapRegionSet.inline.hpp"
  43 #include "gc/shared/gcId.hpp"
  44 #include "gc/shared/gcTimer.hpp"
  45 #include "gc/shared/gcTrace.hpp"
  46 #include "gc/shared/gcTraceTime.inline.hpp"
  47 #include "gc/shared/gcVMOperations.hpp"
  48 #include "gc/shared/genOopClosures.inline.hpp"
  49 #include "gc/shared/referencePolicy.hpp"
  50 #include "gc/shared/strongRootsScope.hpp"
  51 #include "gc/shared/suspendibleThreadSet.hpp"
  52 #include "gc/shared/taskqueue.inline.hpp"
  53 #include "gc/shared/weakProcessor.inline.hpp"
  54 #include "gc/shared/workerPolicy.hpp"
  55 #include "include/jvm.h"
  56 #include "logging/log.hpp"
  57 #include "memory/allocation.hpp"
  58 #include "memory/resourceArea.hpp"
  59 #include "oops/access.inline.hpp"
  60 #include "oops/oop.inline.hpp"
  61 #include "runtime/atomic.hpp"
  62 #include "runtime/handles.inline.hpp"
  63 #include "runtime/java.hpp"
  64 #include "runtime/prefetch.inline.hpp"
  65 #include "services/memTracker.hpp"
  66 #include "utilities/align.hpp"
  67 #include "utilities/growableArray.hpp"
  68 
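// Applied to every marked object found between the task's local finger and the global
// finger while scanning the mark bitmap. Returning false aborts the iteration.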
  69 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  70   assert(addr < _cm->finger(), "invariant");
  71   assert(addr >= _task->finger(), "invariant");
  72 
  73   // We move that task's local finger along.
  74   _task->move_finger_to(addr);
  75 
  76   _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  77   // we only partially drain the local queue and global stack
  78   _task->drain_local_queue(true);
  79   _task->drain_global_stack(true);
  80 
  81   // if the has_aborted flag has been raised, we need to bail out of
  82   // the iteration
  83   return !_task->has_aborted();
  84 }
  85 
  86 G1CMMarkStack::G1CMMarkStack() :
  87   _max_chunk_capacity(0),
  88   _base(NULL),
  89   _chunk_capacity(0) {
  90   set_empty();
  91 }
  92 
  93 bool G1CMMarkStack::resize(size_t new_capacity) {
  94   assert(is_empty(), "Only resize when stack is empty.");
  95   assert(new_capacity <= _max_chunk_capacity,
  96          "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
  97 
  98   TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);
  99 
 100   if (new_base == NULL) {
 101     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
 102     return false;
 103   }
 104   // Release old mapping.
 105   if (_base != NULL) {
 106     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 107   }
 108 
 109   _base = new_base;
 110   _chunk_capacity = new_capacity;
 111   set_empty();
 112 
 113   return true;
 114 }
 115 
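// Alignment, in number of G1TaskQueueEntry elements, that mark stack capacities are
// rounded up to: the least common multiple of the VM allocation granularity and the
// chunk size, expressed in entries.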
 116 size_t G1CMMarkStack::capacity_alignment() {
 117   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
 118 }
 119 
 120 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 121   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 122 
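  // Footprint of one chunk (next pointer plus data array) measured in G1TaskQueueEntry
  // units; used to convert capacities given in entries into a number of chunks.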
 123   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 124 
 125   _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 126   size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 127 
 128   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
 129             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
 130             _max_chunk_capacity,
 131             initial_chunk_capacity);
 132 
 133   log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
 134                 initial_chunk_capacity, _max_chunk_capacity);
 135 
 136   return resize(initial_chunk_capacity);
 137 }
 138 
 139 void G1CMMarkStack::expand() {
 140   if (_chunk_capacity == _max_chunk_capacity) {
 141     log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
 142     return;
 143   }
 144   size_t old_capacity = _chunk_capacity;
 145   // Double capacity if possible
 146   size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
 147 
 148   if (resize(new_capacity)) {
 149     log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 150                   old_capacity, new_capacity);
 151   } else {
 152     log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 153                     old_capacity, new_capacity);
 154   }
 155 }
 156 
 157 G1CMMarkStack::~G1CMMarkStack() {
 158   if (_base != NULL) {
 159     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 160   }
 161 }
 162 
 163 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
 164   elem->next = *list;
 165   *list = elem;
 166 }
 167 
 168 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
 169   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 170   add_chunk_to_list(&_chunk_list, elem);
 171   _chunks_in_chunk_list++;
 172 }
 173 
 174 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
 175   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 176   add_chunk_to_list(&_free_list, elem);
 177 }
 178 
 179 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
 180   TaskQueueEntryChunk* result = *list;
 181   if (result != NULL) {
 182     *list = (*list)->next;
 183   }
 184   return result;
 185 }
 186 
 187 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
 188   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 189   TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
 190   if (result != NULL) {
 191     _chunks_in_chunk_list--;
 192   }
 193   return result;
 194 }
 195 
 196 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
 197   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 198   return remove_chunk_from_list(&_free_list);
 199 }
 200 
 201 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
 202   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
 203   // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
 204   // wraparound of _hwm.
 205   if (_hwm >= _chunk_capacity) {
 206     return NULL;
 207   }
 208 
 209   size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
 210   if (cur_idx >= _chunk_capacity) {
 211     return NULL;
 212   }
 213 
 214   TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
 215   result->next = NULL;
 216   return result;
 217 }
 218 
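// Push a full buffer of EntriesPerChunk entries onto the global mark stack. Returns
// false if no chunk could be obtained, i.e. the stack is full.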
 219 bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
 220   // Get a new chunk.
 221   TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();
 222 
 223   if (new_chunk == NULL) {
 224     // Did not get a chunk from the free list. Allocate from backing memory.
 225     new_chunk = allocate_new_chunk();
 226 
 227     if (new_chunk == NULL) {
 228       return false;
 229     }
 230   }
 231 
 232   Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 233 
 234   add_chunk_to_chunk_list(new_chunk);
 235 
 236   return true;
 237 }
 238 
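// Pop one chunk of EntriesPerChunk entries from the global mark stack into the given
// buffer. Returns false if the mark stack is empty.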
 239 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
 240   TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
 241 
 242   if (cur == NULL) {
 243     return false;
 244   }
 245 
 246   Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 247 
 248   add_chunk_to_free_list(cur);
 249   return true;
 250 }
 251 
 252 void G1CMMarkStack::set_empty() {
 253   _chunks_in_chunk_list = 0;
 254   _hwm = 0;
 255   _chunk_list = NULL;
 256   _free_list = NULL;
 257 }
 258 
 259 G1CMRootRegions::G1CMRootRegions(uint const max_regions) :
 260   _root_regions(NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC)),
 261   _max_regions(max_regions),
 262   _num_root_regions(0),
 263   _claimed_root_regions(0),
 264   _scan_in_progress(false),
 265   _should_abort(false) { }
 266 
 267 G1CMRootRegions::~G1CMRootRegions() {
  FREE_C_HEAP_ARRAY(HeapRegion*, _root_regions);
 269 }
 270 
 271 void G1CMRootRegions::reset() {
 272   _num_root_regions = 0;
 273 }
 274 
 275 void G1CMRootRegions::add(HeapRegion* hr) {
 276   assert_at_safepoint();
 277   size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
 278   assert(idx < _max_regions, "Trying to add more root regions than there is space " SIZE_FORMAT, _max_regions);
 279   _root_regions[idx] = hr;
 280 }
 281 
 282 void G1CMRootRegions::prepare_for_scan() {
 283   assert(!scan_in_progress(), "pre-condition");
 284 
 285   _scan_in_progress = _num_root_regions > 0;
 286 
 287   _claimed_root_regions = 0;
 288   _should_abort = false;
 289 }
 290 
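// Claim the next root region to scan, or NULL if all regions have been claimed or the
// scan has been aborted.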
 291 HeapRegion* G1CMRootRegions::claim_next() {
 292   if (_should_abort) {
 293     // If someone has set the should_abort flag, we return NULL to
 294     // force the caller to bail out of their loop.
 295     return NULL;
 296   }
 297 
 298   if (_claimed_root_regions >= _num_root_regions) {
 299     return NULL;
 300   }
 301 
 302   size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
 303   if (claimed_index < _num_root_regions) {
 304     return _root_regions[claimed_index];
 305   }
 306   return NULL;
 307 }
 308 
 309 uint G1CMRootRegions::num_root_regions() const {
 310   return (uint)_num_root_regions;
 311 }
 312 
 313 void G1CMRootRegions::notify_scan_done() {
 314   MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 315   _scan_in_progress = false;
 316   RootRegionScan_lock->notify_all();
 317 }
 318 
 319 void G1CMRootRegions::cancel_scan() {
 320   notify_scan_done();
 321 }
 322 
 323 void G1CMRootRegions::scan_finished() {
 324   assert(scan_in_progress(), "pre-condition");
 325 
 326   if (!_should_abort) {
 327     assert(_claimed_root_regions >= num_root_regions(),
 328            "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
 329            _claimed_root_regions, num_root_regions());
 330   }
 331 
 332   notify_scan_done();
 333 }
 334 
 335 bool G1CMRootRegions::wait_until_scan_finished() {
 336   if (!scan_in_progress()) {
 337     return false;
 338   }
 339 
 340   {
 341     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 342     while (scan_in_progress()) {
 343       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 344     }
 345   }
 346   return true;
 347 }
 348 
 349 // Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in an STW
 351 // phase.
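// For example, 8 STW workers map to (8 + 2) / 4 = 2 concurrent workers, with a
// minimum of one.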
 352 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
 353   return MAX2((num_gc_workers + 2) / 4, 1U);
 354 }
 355 
 356 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
 357                                    G1RegionToSpaceMapper* prev_bitmap_storage,
 358                                    G1RegionToSpaceMapper* next_bitmap_storage) :
 359   // _cm_thread set inside the constructor
 360   _g1h(g1h),
 361   _completed_initialization(false),
 362 
 363   _mark_bitmap_1(),
 364   _mark_bitmap_2(),
 365   _prev_mark_bitmap(&_mark_bitmap_1),
 366   _next_mark_bitmap(&_mark_bitmap_2),
 367 
 368   _heap(_g1h->reserved_region()),
 369 
 370   _root_regions(_g1h->max_regions()),
 371 
 372   _global_mark_stack(),
 373 
 374   // _finger set in set_non_marking_state
 375 
 376   _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
 377   _max_num_tasks(ParallelGCThreads),
 378   // _num_active_tasks set in set_non_marking_state()
 379   // _tasks set inside the constructor
 380 
 381   _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
 382   _terminator((int) _max_num_tasks, _task_queues),
 383 
 384   _first_overflow_barrier_sync(),
 385   _second_overflow_barrier_sync(),
 386 
 387   _has_overflown(false),
 388   _concurrent(false),
 389   _has_aborted(false),
 390   _restart_for_overflow(false),
 391   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 392   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
 393 
 394   // _verbose_level set below
 395 
 396   _init_times(),
 397   _remark_times(),
 398   _remark_mark_times(),
 399   _remark_weak_ref_times(),
 400   _cleanup_times(),
 401   _total_cleanup_time(0.0),
 402 
 403   _accum_task_vtime(NULL),
 404 
 405   _concurrent_workers(NULL),
 406   _num_concurrent_workers(0),
 407   _max_concurrent_workers(0),
 408 
 409   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
 410   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 411 {
 412   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 413   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 414 
 415   // Create & start ConcurrentMark thread.
 416   _cm_thread = new G1ConcurrentMarkThread(this);
 417   if (_cm_thread->osthread() == NULL) {
 418     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 419   }
 420 
 421   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 422 
 423   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
 424     // Calculate the number of concurrent worker threads by scaling
 425     // the number of parallel GC threads.
 426     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
 427     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 428   }
 429 
 430   assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
 431   if (ConcGCThreads > ParallelGCThreads) {
 432     log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
 433                     ConcGCThreads, ParallelGCThreads);
 434     return;
 435   }
 436 
 437   log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
 438   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 439 
 440   _num_concurrent_workers = ConcGCThreads;
 441   _max_concurrent_workers = _num_concurrent_workers;
 442 
 443   _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
 444   _concurrent_workers->initialize_workers();
 445 
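  // If MarkStackSize has not been set explicitly, size the global mark stack
  // ergonomically: at least TASKQUEUE_SIZE entries per concurrent worker, capped at
  // MarkStackSizeMax.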
 446   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 447     size_t mark_stack_size =
 448       MIN2(MarkStackSizeMax,
 449           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
 450     // Verify that the calculated value for MarkStackSize is in range.
 451     // It would be nice to use the private utility routine from Arguments.
 452     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 453       log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 454                       "must be between 1 and " SIZE_FORMAT,
 455                       mark_stack_size, MarkStackSizeMax);
 456       return;
 457     }
 458     FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
 459   } else {
 460     // Verify MarkStackSize is in range.
 461     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 462       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 463         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 464           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
 465                           "must be between 1 and " SIZE_FORMAT,
 466                           MarkStackSize, MarkStackSizeMax);
 467           return;
 468         }
 469       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 470         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 471           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
 472                           " or for MarkStackSizeMax (" SIZE_FORMAT ")",
 473                           MarkStackSize, MarkStackSizeMax);
 474           return;
 475         }
 476       }
 477     }
 478   }
 479 
 480   if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
 481     vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
 482   }
 483 
 484   _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
 485   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);
 486 
 487   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 488   _num_active_tasks = _max_num_tasks;
 489 
 490   for (uint i = 0; i < _max_num_tasks; ++i) {
 491     G1CMTaskQueue* task_queue = new G1CMTaskQueue();
 492     task_queue->initialize();
 493     _task_queues->register_queue(i, task_queue);
 494 
 495     _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
 496 
 497     _accum_task_vtime[i] = 0.0;
 498   }
 499 
 500   reset_at_marking_complete();
 501   _completed_initialization = true;
 502 }
 503 
 504 void G1ConcurrentMark::reset() {
 505   _has_aborted = false;
 506 
 507   reset_marking_for_restart();
 508 
  // Reset all tasks, since different phases will use different numbers of active
  // threads. So, it's easiest to have all of them ready.
 511   for (uint i = 0; i < _max_num_tasks; ++i) {
 512     _tasks[i]->reset(_next_mark_bitmap);
 513   }
 514 
 515   uint max_regions = _g1h->max_regions();
 516   for (uint i = 0; i < max_regions; i++) {
 517     _top_at_rebuild_starts[i] = NULL;
 518     _region_mark_stats[i].clear();
 519   }
 520 }
 521 
 522 void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
 523   for (uint j = 0; j < _max_num_tasks; ++j) {
 524     _tasks[j]->clear_mark_stats_cache(region_idx);
 525   }
 526   _top_at_rebuild_starts[region_idx] = NULL;
 527   _region_mark_stats[region_idx].clear();
 528 }
 529 
 530 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
 531   uint const region_idx = r->hrm_index();
 532   if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Got humongous continuation region here");
 534     uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
 535     for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
 536       clear_statistics_in_region(j);
 537     }
 538   } else {
 539     clear_statistics_in_region(region_idx);
 540   }
 541 }
 542 
 543 static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
 544   if (bitmap->is_marked(addr)) {
 545     bitmap->clear(addr);
 546   }
 547 }
 548 
 549 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
 550   assert_at_safepoint_on_vm_thread();
 551 
 552   // Need to clear all mark bits of the humongous object.
 553   clear_mark_if_set(_prev_mark_bitmap, r->bottom());
 554   clear_mark_if_set(_next_mark_bitmap, r->bottom());
 555 
 556   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
 557     return;
 558   }
 559 
 560   // Clear any statistics about the region gathered so far.
 561   clear_statistics(r);
 562 }
 563 
 564 void G1ConcurrentMark::reset_marking_for_restart() {
 565   _global_mark_stack.set_empty();
 566 
 567   // Expand the marking stack, if we have to and if we can.
 568   if (has_overflown()) {
 569     _global_mark_stack.expand();
 570 
 571     uint max_regions = _g1h->max_regions();
 572     for (uint i = 0; i < max_regions; i++) {
 573       _region_mark_stats[i].clear_during_overflow();
 574     }
 575   }
 576 
 577   clear_has_overflown();
 578   _finger = _heap.start();
 579 
 580   for (uint i = 0; i < _max_num_tasks; ++i) {
 581     G1CMTaskQueue* queue = _task_queues->queue(i);
 582     queue->set_empty();
 583   }
 584 }
 585 
 586 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
 587   assert(active_tasks <= _max_num_tasks, "we should not have more");
 588 
 589   _num_active_tasks = active_tasks;
 590   // Need to update the three data structures below according to the
 591   // number of active threads for this phase.
 592   _terminator.terminator()->reset_for_reuse((int) active_tasks);
 593   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 594   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 595 }
 596 
 597 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 598   set_concurrency(active_tasks);
 599 
 600   _concurrent = concurrent;
 601 
 602   if (!concurrent) {
 603     // At this point we should be in a STW phase, and completed marking.
 604     assert_at_safepoint_on_vm_thread();
 605     assert(out_of_regions(),
 606            "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
 607            p2i(_finger), p2i(_heap.end()));
 608   }
 609 }
 610 
 611 void G1ConcurrentMark::reset_at_marking_complete() {
 612   // We set the global marking state to some default values when we're
 613   // not doing marking.
 614   reset_marking_for_restart();
 615   _num_active_tasks = 0;
 616 }
 617 
 618 G1ConcurrentMark::~G1ConcurrentMark() {
 619   FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
 620   FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
 621   // The G1ConcurrentMark instance is never freed.
 622   ShouldNotReachHere();
 623 }
 624 
 625 class G1ClearBitMapTask : public AbstractGangTask {
 626 public:
 627   static size_t chunk_size() { return M; }
 628 
 629 private:
 630   // Heap region closure used for clearing the given mark bitmap.
 631   class G1ClearBitmapHRClosure : public HeapRegionClosure {
 632   private:
 633     G1CMBitMap* _bitmap;
 634     G1ConcurrentMark* _cm;
 635   public:
 636     G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
 637     }
 638 
 639     virtual bool do_heap_region(HeapRegion* r) {
 640       size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
 641 
 642       HeapWord* cur = r->bottom();
 643       HeapWord* const end = r->end();
 644 
 645       while (cur < end) {
 646         MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 647         _bitmap->clear_range(mr);
 648 
 649         cur += chunk_size_in_words;
 650 
 651         // Abort iteration if after yielding the marking has been aborted.
 652         if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
 653           return true;
 654         }
        // Repeat the asserts from before the start of the closure. We do them
        // as asserts here to minimize their overhead in product builds. However, we
        // keep them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking even in product builds.
 659         assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
 660         assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 661       }
 662       assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
 663 
 664       return false;
 665     }
 666   };
 667 
 668   G1ClearBitmapHRClosure _cl;
 669   HeapRegionClaimer _hr_claimer;
 670   bool _suspendible; // If the task is suspendible, workers must join the STS.
 671 
 672 public:
 673   G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
 674     AbstractGangTask("G1 Clear Bitmap"),
 675     _cl(bitmap, suspendible ? cm : NULL),
 676     _hr_claimer(n_workers),
 677     _suspendible(suspendible)
 678   { }
 679 
 680   void work(uint worker_id) {
 681     SuspendibleThreadSetJoiner sts_join(_suspendible);
 682     G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
 683   }
 684 
 685   bool is_complete() {
 686     return _cl.is_complete();
 687   }
 688 };
 689 
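// Clear the given bitmap using the given workers, splitting the work into chunks of
// G1ClearBitMapTask::chunk_size() bytes. If may_yield is set, the workers join the
// suspendible thread set and the clearing may be interrupted by safepoints.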
 690 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
 691   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 692 
 693   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
 694   size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 695 
 696   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 697 
 698   G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
 699 
 700   log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
 701   workers->run_task(&cl, num_workers);
 702   guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
 703 }
 704 
 705 void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread still appears to be in
  // the current cycle.
 708   guarantee(cm_thread()->during_cycle(), "invariant");
 709 
 710   // We are finishing up the current cycle by clearing the next
 711   // marking bitmap and getting it ready for the next cycle. During
 712   // this time no other cycle can start. So, let's make sure that this
 713   // is the case.
 714   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 715 
 716   clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
 717 
 718   // Repeat the asserts from above.
 719   guarantee(cm_thread()->during_cycle(), "invariant");
 720   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 721 }
 722 
 723 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 724   assert_at_safepoint_on_vm_thread();
 725   clear_bitmap(_prev_mark_bitmap, workers, false);
 726 }
 727 
 728 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 729 public:
 730   bool do_heap_region(HeapRegion* r) {
 731     r->note_start_of_marking();
 732     return false;
 733   }
 734 };
 735 
 736 void G1ConcurrentMark::pre_initial_mark() {
 737   assert_at_safepoint_on_vm_thread();
 738 
 739   // Reset marking state.
 740   reset();
 741 
 742   // For each region note start of marking.
 743   NoteStartOfMarkHRClosure startcl;
 744   _g1h->heap_region_iterate(&startcl);
 745 
 746   _root_regions.reset();
 747 }
 748 
 749 
 750 void G1ConcurrentMark::post_initial_mark() {
 751   // Start Concurrent Marking weak-reference discovery.
 752   ReferenceProcessor* rp = _g1h->ref_processor_cm();
 753   // enable ("weak") refs discovery
 754   rp->enable_discovery();
 755   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 756 
 757   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
 760   satb_mq_set.set_active_all_threads(true, /* new active value */
 761                                      false /* expected_active */);
 762 
 763   _root_regions.prepare_for_scan();
 764 
 765   // update_g1_committed() will be called at the end of an evac pause
 766   // when marking is on. So, it's also called at the end of the
 767   // initial-mark pause to update the heap end, if the heap expands
 768   // during it. No need to call it here.
 769 }
 770 
 771 /*
 772  * Notice that in the next two methods, we actually leave the STS
 773  * during the barrier sync and join it immediately afterwards. If we
 774  * do not do this, the following deadlock can occur: one thread could
 775  * be in the barrier sync code, waiting for the other thread to also
 776  * sync up, whereas another one could be trying to yield, while also
 777  * waiting for the other threads to sync up too.
 778  *
 779  * Note, however, that this code is also used during remark and in
 780  * this case we should not attempt to leave / enter the STS, otherwise
 781  * we'll either hit an assert (debug / fastdebug) or deadlock
 782  * (product). So we should only leave / enter the STS if we are
 783  * operating concurrently.
 784  *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 790  */
 791 
 792 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
 793   bool barrier_aborted;
 794   {
 795     SuspendibleThreadSetLeaver sts_leave(concurrent());
 796     barrier_aborted = !_first_overflow_barrier_sync.enter();
 797   }
 798 
 799   // at this point everyone should have synced up and not be doing any
 800   // more work
 801 
 802   if (barrier_aborted) {
 803     // If the barrier aborted we ignore the overflow condition and
 804     // just abort the whole marking phase as quickly as possible.
 805     return;
 806   }
 807 }
 808 
 809 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 810   SuspendibleThreadSetLeaver sts_leave(concurrent());
 811   _second_overflow_barrier_sync.enter();
 812 
 813   // at this point everything should be re-initialized and ready to go
 814 }
 815 
 816 class G1CMConcurrentMarkingTask : public AbstractGangTask {
 817   G1ConcurrentMark*     _cm;
 818 
 819 public:
 820   void work(uint worker_id) {
 821     assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
 822     ResourceMark rm;
 823 
 824     double start_vtime = os::elapsedVTime();
 825 
 826     {
 827       SuspendibleThreadSetJoiner sts_join;
 828 
 829       assert(worker_id < _cm->active_tasks(), "invariant");
 830 
 831       G1CMTask* task = _cm->task(worker_id);
 832       task->record_start_time();
 833       if (!_cm->has_aborted()) {
 834         do {
 835           task->do_marking_step(G1ConcMarkStepDurationMillis,
 836                                 true  /* do_termination */,
 837                                 false /* is_serial*/);
 838 
 839           _cm->do_yield_check();
 840         } while (!_cm->has_aborted() && task->has_aborted());
 841       }
 842       task->record_end_time();
 843       guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
 844     }
 845 
 846     double end_vtime = os::elapsedVTime();
 847     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
 848   }
 849 
 850   G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
 851       AbstractGangTask("Concurrent Mark"), _cm(cm) { }
 852 
 853   ~G1CMConcurrentMarkingTask() { }
 854 };
 855 
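// Calculate the number of concurrent worker threads to use for the upcoming marking,
// either the static maximum or a dynamically computed value.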
 856 uint G1ConcurrentMark::calc_active_marking_workers() {
 857   uint result = 0;
 858   if (!UseDynamicNumberOfGCThreads ||
 859       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 860        !ForceDynamicNumberOfGCThreads)) {
 861     result = _max_concurrent_workers;
 862   } else {
 863     result =
 864       WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
 865                                                 1, /* Minimum workers */
 866                                                 _num_concurrent_workers,
 867                                                 Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_worker_threads() because
    // that scaling has already gone into "_max_concurrent_workers".
 870   }
 871   assert(result > 0 && result <= _max_concurrent_workers,
 872          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
 873          _max_concurrent_workers, result);
 874   return result;
 875 }
 876 
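// Scan the objects in a single root region, from its next TAMS up to top, iterating
// over the fields of each object with the root region scan closure.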
 877 void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
 878   assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
 879          "Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
 880   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 881 
 882   const uintx interval = PrefetchScanIntervalInBytes;
 883   HeapWord* curr = hr->next_top_at_mark_start();
 884   const HeapWord* end = hr->top();
 885   while (curr < end) {
 886     Prefetch::read(curr, interval);
 887     oop obj = oop(curr);
 888     int size = obj->oop_iterate_size(&cl);
 889     assert(size == obj->size(), "sanity");
 890     curr += size;
 891   }
 892 }
 893 
 894 class G1CMRootRegionScanTask : public AbstractGangTask {
 895   G1ConcurrentMark* _cm;
 896 public:
 897   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 898     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
 899 
 900   void work(uint worker_id) {
 901     assert(Thread::current()->is_ConcurrentGC_thread(),
 902            "this should only be done by a conc GC thread");
 903 
 904     G1CMRootRegions* root_regions = _cm->root_regions();
 905     HeapRegion* hr = root_regions->claim_next();
 906     while (hr != NULL) {
 907       _cm->scan_root_region(hr, worker_id);
 908       hr = root_regions->claim_next();
 909     }
 910   }
 911 };
 912 
 913 void G1ConcurrentMark::scan_root_regions() {
 914   // scan_in_progress() will have been set to true only if there was
 915   // at least one root region to scan. So, if it's false, we
 916   // should not attempt to do any further work.
 917   if (root_regions()->scan_in_progress()) {
 918     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
 919 
 920     _num_concurrent_workers = MIN2(calc_active_marking_workers(),
 921                                    // We distribute work on a per-region basis, so starting
 922                                    // more threads than that is useless.
 923                                    root_regions()->num_root_regions());
 924     assert(_num_concurrent_workers <= _max_concurrent_workers,
 925            "Maximum number of marking threads exceeded");
 926 
 927     G1CMRootRegionScanTask task(this);
 928     log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
 929                         task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
 930     _concurrent_workers->run_task(&task, _num_concurrent_workers);
 931 
 932     // It's possible that has_aborted() is true here without actually
 933     // aborting the survivor scan earlier. This is OK as it's
 934     // mainly used for sanity checking.
 935     root_regions()->scan_finished();
 936   }
 937 }
 938 
 939 void G1ConcurrentMark::concurrent_cycle_start() {
 940   _gc_timer_cm->register_gc_start();
 941 
 942   _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
 943 
 944   _g1h->trace_heap_before_gc(_gc_tracer_cm);
 945 }
 946 
 947 void G1ConcurrentMark::concurrent_cycle_end() {
 948   _g1h->collector_state()->set_clearing_next_bitmap(false);
 949 
 950   _g1h->trace_heap_after_gc(_gc_tracer_cm);
 951 
 952   if (has_aborted()) {
 953     log_info(gc, marking)("Concurrent Mark Abort");
 954     _gc_tracer_cm->report_concurrent_mode_failure();
 955   }
 956 
 957   _gc_timer_cm->register_gc_end();
 958 
 959   _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
 960 }
 961 
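// The concurrent marking phase proper: determine the number of marking workers, set up
// the marking state and run the concurrent marking task.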
 962 void G1ConcurrentMark::mark_from_roots() {
 963   _restart_for_overflow = false;
 964 
 965   _num_concurrent_workers = calc_active_marking_workers();
 966 
 967   uint active_workers = MAX2(1U, _num_concurrent_workers);
 968 
 969   // Setting active workers is not guaranteed since fewer
 970   // worker threads may currently exist and more may not be
 971   // available.
 972   active_workers = _concurrent_workers->update_active_workers(active_workers);
 973   log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());
 974 
 975   // Parallel task terminator is set in "set_concurrency_and_phase()"
 976   set_concurrency_and_phase(active_workers, true /* concurrent */);
 977 
 978   G1CMConcurrentMarkingTask marking_task(this);
 979   _concurrent_workers->run_task(&marking_task);
 980   print_stats();
 981 }
 982 
 983 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
 984   G1HeapVerifier* verifier = _g1h->verifier();
 985 
 986   verifier->verify_region_sets_optional();
 987 
 988   if (VerifyDuringGC) {
 989     GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);
 990 
 991     size_t const BufLen = 512;
 992     char buffer[BufLen];
 993 
 994     jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
 995     verifier->verify(type, vo, buffer);
 996   }
 997 
 998   verifier->check_bitmaps(caller);
 999 }
1000 
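// Task that, before the remembered set rebuild phase, updates the remembered set
// tracking state of every region and records the marked bytes, distributing the
// liveness of humongous objects across all regions they span.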
1001 class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
1002   G1CollectedHeap* _g1h;
1003   G1ConcurrentMark* _cm;
1004   HeapRegionClaimer _hrclaimer;
1005   uint volatile _total_selected_for_rebuild;
1006 
1007   G1PrintRegionLivenessInfoClosure _cl;
1008 
1009   class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1010     G1CollectedHeap* _g1h;
1011     G1ConcurrentMark* _cm;
1012 
1013     G1PrintRegionLivenessInfoClosure* _cl;
1014 
1015     uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1016 
1017     void update_remset_before_rebuild(HeapRegion* hr) {
1018       G1RemSetTrackingPolicy* tracking_policy = _g1h->policy()->remset_tracker();
1019 
1020       bool selected_for_rebuild;
1021       if (hr->is_humongous()) {
1022         bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
1023         selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
1024       } else {
1025         size_t const live_bytes = _cm->liveness(hr->hrm_index());
1026         selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1027       }
1028       if (selected_for_rebuild) {
1029         _num_regions_selected_for_rebuild++;
1030       }
1031       _cm->update_top_at_rebuild_start(hr);
1032     }
1033 
1034     // Distribute the given words across the humongous object starting with hr and
1035     // note end of marking.
1036     void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1037       uint const region_idx = hr->hrm_index();
1038       size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
1039       uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);
1040 
1041       // "Distributing" zero words means that we only note end of marking for these
1042       // regions.
1043       assert(marked_words == 0 || obj_size_in_words == marked_words,
1044              "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
1045              obj_size_in_words, marked_words);
1046 
1047       for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
1048         HeapRegion* const r = _g1h->region_at(i);
1049         size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
1050 
1051         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
1052                                words_to_add, i, r->get_type_str());
1053         add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
1054         marked_words -= words_to_add;
1055       }
1056       assert(marked_words == 0,
1057              SIZE_FORMAT " words left after distributing space across %u regions",
1058              marked_words, num_regions_in_humongous);
1059     }
1060 
1061     void update_marked_bytes(HeapRegion* hr) {
1062       uint const region_idx = hr->hrm_index();
1063       size_t const marked_words = _cm->liveness(region_idx);
1064       // The marking attributes the object's size completely to the humongous starts
1065       // region. We need to distribute this value across the entire set of regions a
1066       // humongous object spans.
1067       if (hr->is_humongous()) {
1068         assert(hr->is_starts_humongous() || marked_words == 0,
1069                "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
1070                marked_words, region_idx, hr->get_type_str());
1071         if (hr->is_starts_humongous()) {
1072           distribute_marked_bytes(hr, marked_words);
1073         }
1074       } else {
1075         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
1076         add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
1077       }
1078     }
1079 
1080     void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
1081       hr->add_to_marked_bytes(marked_bytes);
1082       _cl->do_heap_region(hr);
1083       hr->note_end_of_marking();
1084     }
1085 
1086   public:
1087     G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
1088       _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }
1089 
1090     virtual bool do_heap_region(HeapRegion* r) {
1091       update_remset_before_rebuild(r);
1092       update_marked_bytes(r);
1093 
1094       return false;
1095     }
1096 
1097     uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1098   };
1099 
1100 public:
1101   G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
1102     AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
1103     _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }
1104 
1105   virtual void work(uint worker_id) {
1106     G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
1107     _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
1108     Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
1109   }
1110 
1111   uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
1112 
1113   // Number of regions for which roughly one thread should be spawned for this work.
1114   static const uint RegionsPerThread = 384;
1115 };
1116 
1117 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1118   G1CollectedHeap* _g1h;
1119 public:
1120   G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1121 
1122   virtual bool do_heap_region(HeapRegion* r) {
1123     _g1h->policy()->remset_tracker()->update_after_rebuild(r);
1124     return false;
1125   }
1126 };
1127 
1128 void G1ConcurrentMark::remark() {
1129   assert_at_safepoint_on_vm_thread();
1130 
  // If a full collection has happened, we should not continue. However, we might
  // have ended up here as the Remark VM operation has already been scheduled.
1133   if (has_aborted()) {
1134     return;
1135   }
1136 
1137   G1Policy* policy = _g1h->policy();
1138   policy->record_concurrent_mark_remark_start();
1139 
1140   double start = os::elapsedTime();
1141 
1142   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1143 
1144   {
1145     GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
1146     finalize_marking();
1147   }
1148 
1149   double mark_work_end = os::elapsedTime();
1150 
1151   bool const mark_finished = !has_overflown();
1152   if (mark_finished) {
1153     weak_refs_work(false /* clear_all_soft_refs */);
1154 
1155     SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1156     // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
1159     satb_mq_set.set_active_all_threads(false, /* new active value */
1160                                        true /* expected_active */);
1161 
1162     {
1163       GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
1164       flush_all_task_caches();
1165     }
1166 
1167     // Install newly created mark bitmap as "prev".
1168     swap_mark_bitmaps();
1169     {
1170       GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
1171 
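      // Use roughly one worker per RegionsPerThread regions (rounding up), but no more
      // than the currently active workers.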
1172       uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
1173                                        G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
1174       uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
1175 
1176       G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
1177       log_debug(gc,ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
1178       _g1h->workers()->run_task(&cl, num_workers);
1179 
1180       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1181                                       _g1h->num_regions(), cl.total_selected_for_rebuild());
1182     }
1183     {
1184       GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
1185       reclaim_empty_regions();
1186     }
1187 
1188     // Clean out dead classes
1189     if (ClassUnloadingWithConcurrentMark) {
1190       GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
1191       ClassLoaderDataGraph::purge();
1192     }
1193 
1194     _g1h->resize_heap_if_necessary();
1195 
1196     compute_new_sizes();
1197 
1198     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1199 
1200     assert(!restart_for_overflow(), "sanity");
1201     // Completely reset the marking state since marking completed
1202     reset_at_marking_complete();
1203   } else {
1204     // We overflowed.  Restart concurrent marking.
1205     _restart_for_overflow = true;
1206 
1207     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1208 
1209     // Clear the marking state because we will be restarting
1210     // marking due to overflowing the global mark stack.
1211     reset_marking_for_restart();
1212   }
1213 
1214   {
1215     GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
1216     report_object_count(mark_finished);
1217   }
1218 
1219   // Statistics
1220   double now = os::elapsedTime();
1221   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1222   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1223   _remark_times.add((now - start) * 1000.0);
1224 
1225   policy->record_concurrent_mark_remark_end();
1226 }
1227 
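// Parallel task that, during the Cleanup pause, frees regions that contain no live
// objects after marking, excluding young and archive regions.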
1228 class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
1229   // Per-region work during the Cleanup pause.
1230   class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
1231     G1CollectedHeap* _g1h;
1232     size_t _freed_bytes;
1233     FreeRegionList* _local_cleanup_list;
1234     uint _old_regions_removed;
1235     uint _humongous_regions_removed;
1236 
1237   public:
1238     G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
1239                                  FreeRegionList* local_cleanup_list) :
1240       _g1h(g1h),
1241       _freed_bytes(0),
1242       _local_cleanup_list(local_cleanup_list),
1243       _old_regions_removed(0),
1244       _humongous_regions_removed(0) { }
1245 
1246     size_t freed_bytes() { return _freed_bytes; }
1247     const uint old_regions_removed() { return _old_regions_removed; }
1248     const uint humongous_regions_removed() { return _humongous_regions_removed; }
1249 
1250     bool do_heap_region(HeapRegion *hr) {
1251       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1252         _freed_bytes += hr->used();
1253         hr->set_containing_set(NULL);
1254         if (hr->is_humongous()) {
1255           _humongous_regions_removed++;
1256           _g1h->free_humongous_region(hr, _local_cleanup_list);
1257         } else {
1258           _old_regions_removed++;
1259           _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
1260         }
1261         hr->clear_cardtable();
1262         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1263         log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1264       }
1265 
1266       return false;
1267     }
1268   };
1269 
1270   G1CollectedHeap* _g1h;
1271   FreeRegionList* _cleanup_list;
1272   HeapRegionClaimer _hrclaimer;
1273 
1274 public:
1275   G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1276     AbstractGangTask("G1 Cleanup"),
1277     _g1h(g1h),
1278     _cleanup_list(cleanup_list),
1279     _hrclaimer(n_workers) {
1280   }
1281 
1282   void work(uint worker_id) {
1283     FreeRegionList local_cleanup_list("Local Cleanup List");
1284     G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
1285     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1286     assert(cl.is_complete(), "Shouldn't have aborted!");
1287 
1288     // Now update the old/humongous region sets
1289     _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
1290     {
1291       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1292       _g1h->decrement_summary_bytes(cl.freed_bytes());
1293 
1294       _cleanup_list->add_ordered(&local_cleanup_list);
1295       assert(local_cleanup_list.is_empty(), "post-condition");
1296     }
1297   }
1298 };
1299 
1300 void G1ConcurrentMark::reclaim_empty_regions() {
1301   WorkGang* workers = _g1h->workers();
1302   FreeRegionList empty_regions_list("Empty Regions After Mark List");
1303 
1304   G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
1305   workers->run_task(&cl);
1306 
1307   if (!empty_regions_list.is_empty()) {
1308     log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1309     // Now print the empty regions list.
1310     G1HRPrinter* hrp = _g1h->hr_printer();
1311     if (hrp->is_active()) {
1312       FreeRegionListIterator iter(&empty_regions_list);
1313       while (iter.more_available()) {
1314         HeapRegion* hr = iter.get_next();
1315         hrp->cleanup(hr);
1316       }
1317     }
1318     // And actually make them available.
1319     _g1h->prepend_to_freelist(&empty_regions_list);
1320   }
1321 }
1322 
1323 void G1ConcurrentMark::compute_new_sizes() {
1324   MetaspaceGC::compute_new_size();
1325 
1326   // Cleanup will have freed any regions completely full of garbage.
1327   // Update the soft reference policy with the new heap occupancy.
1328   Universe::update_heap_info_at_gc();
1329 
1330   // We reclaimed old regions so we should calculate the sizes to make
1331   // sure we update the old gen/space data.
1332   _g1h->g1mm()->update_sizes();
1333 }
1334 
1335 void G1ConcurrentMark::cleanup() {
1336   assert_at_safepoint_on_vm_thread();
1337 
1338   // If a full collection has happened, we shouldn't do this.
1339   if (has_aborted()) {
1340     return;
1341   }
1342 
1343   G1Policy* policy = _g1h->policy();
1344   policy->record_concurrent_mark_cleanup_start();
1345 
1346   double start = os::elapsedTime();
1347 
1348   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1349 
1350   {
1351     GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1352     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1353     _g1h->heap_region_iterate(&cl);
1354   }
1355 
1356   if (log_is_enabled(Trace, gc, liveness)) {
1357     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1358     _g1h->heap_region_iterate(&cl);
1359   }
1360 
1361   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1362 
  // We need to make this count as a "collection" so that any collection pause that
  // races with it goes around and waits for Cleanup to finish.
1365   _g1h->increment_total_collections();
1366 
1367   // Local statistics
1368   double recent_cleanup_time = (os::elapsedTime() - start);
1369   _total_cleanup_time += recent_cleanup_time;
1370   _cleanup_times.add(recent_cleanup_time);
1371 
1372   {
1373     GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1374     policy->record_concurrent_mark_cleanup_end();
1375   }
1376 }
1377 
// 'Keep Alive' oop closure used by both serial and parallel reference processing.
1379 // Uses the G1CMTask associated with a worker thread (for serial reference
1380 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1381 // trace referent objects.
1382 //
1383 // Using the G1CMTask and embedded local queues avoids having the worker
1384 // threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also, using the tasks' local queues removes the potential
// for the workers to interfere with each other, which could occur if they were
// operating on the global stack.
1389 
1390 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1391   G1ConcurrentMark* _cm;
1392   G1CMTask*         _task;
1393   uint              _ref_counter_limit;
1394   uint              _ref_counter;
1395   bool              _is_serial;
1396 public:
1397   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1398     _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
1399     _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
1400     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1401   }
1402 
1403   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1404   virtual void do_oop(      oop* p) { do_oop_work(p); }
1405 
1406   template <class T> void do_oop_work(T* p) {
1407     if (_cm->has_overflown()) {
1408       return;
1409     }
1410     if (!_task->deal_with_reference(p)) {
1411       // We did not add anything to the mark bitmap (or mark stack), so there is
1412       // no point trying to drain it.
1413       return;
1414     }
1415     _ref_counter--;
1416 
1417     if (_ref_counter == 0) {
1418       // We have dealt with _ref_counter_limit references, pushing them
1419       // and objects reachable from them on to the local stack (and
1420       // possibly the global stack). Call G1CMTask::do_marking_step() to
1421       // process these entries.
1422       //
1423       // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1424       // there's nothing more to do (i.e. we're done with the entries that
1425       // were pushed as a result of the G1CMTask::deal_with_reference() calls
1426       // above) or we overflow.
1427       //
1428       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1429       // flag while there may still be some work to do. (See the comment at
1430       // the beginning of G1CMTask::do_marking_step() for those conditions -
1431       // one of which is reaching the specified time target.) It is only
1432       // when G1CMTask::do_marking_step() returns without setting the
1433       // has_aborted() flag that the marking step has completed.
1434       do {
1435         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1436         _task->do_marking_step(mark_step_duration_ms,
1437                                false      /* do_termination */,
1438                                _is_serial);
1439       } while (_task->has_aborted() && !_cm->has_overflown());
1440       _ref_counter = _ref_counter_limit;
1441     }
1442   }
1443 };
1444 
1445 // 'Drain' oop closure used by both serial and parallel reference processing.
1446 // Uses the G1CMTask associated with a given worker thread (for serial
1447 // reference processing the G1CMTask for worker 0 is used). Calls the
1448 // do_marking_step routine, with an unbelievably large timeout value,
1449 // to drain the marking data structures of the remaining entries
1450 // added by the 'keep alive' oop closure above.
1451 
1452 class G1CMDrainMarkingStackClosure : public VoidClosure {
1453   G1ConcurrentMark* _cm;
1454   G1CMTask*         _task;
1455   bool              _is_serial;
1456  public:
1457   G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1458     _cm(cm), _task(task), _is_serial(is_serial) {
1459     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1460   }
1461 
1462   void do_void() {
1463     do {
1464       // We call G1CMTask::do_marking_step() to completely drain the local
1465       // and global marking stacks of entries pushed by the 'keep alive'
1466       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1467       //
1468       // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1469       // if there's nothing more to do (i.e. we've completely drained the
1470       // entries that were pushed as a result of applying the 'keep alive'
1471       // closure to the entries on the discovered ref lists) or we overflow
1472       // the global marking stack.
1473       //
1474       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1475       // flag while there may still be some work to do. (See the comment at
1476       // the beginning of G1CMTask::do_marking_step() for those conditions -
1477       // one of which is reaching the specified time target.) It is only
1478       // when G1CMTask::do_marking_step() returns without setting the
1479       // has_aborted() flag that the marking step has completed.
1480 
1481       _task->do_marking_step(1000000000.0 /* something very large */,
1482                              true         /* do_termination */,
1483                              _is_serial);
1484     } while (_task->has_aborted() && !_cm->has_overflown());
1485   }
1486 };
1487 
1488 // Implementation of AbstractRefProcTaskExecutor for parallel
1489 // reference processing at the end of G1 concurrent marking
1490 
1491 class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1492 private:
1493   G1CollectedHeap*  _g1h;
1494   G1ConcurrentMark* _cm;
1495   WorkGang*         _workers;
1496   uint              _active_workers;
1497 
1498 public:
1499   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1500                           G1ConcurrentMark* cm,
1501                           WorkGang* workers,
1502                           uint n_workers) :
1503     _g1h(g1h), _cm(cm),
1504     _workers(workers), _active_workers(n_workers) { }
1505 
1506   virtual void execute(ProcessTask& task, uint ergo_workers);
1507 };
1508 
1509 class G1CMRefProcTaskProxy : public AbstractGangTask {
1510   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1511   ProcessTask&      _proc_task;
1512   G1CollectedHeap*  _g1h;
1513   G1ConcurrentMark* _cm;
1514 
1515 public:
1516   G1CMRefProcTaskProxy(ProcessTask& proc_task,
1517                        G1CollectedHeap* g1h,
1518                        G1ConcurrentMark* cm) :
1519     AbstractGangTask("Process reference objects in parallel"),
1520     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1521     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1522     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1523   }
1524 
1525   virtual void work(uint worker_id) {
1526     ResourceMark rm;
1527     HandleMark hm;
1528     G1CMTask* task = _cm->task(worker_id);
1529     G1CMIsAliveClosure g1_is_alive(_g1h);
1530     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1531     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1532 
1533     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1534   }
1535 };
1536 
1537 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
1538   assert(_workers != NULL, "Need parallel worker threads.");
1539   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1540   assert(_workers->active_workers() >= ergo_workers,
1541          "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
1542          ergo_workers, _workers->active_workers());
1543 
1544   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1545 
1546   // We need to reset the concurrency level before each
1547   // proxy task execution, so that the termination protocol
1548   // and overflow handling in G1CMTask::do_marking_step() knows
1549   // how many workers to wait for.
1550   _cm->set_concurrency(ergo_workers);
1551   _workers->run_task(&proc_task_proxy, ergo_workers);
1552 }
1553 
1554 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1555   ResourceMark rm;
1556   HandleMark   hm;
1557 
1558   // Is alive closure.
1559   G1CMIsAliveClosure g1_is_alive(_g1h);
1560 
1561   // Inner scope to exclude the cleaning of the string table
1562   // from the displayed time.
1563   {
1564     GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1565 
1566     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1567 
1568     // See the comment in G1CollectedHeap::ref_processing_init()
1569     // about how reference processing currently works in G1.
1570 
1571     // Set the soft reference policy
1572     rp->setup_policy(clear_all_soft_refs);
1573     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1574 
1575     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1576     // in serial reference processing. Note these closures are also
1577     // used for serially processing (by the current thread) the
1578     // JNI references during parallel reference processing.
1579     //
1580     // These closures do not need to synchronize with the worker
1581     // threads involved in parallel reference processing as these
1582     // instances are executed serially by the current thread (i.e.
1583     // reference processing is not multi-threaded and is thus
1584     // performed by the current thread instead of a gang worker).
1585     //
1586     // The gang tasks involved in parallel reference processing create
1587     // their own instances of these closures, which do their own
1588     // synchronization among themselves.
1589     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1590     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1591 
1592     // We need at least one active thread. If reference processing
1593     // is not multi-threaded we use the current (VMThread) thread,
1594     // otherwise we use the work gang from the G1CollectedHeap and
1595     // we utilize all the worker threads we can.
1596     bool processing_is_mt = rp->processing_is_mt();
1597     uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
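         // Clamp the number of workers to the range [1, _max_num_tasks].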
1598     active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
1599 
1600     // Parallel processing task executor.
1601     G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1602                                               _g1h->workers(), active_workers);
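         // When processing is not MT we pass a NULL executor, which makes the
         // reference processor do the work serially on the current thread.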
1603     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1604 
1605     // Set the concurrency level. The phase was already set prior to
1606     // executing the remark task.
1607     set_concurrency(active_workers);
1608 
1609     // Set the degree of MT processing here.  If the discovery was done MT,
1610     // the number of threads involved during discovery could differ from
1611     // the number of active workers.  This is OK as long as the discovered
1612     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1613     rp->set_active_mt_degree(active_workers);
1614 
1615     ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
1616 
1617     // Process the weak references.
1618     const ReferenceProcessorStats& stats =
1619         rp->process_discovered_references(&g1_is_alive,
1620                                           &g1_keep_alive,
1621                                           &g1_drain_mark_stack,
1622                                           executor,
1623                                           &pt);
1624     _gc_tracer_cm->report_gc_reference_stats(stats);
1625     pt.print_all_references();
1626 
1627     // The do_oop work routines of the keep_alive and drain_marking_stack
1628     // oop closures will set the has_overflown flag if we overflow the
1629     // global marking stack.
1630 
1631     assert(has_overflown() || _global_mark_stack.is_empty(),
1632            "Mark stack should be empty (unless it has overflown)");
1633 
1634     assert(rp->num_queues() == active_workers, "why not");
1635 
1636     rp->verify_no_references_recorded();
1637     assert(!rp->discovery_enabled(), "Post condition");
1638   }
1639 
1640   if (has_overflown()) {
1641     // We cannot trust g1_is_alive or the contents of the heap if the marking stack
1642     // overflowed while processing references. Exit the VM.
1643     fatal("Overflow during reference processing, can not continue. Please "
1644           "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
1645           "restart.", MarkStackSizeMax);
1646     return;
1647   }
1648 
1649   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1650 
1651   {
1652     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1653     WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
1654   }
1655 
1656   // Unload Klasses, String, Code Cache, etc.
1657   if (ClassUnloadingWithConcurrentMark) {
1658     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1659     bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm);
1660     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1661   } else if (StringDedup::is_enabled()) {
1662     GCTraceTime(Debug, gc, phases) debug("String Deduplication", _gc_timer_cm);
1663     _g1h->string_dedup_cleaning(&g1_is_alive, NULL);
1664   }
1665 }
1666 
1667 class G1PrecleanYieldClosure : public YieldClosure {
1668   G1ConcurrentMark* _cm;
1669 
1670 public:
1671   G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }
1672 
1673   virtual bool should_return() {
1674     return _cm->has_aborted();
1675   }
1676 
1677   virtual bool should_return_fine_grain() {
1678     _cm->do_yield_check();
1679     return _cm->has_aborted();
1680   }
1681 };
1682 
1683 void G1ConcurrentMark::preclean() {
1684   assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");
1685 
1686   SuspendibleThreadSetJoiner joiner;
1687 
1688   G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
1689   G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);
1690 
1691   set_concurrency_and_phase(1, true /* concurrent */);
1692 
1693   G1PrecleanYieldClosure yield_cl(this);
1694 
1695   ReferenceProcessor* rp = _g1h->ref_processor_cm();
1696   // Precleaning is single-threaded. Temporarily disable MT discovery.
1697   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
1698   rp->preclean_discovered_references(rp->is_alive_non_header(),
1699                                      &keep_alive,
1700                                      &drain_mark_stack,
1701                                      &yield_cl,
1702                                      _gc_timer_cm);
1703 }
1704 
1705 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1706 // the prev bitmap to determine liveness.
1707 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1708   G1CollectedHeap* _g1h;
1709 public:
1710   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1711 
1712   bool do_object_b(oop obj) {
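         // Objects outside the G1 reserved heap are always considered live; for objects
         // inside it, consult is_obj_dead(), which at this point uses the prev bitmap.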
1713     HeapWord* addr = (HeapWord*)obj;
1714     return addr != NULL &&
1715            (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
1716   }
1717 };
1718 
1719 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1720   // Depending on whether marking has completed, liveness needs to be determined
1721   // using either the next or the prev bitmap.
1722   if (mark_completed) {
1723     G1ObjectCountIsAliveClosure is_alive(_g1h);
1724     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1725   } else {
1726     G1CMIsAliveClosure is_alive(_g1h);
1727     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1728   }
1729 }
1730 
1731 
1732 void G1ConcurrentMark::swap_mark_bitmaps() {
1733   G1CMBitMap* temp = _prev_mark_bitmap;
1734   _prev_mark_bitmap = _next_mark_bitmap;
1735   _next_mark_bitmap = temp;
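       // The new next bitmap still holds marks from the previous cycle and must be
       // cleared before the next marking starts.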
1736   _g1h->collector_state()->set_clearing_next_bitmap(true);
1737 }
1738 
1739 // Closure for marking entries in SATB buffers.
1740 class G1CMSATBBufferClosure : public SATBBufferClosure {
1741 private:
1742   G1CMTask* _task;
1743   G1CollectedHeap* _g1h;
1744 
1745   // This is very similar to G1CMTask::deal_with_reference, but with
1746   // more relaxed requirements for the argument, so this must be more
1747   // circumspect about treating the argument as an object.
1748   void do_entry(void* entry) const {
1749     _task->increment_refs_reached();
1750     oop const obj = static_cast<oop>(entry);
1751     _task->make_reference_grey(obj);
1752   }
1753 
1754 public:
1755   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1756     : _task(task), _g1h(g1h) { }
1757 
1758   virtual void do_buffer(void** buffer, size_t size) {
1759     for (size_t i = 0; i < size; ++i) {
1760       do_entry(buffer[i]);
1761     }
1762   }
1763 };
1764 
1765 class G1RemarkThreadsClosure : public ThreadClosure {
1766   G1CMSATBBufferClosure _cm_satb_cl;
1767   G1CMOopClosure _cm_cl;
1768   MarkingCodeBlobClosure _code_cl;
1769   uintx _claim_token;
1770 
1771  public:
1772   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1773     _cm_satb_cl(task, g1h),
1774     _cm_cl(g1h, task),
1775     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1776     _claim_token(Threads::thread_claim_token()) {}
1777 
1778   void do_thread(Thread* thread) {
1779     if (thread->claim_threads_do(true, _claim_token)) {
1780       SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread);
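           // Process any entries in this thread's local SATB buffer and reset it.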
1781       queue.apply_closure_and_empty(&_cm_satb_cl);
1782       if (thread->is_Java_thread()) {
1783         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1784         // however, oops reachable from nmethods have very complex lifecycles:
1785         // * Alive if on the stack of an executing method
1786         // * Weakly reachable otherwise
1787         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1788         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1789         JavaThread* jt = (JavaThread*)thread;
1790         jt->nmethods_do(&_code_cl);
1791       }
1792     }
1793   }
1794 };
1795 
1796 class G1CMRemarkTask : public AbstractGangTask {
1797   G1ConcurrentMark* _cm;
1798 public:
1799   void work(uint worker_id) {
1800     G1CMTask* task = _cm->task(worker_id);
1801     task->record_start_time();
1802     {
1803       ResourceMark rm;
1804       HandleMark hm;
1805 
1806       G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1807       Threads::threads_do(&threads_f);
1808     }
1809 
1810     do {
1811       task->do_marking_step(1000000000.0 /* something very large */,
1812                             true         /* do_termination       */,
1813                             false        /* is_serial            */);
1814     } while (task->has_aborted() && !_cm->has_overflown());
1815     // If we overflow, then we do not want to restart. We instead
1816     // want to abort remark and do concurrent marking again.
1817     task->record_end_time();
1818   }
1819 
1820   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1821     AbstractGangTask("Par Remark"), _cm(cm) {
1822     _cm->terminator()->reset_for_reuse(active_workers);
1823   }
1824 };
1825 
1826 void G1ConcurrentMark::finalize_marking() {
1827   ResourceMark rm;
1828   HandleMark   hm;
1829 
1830   _g1h->ensure_parsability(false);
1831 
1832   // This is remark, so we'll use up all active threads.
1833   uint active_workers = _g1h->workers()->active_workers();
1834   set_concurrency_and_phase(active_workers, false /* concurrent */);
1835   // Leave _parallel_marking_threads at its
1836   // value originally calculated in the G1ConcurrentMark
1837   // constructor and pass the values of the active workers
1838   // through the gang in the task.
1839 
1840   {
1841     StrongRootsScope srs(active_workers);
1842 
1843     G1CMRemarkTask remarkTask(this, active_workers);
1844     // We will start all available threads, even if we decide that the
1845     // active_workers will be fewer. The extra ones will just bail out
1846     // immediately.
1847     _g1h->workers()->run_task(&remarkTask);
1848   }
1849 
1850   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1851   guarantee(has_overflown() ||
1852             satb_mq_set.completed_buffers_num() == 0,
1853             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1854             BOOL_TO_STR(has_overflown()),
1855             satb_mq_set.completed_buffers_num());
1856 
1857   print_stats();
1858 }
1859 
1860 void G1ConcurrentMark::flush_all_task_caches() {
1861   size_t hits = 0;
1862   size_t misses = 0;
1863   for (uint i = 0; i < _max_num_tasks; i++) {
1864     Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1865     hits += stats.first;
1866     misses += stats.second;
1867   }
1868   size_t sum = hits + misses;
1869   log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1870                        hits, misses, percent_of(hits, sum));
1871 }
1872 
1873 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1874   _prev_mark_bitmap->clear_range(mr);
1875 }
1876 
1877 HeapRegion*
1878 G1ConcurrentMark::claim_region(uint worker_id) {
1879   // "checkpoint" the finger
1880   HeapWord* finger = _finger;
1881 
1882   while (finger < _heap.end()) {
1883     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1884 
1885     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1886     // Make sure that the reads below do not float before loading curr_region.
1887     OrderAccess::loadload();
1888     // The above heap_region_containing() may return NULL as we always scan and claim
1889     // until the end of the heap. In this case, just jump to the next region.
1890     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1891 
1892     // Is the gap between reading the finger and doing the CAS too long?
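         // Try to claim the region by atomically bumping the global finger from
         // 'finger' to 'end'; cmpxchg returns the old value, so we succeeded only
         // if it still equals 'finger'.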
1893     HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
1894     if (res == finger && curr_region != NULL) {
1895       // we succeeded
1896       HeapWord*   bottom        = curr_region->bottom();
1897       HeapWord*   limit         = curr_region->next_top_at_mark_start();
1898 
1899       // Notice that _finger == end cannot be guaranteed here, since
1900       // someone else might have moved the finger even further.
1901       assert(_finger >= end, "the finger should have moved forward");
1902 
1903       if (limit > bottom) {
1904         return curr_region;
1905       } else {
1906         assert(limit == bottom,
1907                "the region limit should be at bottom");
1908         // we return NULL and the caller should try calling
1909         // claim_region() again.
1910         return NULL;
1911       }
1912     } else {
1913       assert(_finger > finger, "the finger should have moved forward");
1914       // read it again
1915       finger = _finger;
1916     }
1917   }
1918 
1919   return NULL;
1920 }
1921 
1922 #ifndef PRODUCT
1923 class VerifyNoCSetOops {
1924   G1CollectedHeap* _g1h;
1925   const char* _phase;
1926   int _info;
1927 
1928 public:
1929   VerifyNoCSetOops(const char* phase, int info = -1) :
1930     _g1h(G1CollectedHeap::heap()),
1931     _phase(phase),
1932     _info(info)
1933   { }
1934 
1935   void operator()(G1TaskQueueEntry task_entry) const {
1936     if (task_entry.is_array_slice()) {
1937       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1938       return;
1939     }
1940     guarantee(oopDesc::is_oop(task_entry.obj()),
1941               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1942               p2i(task_entry.obj()), _phase, _info);
1943     HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
1944     guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
1945               "obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
1946               p2i(task_entry.obj()), _phase, _info, r->hrm_index());
1947   }
1948 };
1949 
1950 void G1ConcurrentMark::verify_no_collection_set_oops() {
1951   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1952   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1953     return;
1954   }
1955 
1956   // Verify entries on the global mark stack
1957   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1958 
1959   // Verify entries on the task queues
1960   for (uint i = 0; i < _max_num_tasks; ++i) {
1961     G1CMTaskQueue* queue = _task_queues->queue(i);
1962     queue->iterate(VerifyNoCSetOops("Queue", i));
1963   }
1964 
1965   // Verify the global finger
1966   HeapWord* global_finger = finger();
1967   if (global_finger != NULL && global_finger < _heap.end()) {
1968     // Since we always iterate over all regions, we might get a NULL HeapRegion
1969     // here.
1970     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1971     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1972               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1973               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1974   }
1975 
1976   // Verify the task fingers
1977   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1978   for (uint i = 0; i < _num_concurrent_workers; ++i) {
1979     G1CMTask* task = _tasks[i];
1980     HeapWord* task_finger = task->finger();
1981     if (task_finger != NULL && task_finger < _heap.end()) {
1982       // See above note on the global finger verification.
1983       HeapRegion* r = _g1h->heap_region_containing(task_finger);
1984       guarantee(r == NULL || task_finger == r->bottom() ||
1985                 !r->in_collection_set() || !r->has_index_in_opt_cset(),
1986                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
1987                 p2i(task_finger), HR_FORMAT_PARAMS(r));
1988     }
1989   }
1990 }
1991 #endif // PRODUCT
1992 
1993 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
1994   _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
1995 }
1996 
1997 void G1ConcurrentMark::print_stats() {
1998   if (!log_is_enabled(Debug, gc, stats)) {
1999     return;
2000   }
2001   log_debug(gc, stats)("---------------------------------------------------------------------");
2002   for (size_t i = 0; i < _num_active_tasks; ++i) {
2003     _tasks[i]->print_stats();
2004     log_debug(gc, stats)("---------------------------------------------------------------------");
2005   }
2006 }
2007 
2008 void G1ConcurrentMark::concurrent_cycle_abort() {
2009   if (!cm_thread()->during_cycle() || _has_aborted) {
2010     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2011     return;
2012   }
2013 
2014   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2015   // concurrent bitmap clearing.
2016   {
2017     GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2018     clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
2019   }
2020   // Note we cannot clear the previous marking bitmap here
2021   // since VerifyDuringGC verifies the objects marked during
2022   // a full GC against the previous bitmap.
2023 
2024   // Empty mark stack
2025   reset_marking_for_restart();
2026   for (uint i = 0; i < _max_num_tasks; ++i) {
2027     _tasks[i]->clear_region_fields();
2028   }
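       // Abort the barrier syncs so that any tasks currently waiting at the overflow
       // barriers do not block indefinitely.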
2029   _first_overflow_barrier_sync.abort();
2030   _second_overflow_barrier_sync.abort();
2031   _has_aborted = true;
2032 
2033   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2034   satb_mq_set.abandon_partial_marking();
2035   // This can be called either during or outside marking, we'll read
2036   // the expected_active value from the SATB queue set.
2037   satb_mq_set.set_active_all_threads(
2038                                  false, /* new active value */
2039                                  satb_mq_set.is_active() /* expected_active */);
2040 }
2041 
2042 static void print_ms_time_info(const char* prefix, const char* name,
2043                                NumberSeq& ns) {
2044   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2045                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2046   if (ns.num() > 0) {
2047     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2048                            prefix, ns.sd(), ns.maximum());
2049   }
2050 }
2051 
2052 void G1ConcurrentMark::print_summary_info() {
2053   Log(gc, marking) log;
2054   if (!log.is_trace()) {
2055     return;
2056   }
2057 
2058   log.trace(" Concurrent marking:");
2059   print_ms_time_info("  ", "init marks", _init_times);
2060   print_ms_time_info("  ", "remarks", _remark_times);
2061   {
2062     print_ms_time_info("     ", "final marks", _remark_mark_times);
2063     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2064 
2065   }
2066   print_ms_time_info("  ", "cleanups", _cleanup_times);
2067   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2068             _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2069   log.trace("  Total stop_world time = %8.2f s.",
2070             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2071   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2072             cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2073 }
2074 
2075 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2076   _concurrent_workers->print_worker_threads_on(st);
2077 }
2078 
2079 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2080   _concurrent_workers->threads_do(tc);
2081 }
2082 
2083 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2084   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2085                p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2086   _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2087   _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2088 }
2089 
2090 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2091   ReferenceProcessor* result = g1h->ref_processor_cm();
2092   assert(result != NULL, "CM reference processor should not be NULL");
2093   return result;
2094 }
2095 
2096 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2097                                G1CMTask* task)
2098   : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
2099     _g1h(g1h), _task(task)
2100 { }
2101 
2102 void G1CMTask::setup_for_region(HeapRegion* hr) {
2103   assert(hr != NULL,
2104         "claim_region() should have filtered out NULL regions");
2105   _curr_region  = hr;
2106   _finger       = hr->bottom();
2107   update_region_limit();
2108 }
2109 
2110 void G1CMTask::update_region_limit() {
2111   HeapRegion* hr            = _curr_region;
2112   HeapWord* bottom          = hr->bottom();
2113   HeapWord* limit           = hr->next_top_at_mark_start();
2114 
2115   if (limit == bottom) {
2116     // The region was collected underneath our feet.
2117     // We set the finger to bottom to ensure that the bitmap
2118     // iteration that will follow this will not do anything.
2119     // (this is not a condition that holds when we set the region up,
2120     // as the region is not supposed to be empty in the first place)
2121     _finger = bottom;
2122   } else if (limit >= _region_limit) {
2123     assert(limit >= _finger, "peace of mind");
2124   } else {
2125     assert(limit < _region_limit, "only way to get here");
2126     // This can happen under some pretty unusual circumstances.  An
2127     // evacuation pause empties the region underneath our feet (NTAMS
2128     // at bottom). We then do some allocation in the region (NTAMS
2129     // stays at bottom), followed by the region being used as a GC
2130     // alloc region (NTAMS will move to top() and the objects
2131     // originally below it will be grayed). All objects now marked in
2132     // the region are explicitly grayed, if below the global finger,
2133     // and we do not need in fact to scan anything else. So, we simply
2134     // set _finger to be limit to ensure that the bitmap iteration
2135     // doesn't do anything.
2136     _finger = limit;
2137   }
2138 
2139   _region_limit = limit;
2140 }
2141 
2142 void G1CMTask::giveup_current_region() {
2143   assert(_curr_region != NULL, "invariant");
2144   clear_region_fields();
2145 }
2146 
2147 void G1CMTask::clear_region_fields() {
2148   // Set these three fields to values that indicate that we're not
2149   // holding on to a region.
2150   _curr_region   = NULL;
2151   _finger        = NULL;
2152   _region_limit  = NULL;
2153 }
2154 
2155 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2156   if (cm_oop_closure == NULL) {
2157     assert(_cm_oop_closure != NULL, "invariant");
2158   } else {
2159     assert(_cm_oop_closure == NULL, "invariant");
2160   }
2161   _cm_oop_closure = cm_oop_closure;
2162 }
2163 
2164 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2165   guarantee(next_mark_bitmap != NULL, "invariant");
2166   _next_mark_bitmap              = next_mark_bitmap;
2167   clear_region_fields();
2168 
2169   _calls                         = 0;
2170   _elapsed_time_ms               = 0.0;
2171   _termination_time_ms           = 0.0;
2172   _termination_start_time_ms     = 0.0;
2173 
2174   _mark_stats_cache.reset();
2175 }
2176 
2177 bool G1CMTask::should_exit_termination() {
2178   if (!regular_clock_call()) {
2179     return true;
2180   }
2181 
2182   // This is called when we are in the termination protocol. We should
2183   // quit if, for some reason, this task wants to abort or the global
2184   // stack is not empty (this means that we can get work from it).
2185   return !_cm->mark_stack_empty() || has_aborted();
2186 }
2187 
2188 void G1CMTask::reached_limit() {
2189   assert(_words_scanned >= _words_scanned_limit ||
2190          _refs_reached >= _refs_reached_limit,
2191          "shouldn't have been called otherwise");
2192   abort_marking_if_regular_check_fail();
2193 }
2194 
2195 bool G1CMTask::regular_clock_call() {
2196   if (has_aborted()) {
2197     return false;
2198   }
2199 
2200   // First, we need to recalculate the words scanned and refs reached
2201   // limits for the next clock call.
2202   recalculate_limits();
2203 
2204   // During the regular clock call we do the following:
2205 
2206   // (1) If an overflow has been flagged, then we abort.
2207   if (_cm->has_overflown()) {
2208     return false;
2209   }
2210 
2211   // If we are not concurrent (i.e. we're doing remark) we don't need
2212   // to check anything else. The other steps are only needed during
2213   // the concurrent marking phase.
2214   if (!_cm->concurrent()) {
2215     return true;
2216   }
2217 
2218   // (2) If marking has been aborted for Full GC, then we also abort.
2219   if (_cm->has_aborted()) {
2220     return false;
2221   }
2222 
2223   double curr_time_ms = os::elapsedVTime() * 1000.0;
2224 
2225   // (3) We check whether we should yield. If we have to, then we abort.
2226   if (SuspendibleThreadSet::should_yield()) {
2227     // We should yield. To do this we abort the task. The caller is
2228     // responsible for yielding.
2229     return false;
2230   }
2231 
2232   // (4) We check whether we've reached our time quota. If we have,
2233   // then we abort.
2234   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2235   if (elapsed_time_ms > _time_target_ms) {
2236     _has_timed_out = true;
2237     return false;
2238   }
2239 
2240   // (5) Finally, we check whether there are enough completed SATB
2241   // buffers available for processing. If there are, we abort.
2242   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2243   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2244     // We do need to process SATB buffers, so we'll abort and restart
2245     // the marking task to do so.
2246     return false;
2247   }
2248   return true;
2249 }
2250 
2251 void G1CMTask::recalculate_limits() {
2252   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2253   _words_scanned_limit      = _real_words_scanned_limit;
2254 
2255   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2256   _refs_reached_limit       = _real_refs_reached_limit;
2257 }
2258 
2259 void G1CMTask::decrease_limits() {
2260   // This is called when we believe that we're going to do an infrequent
2261   // operation which will increase the per-byte scanning cost (i.e. move
2262   // entries to/from the global stack). It basically tries to decrease the
2263   // scanning limit so that the clock is called earlier.
2264 
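       // Pull the limits back by 3/4 of a period so that the next clock call
       // happens after roughly a quarter of the normal scanning work.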
2265   _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2266   _refs_reached_limit  = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2267 }
2268 
2269 void G1CMTask::move_entries_to_global_stack() {
2270   // Local array where we'll store the entries that will be popped
2271   // from the local queue.
2272   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2273 
2274   size_t n = 0;
2275   G1TaskQueueEntry task_entry;
2276   while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2277     buffer[n] = task_entry;
2278     ++n;
2279   }
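       // If the chunk is not completely filled, terminate it with a null entry.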
2280   if (n < G1CMMarkStack::EntriesPerChunk) {
2281     buffer[n] = G1TaskQueueEntry();
2282   }
2283 
2284   if (n > 0) {
2285     if (!_cm->mark_stack_push(buffer)) {
2286       set_has_aborted();
2287     }
2288   }
2289 
2290   // This operation was quite expensive, so decrease the limits.
2291   decrease_limits();
2292 }
2293 
2294 bool G1CMTask::get_entries_from_global_stack() {
2295   // Local array where we'll store the entries that will be popped
2296   // from the global stack.
2297   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2298 
2299   if (!_cm->mark_stack_pop(buffer)) {
2300     return false;
2301   }
2302 
2303   // We did actually pop at least one entry.
2304   for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2305     G1TaskQueueEntry task_entry = buffer[i];
2306     if (task_entry.is_null()) {
2307       break;
2308     }
2309     assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2310     bool success = _task_queue->push(task_entry);
2311     // We only call this when the local queue is empty or under a
2312     // given target limit. So, we do not expect this push to fail.
2313     assert(success, "invariant");
2314   }
2315 
2316   // This operation was quite expensive, so decrease the limits
2317   decrease_limits();
2318   return true;
2319 }
2320 
2321 void G1CMTask::drain_local_queue(bool partially) {
2322   if (has_aborted()) {
2323     return;
2324   }
2325 
2326   // Decide what the target size is, depending on whether we're going to
2327   // drain it partially (so that other tasks can steal if they run out
2328   // of things to do) or totally (at the very end).
2329   size_t target_size;
2330   if (partially) {
2331     target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2332   } else {
2333     target_size = 0;
2334   }
2335 
2336   if (_task_queue->size() > target_size) {
2337     G1TaskQueueEntry entry;
2338     bool ret = _task_queue->pop_local(entry);
2339     while (ret) {
2340       scan_task_entry(entry);
2341       if (_task_queue->size() <= target_size || has_aborted()) {
2342         ret = false;
2343       } else {
2344         ret = _task_queue->pop_local(entry);
2345       }
2346     }
2347   }
2348 }
2349 
2350 void G1CMTask::drain_global_stack(bool partially) {
2351   if (has_aborted()) {
2352     return;
2353   }
2354 
2355   // We have a policy to drain the local queue before we attempt to
2356   // drain the global stack.
2357   assert(partially || _task_queue->size() == 0, "invariant");
2358 
2359   // Decide what the target size is, depending on whether we're going to
2360   // drain it partially (so that other tasks can steal if they run out
2361   // of things to do) or totally (at the very end).
2362   // Notice that when draining the global mark stack partially, due to the raciness
2363   // of the mark stack size update, we might in fact drop below the target. But
2364   // this is not a problem.
2365   // In case of total draining, we simply process until the global mark stack is
2366   // totally empty, disregarding the size counter.
2367   if (partially) {
2368     size_t const target_size = _cm->partial_mark_stack_size_target();
2369     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2370       if (get_entries_from_global_stack()) {
2371         drain_local_queue(partially);
2372       }
2373     }
2374   } else {
2375     while (!has_aborted() && get_entries_from_global_stack()) {
2376       drain_local_queue(partially);
2377     }
2378   }
2379 }
2380 
2381 // The SATB queue makes several assumptions about whether to call the par or
2382 // non-par versions of the methods. This is why some of the code is
2383 // replicated. We should really get rid of the single-threaded version
2384 // of the code to simplify things.
2385 void G1CMTask::drain_satb_buffers() {
2386   if (has_aborted()) {
2387     return;
2388   }
2389 
2390   // We set this so that the regular clock knows that we're in the
2391   // middle of draining buffers and doesn't set the abort flag when it
2392   // notices that SATB buffers are available for draining. It'd be
2393   // very counterproductive if it did that. :-)
2394   _draining_satb_buffers = true;
2395 
2396   G1CMSATBBufferClosure satb_cl(this, _g1h);
2397   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2398 
2399   // This keeps claiming and applying the closure to completed buffers
2400   // until we run out of buffers or we need to abort.
2401   while (!has_aborted() &&
2402          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2403     abort_marking_if_regular_check_fail();
2404   }
2405 
2406   _draining_satb_buffers = false;
2407 
2408   assert(has_aborted() ||
2409          _cm->concurrent() ||
2410          satb_mq_set.completed_buffers_num() == 0, "invariant");
2411 
2412   // Again, this was a potentially expensive operation; decrease the
2413   // limits to get the regular clock call earlier.
2414   decrease_limits();
2415 }
2416 
2417 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2418   _mark_stats_cache.reset(region_idx);
2419 }
2420 
2421 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2422   return _mark_stats_cache.evict_all();
2423 }
2424 
2425 void G1CMTask::print_stats() {
2426   log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2427   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2428                        _elapsed_time_ms, _termination_time_ms);
2429   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
2430                        _step_times_ms.num(),
2431                        _step_times_ms.avg(),
2432                        _step_times_ms.sd(),
2433                        _step_times_ms.maximum(),
2434                        _step_times_ms.sum());
2435   size_t const hits = _mark_stats_cache.hits();
2436   size_t const misses = _mark_stats_cache.misses();
2437   log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2438                        hits, misses, percent_of(hits, hits + misses));
2439 }
2440 
2441 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
2442   return _task_queues->steal(worker_id, task_entry);
2443 }
2444 
2445 /*****************************************************************************
2446 
2447     The do_marking_step(time_target_ms, ...) method is the building
2448     block of the parallel marking framework. It can be called in parallel
2449     with other invocations of do_marking_step() on different tasks
2450     (but only one per task, obviously) and concurrently with the
2451     mutator threads, or during remark, hence it eliminates the need
2452     for two versions of the code. When called during remark, it will
2453     pick up from where the task left off during the concurrent marking
2454     phase. Interestingly, tasks are also claimable during evacuation
2455     pauses, since do_marking_step() ensures that it aborts before
2456     it needs to yield.
2457 
2458     The data structures that it uses to do marking work are the
2459     following:
2460 
2461       (1) Marking Bitmap. If there are gray objects that appear only
2462       on the bitmap (this happens either when dealing with an overflow
2463       or when the initial marking phase has simply marked the roots
2464       and didn't push them on the stack), then tasks claim heap
2465       regions whose bitmap they then scan to find gray objects. A
2466       global finger indicates where the end of the last claimed region
2467       is. A local finger indicates how far into the region a task has
2468       scanned. The two fingers are used to determine how to gray an
2469       object (i.e. whether simply marking it is OK, as it will be
2470       visited by a task in the future, or whether it needs to be also
2471       pushed on a stack).
2472 
2473       (2) Local Queue. The local queue of the task which is accessed
2474       reasonably efficiently by the task. Other tasks can steal from
2475       it when they run out of work. Throughout the marking phase, a
2476       task attempts to keep its local queue short but not totally
2477       empty, so that entries are available for stealing by other
2478       tasks. Only when there is no more work, a task will totally
2479       drain its local queue.
2480 
2481       (3) Global Mark Stack. This handles local queue overflow. During
2482       marking only sets of entries are moved between it and the local
2483       queues, as access to it requires a mutex and more fine-grained
2484       interaction with it, which might cause contention. If it
2485       overflows, then the marking phase should restart and iterate
2486       over the bitmap to identify gray objects. Throughout the marking
2487       phase, tasks attempt to keep the global mark stack at a small
2488       length but not totally empty, so that entries are available for
2489       popping by other tasks. Only when there is no more work, tasks
2490       will totally drain the global mark stack.
2491 
2492       (4) SATB Buffer Queue. This is where completed SATB buffers are
2493       made available. Buffers are regularly removed from this queue
2494       and scanned for roots, so that the queue doesn't get too
2495       long. During remark, all completed buffers are processed, as
2496       well as the filled in parts of any uncompleted buffers.
2497 
2498     The do_marking_step() method tries to abort when the time target
2499     has been reached. There are a few other cases when the
2500     do_marking_step() method also aborts:
2501 
2502       (1) When the marking phase has been aborted (after a Full GC).
2503 
2504       (2) When a global overflow (on the global stack) has been
2505       triggered. Before the task aborts, it will actually sync up with
2506       the other tasks to ensure that all the marking data structures
2507       (local queues, stacks, fingers etc.)  are re-initialized so that
2508       when do_marking_step() completes, the marking phase can
2509       immediately restart.
2510 
2511       (3) When enough completed SATB buffers are available. The
2512       do_marking_step() method only tries to drain SATB buffers right
2513       at the beginning. So, if enough buffers are available, the
2514       marking step aborts and the SATB buffers are processed at
2515       the beginning of the next invocation.
2516 
2517       (4) To yield. When we have to yield, we abort and yield
2518       right at the end of do_marking_step(). This saves us from a lot
2519       of hassle as, by yielding, we might allow a Full GC. If this
2520       happens then objects will be compacted underneath our feet, the
2521       heap might shrink, etc. We save checking for this by just
2522       aborting and doing the yield right at the end.
2523 
2524     From the above it follows that the do_marking_step() method should
2525     be called in a loop (or, otherwise, regularly) until it completes.
2526 
2527     If a marking step completes without its has_aborted() flag being
2528     true, it means it has completed the current marking phase (and
2529     also all other marking tasks have done so and have all synced up).
2530 
2531     A method called regular_clock_call() is invoked "regularly" (in
2532     sub-millisecond intervals) throughout marking. It is this clock method that
2533     checks all the abort conditions which were mentioned above and
2534     decides when the task should abort. A work-based scheme is used to
2535     trigger this clock method: when the number of object words the
2536     marking phase has scanned or the number of references the marking
2537     phase has visited reach a given limit. Additional invocations of
2538     the clock method have been planted in a few other strategic places
2539     too. The initial reason for the clock method was to avoid calling
2540     vtime too regularly, as it is quite expensive. So, once it was in
2541     place, it was natural to piggy-back all the other conditions on it
2542     too and not constantly check them throughout the code.
2543 
2544     If do_termination is true then do_marking_step will enter its
2545     termination protocol.
2546 
2547     The value of is_serial must be true when do_marking_step is being
2548     called serially (i.e. by the VMThread) and do_marking_step should
2549     skip any synchronization in the termination and overflow code.
2550     Examples include the serial remark code and the serial reference
2551     processing closures.
2552 
2553     The value of is_serial must be false when do_marking_step is
2554     being called by any of the worker threads in a work gang.
2555     Examples include the concurrent marking code (CMMarkingTask),
2556     the MT remark code, and the MT reference processing closures.
2557 
2558  *****************************************************************************/
2559 
2560 void G1CMTask::do_marking_step(double time_target_ms,
2561                                bool do_termination,
2562                                bool is_serial) {
2563   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2564 
2565   _start_time_ms = os::elapsedVTime() * 1000.0;
2566 
2567   // If do_stealing is true then do_marking_step will attempt to
2568   // steal work from the other G1CMTasks. It only makes sense to
2569   // enable stealing when the termination protocol is enabled
2570   // and do_marking_step() is not being called serially.
2571   bool do_stealing = do_termination && !is_serial;
2572 
2573   double diff_prediction_ms = _g1h->policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2574   _time_target_ms = time_target_ms - diff_prediction_ms;
2575 
2576   // set up the variables that are used in the work-based scheme to
2577   // call the regular clock method
2578   _words_scanned = 0;
2579   _refs_reached  = 0;
2580   recalculate_limits();
2581 
2582   // clear all flags
2583   clear_has_aborted();
2584   _has_timed_out = false;
2585   _draining_satb_buffers = false;
2586 
2587   ++_calls;
2588 
2589   // Set up the bitmap and oop closures. Anything that uses them is
2590   // eventually called from this method, so it is OK to allocate these
2591   // on the stack.
2592   G1CMBitMapClosure bitmap_closure(this, _cm);
2593   G1CMOopClosure cm_oop_closure(_g1h, this);
2594   set_cm_oop_closure(&cm_oop_closure);
2595 
2596   if (_cm->has_overflown()) {
2597     // This can happen if the mark stack overflows during a GC pause
2598     // and this task, after a yield point, restarts. We have to abort
2599     // as we need to get into the overflow protocol which happens
2600     // right at the end of this task.
2601     set_has_aborted();
2602   }
2603 
2604   // First drain any available SATB buffers. After this, we will not
2605   // look at SATB buffers before the next invocation of this method.
2606   // If enough completed SATB buffers are queued up, the regular clock
2607   // will abort this task so that it restarts.
2608   drain_satb_buffers();
2609   // ...then partially drain the local queue and the global stack
2610   drain_local_queue(true);
2611   drain_global_stack(true);
2612 
2613   do {
2614     if (!has_aborted() && _curr_region != NULL) {
2615       // This means that we're already holding on to a region.
2616       assert(_finger != NULL, "if region is not NULL, then the finger "
2617              "should not be NULL either");
2618 
2619       // We might have restarted this task after an evacuation pause
2620       // which might have evacuated the region we're holding on to
2621       // underneath our feet. Let's read its limit again to make sure
2622       // that we do not iterate over a region of the heap that
2623       // contains garbage (update_region_limit() will also move
2624       // _finger to the start of the region if it is found empty).
2625       update_region_limit();
2626       // We will start from _finger not from the start of the region,
2627       // as we might be restarting this task after aborting half-way
2628       // through scanning this region. In this case, _finger points to
2629       // the address where we last found a marked object. If this is a
2630       // fresh region, _finger points to start().
2631       MemRegion mr = MemRegion(_finger, _region_limit);
2632 
2633       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2634              "humongous regions should go around loop once only");
2635 
2636       // Some special cases:
2637       // If the memory region is empty, we can just give up the region.
2638       // If the current region is humongous then we only need to check
2639       // the bitmap for the bit associated with the start of the object,
2640       // scan the object if it's live, and give up the region.
2641       // Otherwise, let's iterate over the bitmap of the part of the region
2642       // that is left.
2643       // If the iteration is successful, give up the region.
2644       if (mr.is_empty()) {
2645         giveup_current_region();
2646         abort_marking_if_regular_check_fail();
2647       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2648         if (_next_mark_bitmap->is_marked(mr.start())) {
2649           // The object is marked - apply the closure
2650           bitmap_closure.do_addr(mr.start());
2651         }
2652         // Even if this task aborted while scanning the humongous object
2653         // we can (and should) give up the current region.
2654         giveup_current_region();
2655         abort_marking_if_regular_check_fail();
2656       } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2657         giveup_current_region();
2658         abort_marking_if_regular_check_fail();
2659       } else {
2660         assert(has_aborted(), "currently the only way to do so");
2661         // The only way to abort the bitmap iteration is to return
2662         // false from the do_addr() method. However, inside the
2663         // do_addr() method we move the _finger to point to the
2664         // object currently being looked at. So, if we bail out, we
2665         // have definitely set _finger to something non-null.
2666         assert(_finger != NULL, "invariant");
2667 
2668         // Region iteration was actually aborted. So now _finger
2669         // points to the address of the object we last scanned. If we
2670         // leave it there, when we restart this task, we will rescan
2671         // the object. It is easy to avoid this. We move the finger by
2672         // enough to point to the next possible object header.
2673         assert(_finger < _region_limit, "invariant");
2674         HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2675         // Check if bitmap iteration was aborted while scanning the last object
2676         if (new_finger >= _region_limit) {
2677           giveup_current_region();
2678         } else {
2679           move_finger_to(new_finger);
2680         }
2681       }
2682     }
2683     // At this point we have either completed iterating over the
2684     // region we were holding on to, or we have aborted.
2685 
2686     // We then partially drain the local queue and the global stack.
2687     // (Do we really need this?)
2688     drain_local_queue(true);
2689     drain_global_stack(true);
2690 
2691     // Read the note on the claim_region() method on why it might
2692     // return NULL with potentially more regions available for
2693     // claiming and why we have to check out_of_regions() to determine
2694     // whether we're done or not.
2695     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2696       // We are going to try to claim a new region. We should have
2697       // given up on the previous one.
2698       // Separated the asserts so that we know which one fires.
2699       assert(_curr_region  == NULL, "invariant");
2700       assert(_finger       == NULL, "invariant");
2701       assert(_region_limit == NULL, "invariant");
2702       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2703       if (claimed_region != NULL) {
2704         // Yes, we managed to claim one
2705         setup_for_region(claimed_region);
2706         assert(_curr_region == claimed_region, "invariant");
2707       }
2708       // It is important to call the regular clock here. It might take
2709       // a while to claim a region if, for example, we hit a large
2710       // block of empty regions. So we need to call the regular clock
2711       // method once round the loop to make sure it's called
2712       // frequently enough.
2713       abort_marking_if_regular_check_fail();
2714     }
2715 
2716     if (!has_aborted() && _curr_region == NULL) {
2717       assert(_cm->out_of_regions(),
2718              "at this point we should be out of regions");
2719     }
2720   } while (_curr_region != NULL && !has_aborted());
2721 
2722   if (!has_aborted()) {
2723     // We cannot check whether the global stack is empty, since other
2724     // tasks might be pushing objects to it concurrently.
2725     assert(_cm->out_of_regions(),
2726            "at this point we should be out of regions");
2727     // Try to reduce the number of available SATB buffers so that
2728     // remark has less work to do.
2729     drain_satb_buffers();
2730   }
2731 
2732   // Since we've done everything else, we can now totally drain the
2733   // local queue and global stack.
2734   drain_local_queue(false);
2735   drain_global_stack(false);
2736 
2737   // Attempt at work stealing from other tasks' queues.
2738   if (do_stealing && !has_aborted()) {
2739     // We have not aborted. This means that we have finished all that
2740     // we could. Let's try to do some stealing...
2741 
2742     // We cannot check whether the global stack is empty, since other
2743     // tasks might be pushing objects to it concurrently.
2744     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2745            "only way to reach here");
2746     while (!has_aborted()) {
2747       G1TaskQueueEntry entry;
2748       if (_cm->try_stealing(_worker_id, entry)) {
2749         scan_task_entry(entry);
2750 
2751         // And since we're towards the end, let's totally drain the
2752         // local queue and global stack.
2753         drain_local_queue(false);
2754         drain_global_stack(false);
2755       } else {
2756         break;
2757       }
2758     }
2759   }
2760 
2761   // We still haven't aborted. Now, let's try to get into the
2762   // termination protocol.
2763   if (do_termination && !has_aborted()) {
2764     // We cannot check whether the global stack is empty, since other
2765     // tasks might be concurrently pushing objects on it.
2766     // Separated the asserts so that we know which one fires.
2767     assert(_cm->out_of_regions(), "only way to reach here");
2768     assert(_task_queue->size() == 0, "only way to reach here");
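         // os::elapsedVTime() reports the current thread's (virtual) CPU time
         // in seconds; scale it to milliseconds for the timing statistics.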
2769     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2770 
2771     // The G1CMTask class also extends the TerminatorTerminator class,
2772     // so its should_exit_termination() method also decides whether
2773     // to exit the termination protocol.
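         // In the serial case there is only this one task, so it can finish
         // right away; parallel tasks have to agree on termination via the
         // terminator below.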
2774     bool finished = (is_serial ||
2775                      _cm->terminator()->offer_termination(this));
2776     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2777     _termination_time_ms +=
2778       termination_end_time_ms - _termination_start_time_ms;
2779 
2780     if (finished) {
2781       // We're all done.
2782 
2783       // We can now guarantee that the global stack is empty, since
2784       // all other tasks have finished. We separated the guarantees so
2785       // that, if a condition is false, we can immediately find out
2786       // which one.
2787       guarantee(_cm->out_of_regions(), "only way to reach here");
2788       guarantee(_cm->mark_stack_empty(), "only way to reach here");
2789       guarantee(_task_queue->size() == 0, "only way to reach here");
2790       guarantee(!_cm->has_overflown(), "only way to reach here");
2791       guarantee(!has_aborted(), "should never happen if termination has completed");
2792     } else {
2793       // Apparently there's more work to do. Let's abort this task. Its
2794       // caller will restart it and we can hopefully find more things to do.
2795       set_has_aborted();
2796     }
2797   }
2798 
2799   // Mainly for debugging purposes to make sure that a pointer to the
2800   // closure which was allocated on this frame's stack doesn't
2801   // escape it by accident.
2802   set_cm_oop_closure(NULL);
2803   double end_time_ms = os::elapsedVTime() * 1000.0;
2804   double elapsed_time_ms = end_time_ms - _start_time_ms;
2805   // Update the step history.
2806   _step_times_ms.add(elapsed_time_ms);
2807 
2808   if (has_aborted()) {
2809     // The task was aborted for some reason.
2810     if (_has_timed_out) {
2811       double diff_ms = elapsed_time_ms - _time_target_ms;
2812       // Keep statistics of how well we did with respect to hitting
2813       // our target only if we actually timed out (if we aborted for
2814       // other reasons, then the results might get skewed).
2815       _marking_step_diffs_ms.add(diff_ms);
2816     }
2817 
2818     if (_cm->has_overflown()) {
2819       // This is the interesting one. We aborted because a global
2820       // overflow was raised. This means we have to restart the
2821       // marking phase and start iterating over regions. However, in
2822       // order to do this we have to make sure that all tasks stop
2823       // what they are doing and re-initialize in a safe manner. We
2824       // will achieve this with the use of two barrier sync points.
2825 
2826       if (!is_serial) {
2827         // We only need to enter the sync barrier if being called
2828         // from a parallel context
2829         _cm->enter_first_sync_barrier(_worker_id);
2830 
2831         // When we exit this sync barrier we know that all tasks have
2832         // stopped doing marking work. So, it's now safe to
2833         // re-initialize our data structures.
2834       }
2835 
2836       clear_region_fields();
2837       flush_mark_stats_cache();
2838 
2839       if (!is_serial) {
2840         // If we're executing the concurrent phase of marking, reset the marking
2841         // state; otherwise the marking state is reset after reference processing,
2842         // during the remark pause.
2843         // If we reset here as a result of an overflow during the remark we will
2844         // see assertion failures from any subsequent set_concurrency_and_phase()
2845         // calls.
2846         if (_cm->concurrent() && _worker_id == 0) {
2847           // Worker 0 is responsible for clearing the global data structures because
2848           // of an overflow. During STW we should not clear the overflow flag (in
2849           // G1ConcurrentMark::reset_marking_for_restart()) since we rely on it being
2850           // true when we exit this method to abort the pause and restart concurrent marking.
2851           _cm->reset_marking_for_restart();
2852 
2853           log_info(gc, marking)("Concurrent Mark reset for overflow");
2854         }
2855 
2856         // ...and enter the second barrier.
2857         _cm->enter_second_sync_barrier(_worker_id);
2858       }
2859       // At this point, if we're during the concurrent phase of
2860       // marking, everything has been re-initialized and we're
2861       // ready to restart.
2862     }
2863   }
2864 }
2865 
2866 G1CMTask::G1CMTask(uint worker_id,
2867                    G1ConcurrentMark* cm,
2868                    G1CMTaskQueue* task_queue,
2869                    G1RegionMarkStats* mark_stats,
2870                    uint max_regions) :
2871   _objArray_processor(this),
2872   _worker_id(worker_id),
2873   _g1h(G1CollectedHeap::heap()),
2874   _cm(cm),
2875   _next_mark_bitmap(NULL),
2876   _task_queue(task_queue),
2877   _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
2878   _calls(0),
2879   _time_target_ms(0.0),
2880   _start_time_ms(0.0),
2881   _cm_oop_closure(NULL),
2882   _curr_region(NULL),
2883   _finger(NULL),
2884   _region_limit(NULL),
2885   _words_scanned(0),
2886   _words_scanned_limit(0),
2887   _real_words_scanned_limit(0),
2888   _refs_reached(0),
2889   _refs_reached_limit(0),
2890   _real_refs_reached_limit(0),
2891   _has_aborted(false),
2892   _has_timed_out(false),
2893   _draining_satb_buffers(false),
2894   _step_times_ms(),
2895   _elapsed_time_ms(0.0),
2896   _termination_time_ms(0.0),
2897   _termination_start_time_ms(0.0),
2898   _marking_step_diffs_ms()
2899 {
2900   guarantee(task_queue != NULL, "invariant");
2901 
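       // Seed the step-time diff statistics with a small initial sample so
       // that predictions do not have to start from an empty sequence.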
2902   _marking_step_diffs_ms.add(0.5);
2903 }
2904 
2905 // These are formatting macros that are used below to ensure
2906 // consistent formatting. The *_H_* versions are used to format the
2907 // header for a particular value and they should be kept consistent
2908 // with the corresponding macro. Also note that most of the macros add
2909 // the necessary white space (as a prefix) which makes them a bit
2910 // easier to compose.
2911 
2912 // All the output lines are prefixed with this string to be able to
2913 // identify them easily in a large log file.
2914 #define G1PPRL_LINE_PREFIX            "###"
2915 
2916 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
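     // The header width below matches the body format above: two PTR_FORMAT
     // addresses ("0x" plus 16 or 8 hex digits) joined by '-', i.e.
     // 2 * 18 + 1 = 37 characters on LP64 and 2 * 10 + 1 = 21 on 32-bit.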
2917 #ifdef _LP64
2918 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
2919 #else // _LP64
2920 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
2921 #endif // _LP64
2922 
2923 // For per-region info
2924 #define G1PPRL_TYPE_FORMAT            "   %-4s"
2925 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
2926 #define G1PPRL_STATE_FORMAT           "   %-5s"
2927 #define G1PPRL_STATE_H_FORMAT         "   %5s"
2928 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
2929 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
2930 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
2931 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
2932 
2933 // For summary info
2934 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
2935 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
2936 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
2937 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
2938 
2939 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
2940   _total_used_bytes(0), _total_capacity_bytes(0),
2941   _total_prev_live_bytes(0), _total_next_live_bytes(0),
2942   _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
2943 {
2944   if (!log_is_enabled(Trace, gc, liveness)) {
2945     return;
2946   }
2947 
2948   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2949   MemRegion g1_reserved = g1h->g1_reserved();
2950   double now = os::elapsedTime();
2951 
2952   // Print the header of the output.
2953   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
2954   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
2955                           G1PPRL_SUM_ADDR_FORMAT("reserved")
2956                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
2957                           p2i(g1_reserved.start()), p2i(g1_reserved.end()),
2958                           HeapRegion::GrainBytes);
2959   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
2960   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2961                           G1PPRL_TYPE_H_FORMAT
2962                           G1PPRL_ADDR_BASE_H_FORMAT
2963                           G1PPRL_BYTE_H_FORMAT
2964                           G1PPRL_BYTE_H_FORMAT
2965                           G1PPRL_BYTE_H_FORMAT
2966                           G1PPRL_DOUBLE_H_FORMAT
2967                           G1PPRL_BYTE_H_FORMAT
2968                           G1PPRL_STATE_H_FORMAT
2969                           G1PPRL_BYTE_H_FORMAT,
2970                           "type", "address-range",
2971                           "used", "prev-live", "next-live", "gc-eff",
2972                           "remset", "state", "code-roots");
2973   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2974                           G1PPRL_TYPE_H_FORMAT
2975                           G1PPRL_ADDR_BASE_H_FORMAT
2976                           G1PPRL_BYTE_H_FORMAT
2977                           G1PPRL_BYTE_H_FORMAT
2978                           G1PPRL_BYTE_H_FORMAT
2979                           G1PPRL_DOUBLE_H_FORMAT
2980                           G1PPRL_BYTE_H_FORMAT
2981                           G1PPRL_STATE_H_FORMAT
2982                           G1PPRL_BYTE_H_FORMAT,
2983                           "", "",
2984                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
2985                           "(bytes)", "", "(bytes)");
2986 }
2987 
2988 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
2989   if (!log_is_enabled(Trace, gc, liveness)) {
2990     return false;
2991   }
2992 
2993   const char* type       = r->get_type_str();
2994   HeapWord* bottom       = r->bottom();
2995   HeapWord* end          = r->end();
2996   size_t capacity_bytes  = r->capacity();
2997   size_t used_bytes      = r->used();
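       // "prev" liveness is based on the previously completed marking,
       // "next" liveness on the marking currently in progress.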
2998   size_t prev_live_bytes = r->live_bytes();
2999   size_t next_live_bytes = r->next_live_bytes();
3000   double gc_eff          = r->gc_efficiency();
3001   size_t remset_bytes    = r->rem_set()->mem_size();
3002   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3003   const char* remset_type = r->rem_set()->get_short_state_str();
3004 
3005   _total_used_bytes      += used_bytes;
3006   _total_capacity_bytes  += capacity_bytes;
3007   _total_prev_live_bytes += prev_live_bytes;
3008   _total_next_live_bytes += next_live_bytes;
3009   _total_remset_bytes    += remset_bytes;
3010   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3011 
3012   // Print a line for this particular region.
3013   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3014                           G1PPRL_TYPE_FORMAT
3015                           G1PPRL_ADDR_BASE_FORMAT
3016                           G1PPRL_BYTE_FORMAT
3017                           G1PPRL_BYTE_FORMAT
3018                           G1PPRL_BYTE_FORMAT
3019                           G1PPRL_DOUBLE_FORMAT
3020                           G1PPRL_BYTE_FORMAT
3021                           G1PPRL_STATE_FORMAT
3022                           G1PPRL_BYTE_FORMAT,
3023                           type, p2i(bottom), p2i(end),
3024                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3025                           remset_bytes, remset_type, strong_code_roots_bytes);
3026 
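       // Returning false keeps the heap region iteration going; this closure
       // never terminates it early.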
3027   return false;
3028 }
3029 
3030 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3031   if (!log_is_enabled(Trace, gc, liveness)) {
3032     return;
3033   }
3034 
3035   // Add the static memory usage to the remembered set sizes.
3036   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3037   // Print the footer of the output.
3038   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3039   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3040                          " SUMMARY"
3041                          G1PPRL_SUM_MB_FORMAT("capacity")
3042                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3043                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3044                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3045                          G1PPRL_SUM_MB_FORMAT("remset")
3046                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3047                          bytes_to_mb(_total_capacity_bytes),
3048                          bytes_to_mb(_total_used_bytes),
3049                          percent_of(_total_used_bytes, _total_capacity_bytes),
3050                          bytes_to_mb(_total_prev_live_bytes),
3051                          percent_of(_total_prev_live_bytes, _total_capacity_bytes),
3052                          bytes_to_mb(_total_next_live_bytes),
3053                          percent_of(_total_next_live_bytes, _total_capacity_bytes),
3054                          bytes_to_mb(_total_remset_bytes),
3055                          bytes_to_mb(_total_strong_code_roots_bytes));
3056 }