1 /*
   2  * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "classfile/classLoaderDataGraph.inline.hpp"
  28 #include "classfile/javaClasses.inline.hpp"
  29 #include "compiler/oopMap.hpp"
  30 #include "gc/g1/g1Allocator.hpp"
  31 #include "gc/g1/g1CardSetMemory.hpp"
  32 #include "gc/g1/g1CollectedHeap.inline.hpp"
  33 #include "gc/g1/g1CollectionSetCandidates.inline.hpp"
  34 #include "gc/g1/g1CollectorState.hpp"
  35 #include "gc/g1/g1ConcurrentMark.hpp"
  36 #include "gc/g1/g1GCPhaseTimes.hpp"
  37 #include "gc/g1/g1EvacFailureRegions.inline.hpp"
  38 #include "gc/g1/g1EvacInfo.hpp"
  39 #include "gc/g1/g1HRPrinter.hpp"
  40 #include "gc/g1/g1MonitoringSupport.hpp"
  41 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  42 #include "gc/g1/g1Policy.hpp"
  43 #include "gc/g1/g1RedirtyCardsQueue.hpp"
  44 #include "gc/g1/g1RegionPinCache.inline.hpp"
  45 #include "gc/g1/g1RemSet.hpp"
  46 #include "gc/g1/g1RootProcessor.hpp"
  47 #include "gc/g1/g1Trace.hpp"
  48 #include "gc/g1/g1YoungCollector.hpp"
  49 #include "gc/g1/g1YoungGCAllocationFailureInjector.hpp"
  50 #include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
  51 #include "gc/g1/g1YoungGCPreEvacuateTasks.hpp"
  52 #include "gc/g1/g1_globals.hpp"
  53 #include "gc/shared/concurrentGCBreakpoints.hpp"
  54 #include "gc/shared/gcTraceTime.inline.hpp"
  55 #include "gc/shared/gcTimer.hpp"
  56 #include "gc/shared/referenceProcessor.hpp"
  57 #include "gc/shared/weakProcessor.inline.hpp"
  58 #include "gc/shared/workerPolicy.hpp"
  59 #include "gc/shared/workerThread.hpp"
  60 #include "jfr/jfrEvents.hpp"
  61 #include "memory/resourceArea.hpp"
  62 #include "runtime/threads.hpp"
  63 #include "utilities/ticks.hpp"
  64 
  65 // GCTraceTime wrapper that constructs the message according to GC pause type and
  66 // GC cause.
  67 // The code relies on the fact that GCTraceTimeWrapper stores the string passed
  68 // initially as a reference only, so that we can modify it as needed.
class G1YoungGCTraceTime {
  G1YoungCollector* _collector;

  // Snapshot of pause type and cause taken at construction time, used to
  // build the log message.
  G1GCPauseType _pause_type;
  GCCause::Cause _pause_cause;

  static const uint MaxYoungGCNameLength = 128;
  // Backing storage for the GC name. GCTraceTime only stores the pointer
  // passed to it, so rewriting this buffer in the destructor updates the
  // message printed at the end of the pause.
  char _young_gc_name_data[MaxYoungGCNameLength];

  GCTraceTime(Info, gc) _tt;

  // (Re)builds the GC name into _young_gc_name_data, appending evacuation
  // failure information (allocation failure and/or pinned regions) when any
  // occurred. Returns a pointer to the internal buffer.
  const char* update_young_gc_name() {
    char evacuation_failed_string[48];
    evacuation_failed_string[0] = '\0';

    if (_collector->evacuation_failed()) {
      snprintf(evacuation_failed_string,
               ARRAY_SIZE(evacuation_failed_string),
               " (Evacuation Failure: %s%s%s)",
               _collector->evacuation_alloc_failed() ? "Allocation" : "",
               _collector->evacuation_alloc_failed() && _collector->evacuation_pinned() ? " / " : "",
               _collector->evacuation_pinned() ? "Pinned" : "");
    }
    snprintf(_young_gc_name_data,
             MaxYoungGCNameLength,
             "Pause Young (%s) (%s)%s",
             G1GCPauseTypeHelper::to_string(_pause_type),
             GCCause::to_string(_pause_cause),
             evacuation_failed_string);
    return _young_gc_name_data;
  }

public:
  G1YoungGCTraceTime(G1YoungCollector* collector, GCCause::Cause cause) :
    _collector(collector),
    // Take snapshot of current pause type at start as it may be modified during gc.
    // The strings for all Concurrent Start pauses are the same, so the parameter
    // does not matter here.
    _pause_type(_collector->collector_state()->young_gc_pause_type(false /* concurrent_operation_is_full_mark */)),
    _pause_cause(cause),
    // Fake a "no cause" and manually add the correct string in update_young_gc_name()
    // to make the string look more natural.
    _tt(update_young_gc_name(), nullptr, GCCause::_no_gc, true) {
  }

  ~G1YoungGCTraceTime() {
    // Rebuild the name so that the end-of-pause log line reflects any
    // evacuation failure that happened during the collection.
    update_young_gc_name();
  }
};
 118 
 119 class G1YoungGCNotifyPauseMark : public StackObj {
 120   G1YoungCollector* _collector;
 121 
 122 public:
 123   G1YoungGCNotifyPauseMark(G1YoungCollector* collector) : _collector(collector) {
 124     G1CollectedHeap::heap()->policy()->record_young_gc_pause_start();
 125   }
 126 
 127   ~G1YoungGCNotifyPauseMark() {
 128     G1CollectedHeap::heap()->policy()->record_young_gc_pause_end(_collector->evacuation_failed());
 129   }
 130 };
 131 
class G1YoungGCJFRTracerMark : public G1JFRTracerMark {
  G1EvacInfo _evacuation_info;

  // The tracer passed in is always a G1NewTracer; narrow the base pointer.
  G1NewTracer* tracer() const { return (G1NewTracer*)_tracer; }

public:

  G1EvacInfo* evacuation_info() { return &_evacuation_info; }

  // NOTE(review): the "cause" parameter is unused here - confirm whether it
  // can be dropped at the call site.
  G1YoungGCJFRTracerMark(STWGCTimer* gc_timer_stw, G1NewTracer* gc_tracer_stw, GCCause::Cause cause) :
    G1JFRTracerMark(gc_timer_stw, gc_tracer_stw), _evacuation_info() { }

  void report_pause_type(G1GCPauseType type) {
    tracer()->report_young_gc_pause(type);
  }

  ~G1YoungGCJFRTracerMark() {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    // Report evacuation statistics and the tenuring threshold at the end of
    // the pause.
    tracer()->report_evacuation_info(&_evacuation_info);
    tracer()->report_tenuring_threshold(g1h->policy()->tenuring_threshold());
  }
};
 155 
 156 class G1YoungGCVerifierMark : public StackObj {
 157   G1YoungCollector* _collector;
 158   G1HeapVerifier::G1VerifyType _type;
 159 
 160   static G1HeapVerifier::G1VerifyType young_collection_verify_type() {
 161     G1CollectorState* state = G1CollectedHeap::heap()->collector_state();
 162     if (state->in_concurrent_start_gc()) {
 163       return G1HeapVerifier::G1VerifyConcurrentStart;
 164     } else if (state->in_young_only_phase()) {
 165       return G1HeapVerifier::G1VerifyYoungNormal;
 166     } else {
 167       return G1HeapVerifier::G1VerifyMixed;
 168     }
 169   }
 170 
 171 public:
 172   G1YoungGCVerifierMark(G1YoungCollector* collector) : _collector(collector), _type(young_collection_verify_type()) {
 173     G1CollectedHeap::heap()->verify_before_young_collection(_type);
 174   }
 175 
 176   ~G1YoungGCVerifierMark() {
 177     // Inject evacuation failure tag into type if needed.
 178     G1HeapVerifier::G1VerifyType type = _type;
 179     if (_collector->evacuation_failed()) {
 180       type = (G1HeapVerifier::G1VerifyType)(type | G1HeapVerifier::G1VerifyYoungEvacFail);
 181     }
 182     G1CollectedHeap::heap()->verify_after_young_collection(type);
 183   }
 184 };
 185 
// Convenience accessors that simply delegate to the corresponding
// G1CollectedHeap components.

G1Allocator* G1YoungCollector::allocator() const {
  return _g1h->allocator();
}

G1CollectionSet* G1YoungCollector::collection_set() const {
  return _g1h->collection_set();
}

G1CollectorState* G1YoungCollector::collector_state() const {
  return _g1h->collector_state();
}

G1ConcurrentMark* G1YoungCollector::concurrent_mark() const {
  return _g1h->concurrent_mark();
}

STWGCTimer* G1YoungCollector::gc_timer_stw() const {
  return _g1h->gc_timer_stw();
}

G1NewTracer* G1YoungCollector::gc_tracer_stw() const {
  return _g1h->gc_tracer_stw();
}

G1Policy* G1YoungCollector::policy() const {
  return _g1h->policy();
}

G1GCPhaseTimes* G1YoungCollector::phase_times() const {
  return _g1h->phase_times();
}

G1HRPrinter* G1YoungCollector::hr_printer() const {
  return _g1h->hr_printer();
}

G1MonitoringSupport* G1YoungCollector::monitoring_support() const {
  return _g1h->monitoring_support();
}

G1RemSet* G1YoungCollector::rem_set() const {
  return _g1h->rem_set();
}

G1ScannerTasksQueueSet* G1YoungCollector::task_queues() const {
  return _g1h->task_queues();
}

G1SurvivorRegions* G1YoungCollector::survivor_regions() const {
  return _g1h->survivor();
}

ReferenceProcessor* G1YoungCollector::ref_processor_stw() const {
  return _g1h->ref_processor_stw();
}

WorkerThreads* G1YoungCollector::workers() const {
  return _g1h->workers();
}

G1YoungGCAllocationFailureInjector* G1YoungCollector::allocation_failure_injector() const {
  return _g1h->allocation_failure_injector();
}
 249 
 250 
 251 void G1YoungCollector::wait_for_root_region_scanning() {
 252   Ticks start = Ticks::now();
 253   // We have to wait until the CM threads finish scanning the
 254   // root regions as it's the only way to ensure that all the
 255   // objects on them have been correctly scanned before we start
 256   // moving them during the GC.
 257   bool waited = concurrent_mark()->wait_until_root_region_scan_finished();
 258   Tickspan wait_time;
 259   if (waited) {
 260     wait_time = (Ticks::now() - start);
 261   }
 262   phase_times()->record_root_region_scan_wait_time(wait_time.seconds() * MILLIUNITS);
 263 }
 264 
 265 class G1PrintCollectionSetClosure : public HeapRegionClosure {
 266 private:
 267   G1HRPrinter* _hr_printer;
 268 public:
 269   G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
 270 
 271   virtual bool do_heap_region(HeapRegion* r) {
 272     _hr_printer->cset(r);
 273     return false;
 274   }
 275 };
 276 
// Finalizes the initial collection set for this pause and records its size
// in the evacuation info for later reporting.
void G1YoungCollector::calculate_collection_set(G1EvacInfo* evacuation_info, double target_pause_time_ms) {
  // Forget the current allocation region (we might even choose it to be part
  // of the collection set!) before finalizing the collection set.
  allocator()->release_mutator_alloc_regions();

  collection_set()->finalize_initial_collection_set(target_pause_time_ms, survivor_regions());
  // Total region count includes both the initial and the optional parts of
  // the collection set.
  evacuation_info->set_collection_set_regions(collection_set()->region_length() +
                                              collection_set()->optional_region_length());

  // Sanity check: concurrent mark data structures must not reference
  // collection set regions.
  concurrent_mark()->verify_no_collection_set_oops();

  if (hr_printer()->is_active()) {
    // Log every region (initial and optional) selected for the collection set.
    G1PrintCollectionSetClosure cl(hr_printer());
    collection_set()->iterate(&cl);
    collection_set()->iterate_optional(&cl);
  }
}
 294 
// Parallel task preparing all regions for evacuation: resets per-region scan
// state, samples card set memory usage, and selects humongous regions that
// are candidates for eager reclaim.
class G1PrepareEvacuationTask : public WorkerTask {
  class G1PrepareRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1PrepareEvacuationTask* _parent_task;
    uint _worker_humongous_total;       // Humongous regions visited by this worker.
    uint _worker_humongous_candidates;  // Of those, eager reclaim candidates.

    G1MonotonicArenaMemoryStats _card_set_stats;

    void sample_card_set_size(HeapRegion* hr) {
      // Sample card set sizes for young gen and humongous before GC: this makes
      // the policy to give back memory to the OS keep the most recent amount of
      // memory for these regions.
      if (hr->is_young() || hr->is_starts_humongous()) {
        _card_set_stats.add(hr->rem_set()->card_set_memory_stats());
      }
    }

    // Returns true if the humongous object starting in this region may be
    // reclaimed eagerly during this collection.
    bool humongous_region_is_candidate(HeapRegion* region) const {
      assert(region->is_starts_humongous(), "Must start a humongous object");

      oop obj = cast_to_oop(region->bottom());

      // Dead objects cannot be eager reclaim candidates. Due to class
      // unloading it is unsafe to query their classes so we return early.
      if (_g1h->is_obj_dead(obj, region)) {
        return false;
      }

      // If we do not have a complete remembered set for the region, then we can
      // not be sure that we have all references to it.
      if (!region->rem_set()->is_complete()) {
        return false;
      }
      // We also cannot collect the humongous object if it is pinned.
      if (region->has_pinned_objects()) {
        return false;
      }
      // Candidate selection must satisfy the following constraints
      // while concurrent marking is in progress:
      //
      // * In order to maintain SATB invariants, an object must not be
      // reclaimed if it was allocated before the start of marking and
      // has not had its references scanned.  Such an object must have
      // its references (including type metadata) scanned to ensure no
      // live objects are missed by the marking process.  Objects
      // allocated after the start of concurrent marking don't need to
      // be scanned.
      //
      // * An object must not be reclaimed if it is on the concurrent
      // mark stack.  Objects allocated after the start of concurrent
      // marking are never pushed on the mark stack.
      //
      // Nominating only objects allocated after the start of concurrent
      // marking is sufficient to meet both constraints.  This may miss
      // some objects that satisfy the constraints, but the marking data
      // structures don't support efficiently performing the needed
      // additional tests or scrubbing of the mark stack.
      //
      // However, we presently only nominate is_typeArray() objects.
      // A humongous object containing references induces remembered
      // set entries on other regions.  In order to reclaim such an
      // object, those remembered sets would need to be cleaned up.
      //
      // We also treat is_typeArray() objects specially, allowing them
      // to be reclaimed even if allocated before the start of
      // concurrent mark.  For this we rely on mark stack insertion to
      // exclude is_typeArray() objects, preventing reclaiming an object
      // that is in the mark stack.  We also rely on the metadata for
      // such objects to be built-in and so ensured to be kept live.
      // Frequent allocation and drop of large binary blobs is an
      // important use case for eager reclaim, and this special handling
      // may reduce needed headroom.

      return obj->is_typeArray() &&
             _g1h->is_potential_eager_reclaim_candidate(region);
    }

  public:
    G1PrepareRegionsClosure(G1CollectedHeap* g1h, G1PrepareEvacuationTask* parent_task) :
      _g1h(g1h),
      _parent_task(parent_task),
      _worker_humongous_total(0),
      _worker_humongous_candidates(0) { }

    ~G1PrepareRegionsClosure() {
      // Publish this worker's humongous region counts to the parent task.
      _parent_task->add_humongous_candidates(_worker_humongous_candidates);
      _parent_task->add_humongous_total(_worker_humongous_total);
    }

    virtual bool do_heap_region(HeapRegion* hr) {
      // First prepare the region for scanning
      _g1h->rem_set()->prepare_region_for_scan(hr);

      sample_card_set_size(hr);

      // Now check if region is a humongous candidate
      if (!hr->is_starts_humongous()) {
        _g1h->register_region_with_region_attr(hr);
        return false;
      }

      uint index = hr->hrm_index();
      if (humongous_region_is_candidate(hr)) {
        _g1h->register_humongous_candidate_region_with_region_attr(index);
        _worker_humongous_candidates++;
        // We will later handle the remembered sets of these regions.
      } else {
        _g1h->register_region_with_region_attr(hr);
      }
      log_debug(gc, humongous)("Humongous region %u (object size %zu @ " PTR_FORMAT ") remset %zu code roots %zu "
                               "marked %d pinned count %zu reclaim candidate %d type array %d",
                               index,
                               cast_to_oop(hr->bottom())->size() * HeapWordSize,
                               p2i(hr->bottom()),
                               hr->rem_set()->occupied(),
                               hr->rem_set()->code_roots_list_length(),
                               _g1h->concurrent_mark()->mark_bitmap()->is_marked(hr->bottom()),
                               hr->pinned_count(),
                               _g1h->is_humongous_reclaim_candidate(index),
                               cast_to_oop(hr->bottom())->is_typeArray()
                              );
      _worker_humongous_total++;

      return false;
    }

    G1MonotonicArenaMemoryStats card_set_stats() const {
      return _card_set_stats;
    }
  };

  G1CollectedHeap* _g1h;
  HeapRegionClaimer _claimer;
  // Aggregated over all workers via Atomic::add.
  volatile uint _humongous_total;
  volatile uint _humongous_candidates;

  // Aggregated over all workers under G1RareEvent_lock.
  G1MonotonicArenaMemoryStats _all_card_set_stats;

public:
  G1PrepareEvacuationTask(G1CollectedHeap* g1h) :
    WorkerTask("Prepare Evacuation"),
    _g1h(g1h),
    _claimer(_g1h->workers()->active_workers()),
    _humongous_total(0),
    _humongous_candidates(0) { }

  void work(uint worker_id) {
    G1PrepareRegionsClosure cl(_g1h, this);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_claimer, worker_id);

    // Merge this worker's card set statistics into the global result under
    // lock; this is not performance-critical.
    MutexLocker x(G1RareEvent_lock, Mutex::_no_safepoint_check_flag);
    _all_card_set_stats.add(cl.card_set_stats());
  }

  void add_humongous_candidates(uint candidates) {
    Atomic::add(&_humongous_candidates, candidates);
  }

  void add_humongous_total(uint total) {
    Atomic::add(&_humongous_total, total);
  }

  // The following accessors are meant to be used after the task completed.

  uint humongous_candidates() {
    return _humongous_candidates;
  }

  uint humongous_total() {
    return _humongous_total;
  }

  const G1MonotonicArenaMemoryStats all_card_set_stats() const {
    return _all_card_set_stats;
  }
};
 470 
 471 Tickspan G1YoungCollector::run_task_timed(WorkerTask* task) {
 472   Ticks start = Ticks::now();
 473   workers()->run_task(task);
 474   return Ticks::now() - start;
 475 }
 476 
 477 void G1YoungCollector::set_young_collection_default_active_worker_threads(){
 478   uint active_workers = WorkerPolicy::calc_active_workers(workers()->max_workers(),
 479                                                           workers()->active_workers(),
 480                                                           Threads::number_of_non_daemon_threads());
 481   active_workers = workers()->set_active_workers(active_workers);
 482   log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->max_workers());
 483 }
 484 
// Performs all setup work before evacuation starts: flushes thread-local
// buffers, determines the collection set, starts reference discovery,
// initializes GC allocation regions and prepares the regions for scanning.
// The steps below are order-dependent.
void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info) {
  // Flush various data in thread-local buffers to be able to determine the collection
  // set
  {
    Ticks start = Ticks::now();
    G1PreEvacuateCollectionSetBatchTask cl;
    G1CollectedHeap::heap()->run_batch_task(&cl);
    phase_times()->record_pre_evacuate_prepare_time_ms((Ticks::now() - start).seconds() * 1000.0);
  }

  // Needs log buffers flushed.
  calculate_collection_set(evacuation_info, policy()->max_pause_time_ms());

  if (collector_state()->in_concurrent_start_gc()) {
    concurrent_mark()->pre_concurrent_start(_gc_cause);
  }

  // Please see comment in g1CollectedHeap.hpp and
  // G1CollectedHeap::ref_processing_init() to see how
  // reference processing currently works in G1.
  ref_processor_stw()->start_discovery(false /* always_clear */);

  _evac_failure_regions.pre_collection(_g1h->max_reserved_regions());

  _g1h->gc_prologue(false);

  // Initialize the GC alloc regions.
  allocator()->init_gc_alloc_regions(evacuation_info);

  {
    Ticks start = Ticks::now();
    rem_set()->prepare_for_scan_heap_roots();
    phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
  }

  {
    // Prepare all regions for scanning and select humongous eager reclaim
    // candidates; record the gathered statistics.
    G1PrepareEvacuationTask g1_prep_task(_g1h);
    Tickspan task_time = run_task_timed(&g1_prep_task);

    _g1h->set_young_gen_card_set_stats(g1_prep_task.all_card_set_stats());
    _g1h->set_humongous_stats(g1_prep_task.humongous_total(), g1_prep_task.humongous_candidates());

    phase_times()->record_register_regions(task_time.seconds() * 1000.0);
  }

  assert(_g1h->verifier()->check_region_attr_table(), "Inconsistency in the region attributes table.");

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  allocation_failure_injector()->arm_if_needed();
}
 538 
// Drains evacuation work after root scanning: first trims the worker's own
// task queue, then repeatedly steals from other workers' queues until the
// terminator signals that all work is done. Tracks time spent in the
// termination protocol separately so it can be subtracted from object copy
// time.
class G1ParEvacuateFollowersClosure : public VoidClosure {
  double _start_term;
  double _term_time;
  size_t _term_attempts;

  void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
  void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }

  G1CollectedHeap*              _g1h;
  G1ParScanThreadState*         _par_scan_state;
  G1ScannerTasksQueueSet*       _queues;
  TaskTerminator*               _terminator;
  G1GCPhaseTimes::GCParPhases   _phase;

  G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  G1ScannerTasksQueueSet* queues()         { return _queues; }
  TaskTerminator*         terminator()     { return _terminator; }

  // Offers termination to the terminator, timing the attempt. A null
  // terminator means termination is immediate.
  inline bool offer_termination() {
    EventGCPhaseParallel event;
    G1ParScanThreadState* const pss = par_scan_state();
    start_term_time();
    const bool res = (terminator() == nullptr) ? true : terminator()->offer_termination();
    end_term_time();
    event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::Termination));
    return res;
  }

public:
  G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
                                G1ParScanThreadState* par_scan_state,
                                G1ScannerTasksQueueSet* queues,
                                TaskTerminator* terminator,
                                G1GCPhaseTimes::GCParPhases phase)
    : _start_term(0.0), _term_time(0.0), _term_attempts(0),
      _g1h(g1h), _par_scan_state(par_scan_state),
      _queues(queues), _terminator(terminator), _phase(phase) {}

  void do_void() {
    EventGCPhaseParallel event;
    G1ParScanThreadState* const pss = par_scan_state();
    // Process this worker's own queue first.
    pss->trim_queue();
    event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
    // Then keep stealing from other queues until termination is agreed upon.
    do {
      EventGCPhaseParallel event;
      pss->steal_and_trim_queue(queues());
      event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
    } while (!offer_termination());
  }

  // Accumulated time spent in the termination protocol, in seconds.
  double term_time() const { return _term_time; }
  // Number of times termination was offered.
  size_t term_attempts() const { return _term_attempts; }
};
 592 
// Base class for the parallel evacuation tasks (initial and optional
// evacuation rounds). Subclasses define how roots are scanned; the base
// class drives per-worker object evacuation and the associated phase time
// bookkeeping.
class G1EvacuateRegionsBaseTask : public WorkerTask {
protected:
  G1CollectedHeap* _g1h;
  G1ParScanThreadStateSet* _per_thread_states;
  G1ScannerTasksQueueSet* _task_queues;
  TaskTerminator _terminator;
  uint _num_workers;

  // Evacuates live objects for the given worker and records object copy and
  // termination times under the given phases.
  void evacuate_live_objects(G1ParScanThreadState* pss,
                             uint worker_id,
                             G1GCPhaseTimes::GCParPhases objcopy_phase,
                             G1GCPhaseTimes::GCParPhases termination_phase) {
    G1GCPhaseTimes* p = _g1h->phase_times();

    Ticks start = Ticks::now();
    G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, &_terminator, objcopy_phase);
    cl.do_void();

    assert(pss->queue_is_empty(), "should be empty");

    // Attribute only the non-termination part of the elapsed time to the
    // object copy phase.
    Tickspan evac_time = (Ticks::now() - start);
    p->record_or_add_time_secs(objcopy_phase, worker_id, evac_time.seconds() - cl.term_time());

    // The (initial) Termination phase is recorded exactly once per worker;
    // other (optional) termination phases may run multiple times and are
    // accumulated.
    if (termination_phase == G1GCPhaseTimes::Termination) {
      p->record_time_secs(termination_phase, worker_id, cl.term_time());
      p->record_thread_work_item(termination_phase, worker_id, cl.term_attempts());
    } else {
      p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time());
      p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts());
    }
    assert(pss->trim_ticks().value() == 0,
           "Unexpected partial trimming during evacuation value " JLONG_FORMAT,
           pss->trim_ticks().value());
  }

  // Optional per-worker hooks around the main work.
  virtual void start_work(uint worker_id) { }

  virtual void end_work(uint worker_id) { }

  virtual void scan_roots(G1ParScanThreadState* pss, uint worker_id) = 0;

  virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;

public:
  G1EvacuateRegionsBaseTask(const char* name,
                            G1ParScanThreadStateSet* per_thread_states,
                            G1ScannerTasksQueueSet* task_queues,
                            uint num_workers) :
    WorkerTask(name),
    _g1h(G1CollectedHeap::heap()),
    _per_thread_states(per_thread_states),
    _task_queues(task_queues),
    _terminator(num_workers, _task_queues),
    _num_workers(num_workers)
  { }

  void work(uint worker_id) {
    start_work(worker_id);

    {
      ResourceMark rm;

      G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
      pss->set_ref_discoverer(_g1h->ref_processor_stw());

      scan_roots(pss, worker_id);
      evacuate_live_objects(pss, worker_id);
    }

    end_work(worker_id);
  }
};
 665 
// Evacuation task for the initial collection set: scans VM roots as well as
// heap (remembered set) roots, then evacuates live objects.
class G1EvacuateRegionsTask : public G1EvacuateRegionsBaseTask {
  G1RootProcessor* _root_processor;
  bool _has_optional_evacuation_work;

  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    _root_processor->evacuate_roots(pss, worker_id);
    _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ObjCopy, _has_optional_evacuation_work);
    _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::CodeRoots, G1GCPhaseTimes::ObjCopy);
  }

  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination);
  }

  // Record per-worker start/end timestamps for the phase times.
  void start_work(uint worker_id) {
    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, Ticks::now().seconds());
  }

  void end_work(uint worker_id) {
    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, Ticks::now().seconds());
  }

public:
  G1EvacuateRegionsTask(G1CollectedHeap* g1h,
                        G1ParScanThreadStateSet* per_thread_states,
                        G1ScannerTasksQueueSet* task_queues,
                        G1RootProcessor* root_processor,
                        uint num_workers,
                        bool has_optional_evacuation_work) :
    G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
    _root_processor(root_processor),
    _has_optional_evacuation_work(has_optional_evacuation_work)
  { }
};
 700 
// Evacuates the initial (non-optional) part of the collection set: merges
// heap roots, runs the parallel evacuation task, and records the related
// phase times.
void G1YoungCollector::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states,
                                                      bool has_optional_evacuation_work) {
  G1GCPhaseTimes* p = phase_times();

  {
    Ticks start = Ticks::now();
    rem_set()->merge_heap_roots(true /* initial_evacuation */);
    p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
  }

  Tickspan task_time;
  const uint num_workers = workers()->active_workers();

  Ticks start_processing = Ticks::now();
  {
    G1RootProcessor root_processor(_g1h, num_workers);
    G1EvacuateRegionsTask g1_par_task(_g1h,
                                      per_thread_states,
                                      task_queues(),
                                      &root_processor,
                                      num_workers,
                                      has_optional_evacuation_work);
    task_time = run_task_timed(&g1_par_task);
    // Closing the inner scope will execute the destructor for the
    // G1RootProcessor object. By subtracting the WorkerThreads task from the total
    // time of this scope, we get the "NMethod List Cleanup" time. This list is
    // constructed during "STW two-phase nmethod root processing", see more in
    // nmethod.hpp
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  p->record_initial_evac_time(task_time.seconds() * 1000.0);
  p->record_or_add_nmethod_list_cleanup_time((total_processing - task_time).seconds() * 1000.0);

  rem_set()->complete_evac_phase(has_optional_evacuation_work);
}
 737 
// Evacuation task for the optional collection set rounds: only heap
// (remembered set) roots need scanning; VM roots were already processed in
// the initial round.
class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {

  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy, true /* remember_already_scanned_cards */);
    _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
  }

  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
  }

public:
  G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
                                G1ScannerTasksQueueSet* queues,
                                uint num_workers) :
    G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
  }
};
 756 
// Runs one optional evacuation round over the regions previously selected by
// finalize_optional_for_evacuation().
void G1YoungCollector::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
  // To access the protected constructor/destructor
  class G1MarkScope : public MarkScope { };

  Tickspan task_time;

  Ticks start_processing = Ticks::now();
  {
    G1MarkScope code_mark_scope;
    G1EvacuateOptionalRegionsTask task(per_thread_states, task_queues(), workers()->active_workers());
    task_time = run_task_timed(&task);
    // See comment in evacuate_initial_collection_set() for the reason of the scope.
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  G1GCPhaseTimes* p = phase_times();
  // Scope time minus task time yields the nmethod list cleanup time.
  p->record_or_add_nmethod_list_cleanup_time((total_processing - task_time).seconds() * 1000.0);
}
 775 
// Incrementally evacuate optional collection set regions while no evacuation
// allocation failure has occurred and the remaining pause time budget allows.
void G1YoungCollector::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
  const double collection_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;

  while (!evacuation_alloc_failed() && collection_set()->optional_region_length() > 0) {

    double time_used_ms = os::elapsedTime() * 1000.0 - collection_start_time_ms;
    double time_left_ms = MaxGCPauseMillis - time_used_ms;

    // Only a fraction of the remaining pause time is handed to optional
    // evacuation; stop if there is no budget or no regions fit into it.
    if (time_left_ms < 0 ||
        !collection_set()->finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
      log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
                                collection_set()->optional_region_length(), time_left_ms);
      break;
    }

    // Merge remembered set entries for the finalized increment into the card
    // table before scanning.
    {
      Ticks start = Ticks::now();
      rem_set()->merge_heap_roots(false /* initial_evacuation */);
      phase_times()->record_or_add_optional_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
    }

    {
      Ticks start = Ticks::now();
      evacuate_next_optional_regions(per_thread_states);
      phase_times()->record_or_add_optional_evac_time((Ticks::now() - start).seconds() * 1000.0);
    }

    rem_set()->complete_evac_phase(true /* has_more_than_one_evacuation_phase */);
  }

  // Give up any optional regions that were not (or could not be) evacuated.
  collection_set()->abandon_optional_collection_set(per_thread_states);
}
 808 
 809 // Non Copying Keep Alive closure
 810 class G1KeepAliveClosure: public OopClosure {
 811   G1CollectedHeap*_g1h;
 812 public:
 813   G1KeepAliveClosure(G1CollectedHeap* g1h) :_g1h(g1h) {}
 814   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
 815   void do_oop(oop* p) {
 816     oop obj = *p;
 817     assert(obj != nullptr, "the caller should have filtered out null values");
 818 
 819     const G1HeapRegionAttr region_attr =_g1h->region_attr(obj);
 820     if (!region_attr.is_in_cset_or_humongous_candidate()) {
 821       return;
 822     }
 823     if (region_attr.is_in_cset()) {
 824       assert(obj->is_forwarded(), "invariant" );
 825       *p = obj->forwardee();
 826     } else {
 827       assert(!obj->is_forwarded(), "invariant" );
 828       assert(region_attr.is_humongous_candidate(),
 829              "Only allowed G1HeapRegionAttr state is IsHumongous, but is %d", region_attr.type());
 830      _g1h->set_humongous_is_live(obj);
 831     }
 832   }
 833 };
 834 
 835 // Copying Keep Alive closure - can be called from both
 836 // serial and parallel code as long as different worker
 837 // threads utilize different G1ParScanThreadState instances
 838 // and different queues.
 839 class G1CopyingKeepAliveClosure: public OopClosure {
 840   G1CollectedHeap* _g1h;
 841   G1ParScanThreadState*    _par_scan_state;
 842 
 843 public:
 844   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
 845                             G1ParScanThreadState* pss):
 846     _g1h(g1h),
 847     _par_scan_state(pss)
 848   {}
 849 
 850   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
 851   virtual void do_oop(      oop* p) { do_oop_work(p); }
 852 
 853   template <class T> void do_oop_work(T* p) {
 854     oop obj = RawAccess<>::oop_load(p);
 855 
 856     if (_g1h->is_in_cset_or_humongous_candidate(obj)) {
 857       // If the referent object has been forwarded (either copied
 858       // to a new location or to itself in the event of an
 859       // evacuation failure) then we need to update the reference
 860       // field and, if both reference and referent are in the G1
 861       // heap, update the RSet for the referent.
 862       //
 863       // If the referent has not been forwarded then we have to keep
 864       // it alive by policy. Therefore we have copy the referent.
 865       //
 866       // When the queue is drained (after each phase of reference processing)
 867       // the object and it's followers will be copied, the reference field set
 868       // to point to the new location, and the RSet updated.
 869       _par_scan_state->push_on_queue(ScannerTask(p));
 870     }
 871   }
 872 };
 873 
// Proxy task distributing STW reference processing work across the GC
// workers; handles both the single- and multi-threaded processing models.
class G1STWRefProcProxyTask : public RefProcProxyTask {
  G1CollectedHeap& _g1h;
  G1ParScanThreadStateSet& _pss;
  TaskTerminator _terminator;
  G1ScannerTasksQueueSet& _task_queues;

  // Special closure for enqueuing discovered fields: during enqueue the card table
  // may not be in shape to properly handle normal barrier calls (e.g. card marks
  // in regions that failed evacuation, scribbling of various values by card table
  // scan code). Additionally the regular barrier enqueues into the "global"
  // DCQS, but during GC we need these to-be-refined entries in the GC local queue
  // so that after clearing the card table, the redirty cards phase will properly
  // mark all dirty cards to be picked up by refinement.
  class G1EnqueueDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
    G1CollectedHeap* _g1h;
    G1ParScanThreadState* _pss;

  public:
    G1EnqueueDiscoveredFieldClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) : _g1h(g1h), _pss(pss) { }

    void enqueue(HeapWord* discovered_field_addr, oop value) override {
      assert(_g1h->is_in(discovered_field_addr), PTR_FORMAT " is not in heap ", p2i(discovered_field_addr));
      // Store the value first, whatever it is.
      RawAccess<>::oop_store(discovered_field_addr, value);
      if (value == nullptr) {
        // No post-barrier work needed for null stores.
        return;
      }
      _pss->write_ref_field_post(discovered_field_addr, value);
    }
  };

public:
  G1STWRefProcProxyTask(uint max_workers, G1CollectedHeap& g1h, G1ParScanThreadStateSet& pss, G1ScannerTasksQueueSet& task_queues)
    : RefProcProxyTask("G1STWRefProcProxyTask", max_workers),
      _g1h(g1h),
      _pss(pss),
      _terminator(max_workers, &task_queues),
      _task_queues(task_queues) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    // In the single-threaded model all work uses worker 0's scan state.
    uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;

    G1ParScanThreadState* pss = _pss.state_for_worker(index);
    // Disable further reference discovery while processing references.
    pss->set_ref_discoverer(nullptr);

    G1STWIsAliveClosure is_alive(&_g1h);
    G1CopyingKeepAliveClosure keep_alive(&_g1h, pss);
    G1EnqueueDiscoveredFieldClosure enqueue(&_g1h, pss);
    // No terminator is needed (or wanted) when running single-threaded.
    G1ParEvacuateFollowersClosure complete_gc(&_g1h, pss, &_task_queues, _tm == RefProcThreadModel::Single ? nullptr : &_terminator, G1GCPhaseTimes::ObjCopy);
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);

    // We have completed copying any necessary live referent objects.
    assert(pss->queue_is_empty(), "both queue and overflow should be empty");
  }

  void prepare_run_task_hook() override {
    // The number of queues in use may differ between invocations.
    _terminator.reset_for_reuse(_queue_count);
  }
};
 934 
 935 void G1YoungCollector::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
 936   Ticks start = Ticks::now();
 937 
 938   ReferenceProcessor* rp = ref_processor_stw();
 939   assert(rp->discovery_enabled(), "should have been enabled");
 940 
 941   uint no_of_gc_workers = workers()->active_workers();
 942   rp->set_active_mt_degree(no_of_gc_workers);
 943 
 944   G1STWRefProcProxyTask task(rp->max_num_queues(), *_g1h, *per_thread_states, *task_queues());
 945   ReferenceProcessorPhaseTimes& pt = *phase_times()->ref_phase_times();
 946   ReferenceProcessorStats stats = rp->process_discovered_references(task, pt);
 947 
 948   gc_tracer_stw()->report_gc_reference_stats(stats);
 949 
 950   _g1h->make_pending_list_reachable();
 951 
 952   phase_times()->record_ref_proc_time((Ticks::now() - start).seconds() * MILLIUNITS);
 953 }
 954 
 955 void G1YoungCollector::post_evacuate_cleanup_1(G1ParScanThreadStateSet* per_thread_states) {
 956   Ticks start = Ticks::now();
 957   {
 958     G1PostEvacuateCollectionSetCleanupTask1 cl(per_thread_states, &_evac_failure_regions);
 959     _g1h->run_batch_task(&cl);
 960   }
 961   phase_times()->record_post_evacuate_cleanup_task_1_time((Ticks::now() - start).seconds() * 1000.0);
 962 }
 963 
 964 void G1YoungCollector::post_evacuate_cleanup_2(G1ParScanThreadStateSet* per_thread_states,
 965                                                G1EvacInfo* evacuation_info) {
 966   Ticks start = Ticks::now();
 967   {
 968     G1PostEvacuateCollectionSetCleanupTask2 cl(per_thread_states, evacuation_info, &_evac_failure_regions);
 969     _g1h->run_batch_task(&cl);
 970   }
 971   phase_times()->record_post_evacuate_cleanup_task_2_time((Ticks::now() - start).seconds() * 1000.0);
 972 }
 973 
 974 void G1YoungCollector::enqueue_candidates_as_root_regions() {
 975   assert(collector_state()->in_concurrent_start_gc(), "must be");
 976 
 977   G1CollectionSetCandidates* candidates = collection_set()->candidates();
 978   for (HeapRegion* r : *candidates) {
 979     _g1h->concurrent_mark()->add_root_region(r);
 980   }
 981 }
 982 
// Work performed after the actual evacuation: reference and weak-oop
// processing, cleanup of GC data structures, and restoring the heap for
// mutator use. The order of the steps below is significant.
void G1YoungCollector::post_evacuate_collection_set(G1EvacInfo* evacuation_info,
                                                    G1ParScanThreadStateSet* per_thread_states) {
  G1GCPhaseTimes* p = phase_times();

  // Process any discovered reference objects - we have
  // to do this _before_ we retire the GC alloc regions
  // as we may have to copy some 'reachable' referent
  // objects (and their reachable sub-graphs) that were
  // not copied during the pause.
  process_discovered_references(per_thread_states);

  G1STWIsAliveClosure is_alive(_g1h);
  G1KeepAliveClosure keep_alive(_g1h);

  WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive, p->weak_phase_times());

  allocator()->release_gc_alloc_regions(evacuation_info);

  post_evacuate_cleanup_1(per_thread_states);

  post_evacuate_cleanup_2(per_thread_states, evacuation_info);

  // Regions in the collection set candidates are roots for the marking (they
  // are not marked through), considering they are very likely to be reclaimed
  // soon. They need to be enqueued explicitly compared to survivor regions.
  if (collector_state()->in_concurrent_start_gc()) {
    enqueue_candidates_as_root_regions();
  }

  _evac_failure_regions.post_collection();

  // The tracked used value must agree with a full recalculation at this point.
  assert_used_and_recalculate_used_equal(_g1h);

  _g1h->rebuild_free_region_list();

  _g1h->record_obj_copy_mem_stats();

  evacuation_info->set_bytes_used(_g1h->bytes_used_during_gc());

  _g1h->prepare_for_mutator_after_young_collection();

  _g1h->gc_epilogue(false);

  _g1h->expand_heap_after_young_collection();
}
1028 
// Whether at least one region failed evacuation during this collection.
bool G1YoungCollector::evacuation_failed() const {
  return _evac_failure_regions.has_regions_evac_failed();
}
1032 
// Whether any region was recorded as evacuation-pinned (see G1EvacFailureRegions).
bool G1YoungCollector::evacuation_pinned() const {
  return _evac_failure_regions.has_regions_evac_pinned();
}
1036 
// Whether any region failed evacuation due to allocation failure.
bool G1YoungCollector::evacuation_alloc_failed() const {
  return _evac_failure_regions.has_regions_alloc_failed();
}
1040 
// Set up a young collector for the given GC cause; the actual pause work
// happens in collect().
G1YoungCollector::G1YoungCollector(GCCause::Cause gc_cause) :
  _g1h(G1CollectedHeap::heap()),
  _gc_cause(gc_cause),
  _concurrent_operation_is_full_mark(false),
  _evac_failure_regions()
{
}
1048 
// Driver of the young collection pause. The scoped mark objects below are
// constructed in a deliberate order (and therefore destructed in reverse);
// do not reorder them without understanding that dependency.
void G1YoungCollector::collect() {
  // Do timing/tracing/statistics/pre- and post-logging/verification work not
  // directly related to the collection. They should not be accounted for in
  // collection work timing.

  // The G1YoungGCTraceTime message depends on collector state, so must come after
  // determining collector state.
  G1YoungGCTraceTime tm(this, _gc_cause);

  // JFR
  G1YoungGCJFRTracerMark jtm(gc_timer_stw(), gc_tracer_stw(), _gc_cause);
  // JStat/MXBeans
  G1YoungGCMonitoringScope ms(monitoring_support(),
                              !collection_set()->candidates()->is_empty() /* all_memory_pools_affected */);
  // Create the heap printer before internal pause timing to have
  // heap information printed as last part of detailed GC log.
  G1HeapPrinterMark hpm(_g1h);
  // Young GC internal pause timing
  G1YoungGCNotifyPauseMark npm(this);

  // Verification may use the workers, so they must be set up before.
  // Individual parallel phases may override this.
  set_young_collection_default_active_worker_threads();

  // Wait for root region scan here to make sure that it is done before any
  // use of the STW workers to maximize cpu use (i.e. all cores are available
  // just to do that).
  wait_for_root_region_scanning();

  G1YoungGCVerifierMark vm(this);
  {
    // Actual collection work starts and is executed (only) in this scope.

    // Young GC internal collection timing. The elapsed time recorded in the
    // policy for the collection deliberately elides verification (and some
    // other trivial setup above).
    policy()->record_young_collection_start();

    pre_evacuate_collection_set(jtm.evacuation_info());

    // Per-worker scan state shared by the initial and (possibly) optional
    // evacuation passes below.
    G1ParScanThreadStateSet per_thread_states(_g1h,
                                              workers()->active_workers(),
                                              collection_set(),
                                              &_evac_failure_regions);

    bool may_do_optional_evacuation = collection_set()->optional_region_length() != 0;
    // Actually do the work...
    evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);

    if (may_do_optional_evacuation) {
      evacuate_optional_collection_set(&per_thread_states);
    }
    post_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);

    // Refine the type of a concurrent mark operation now that we did the
    // evacuation, eventually aborting it.
    _concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP");

    // Need to report the collection pause now since record_collection_pause_end()
    // modifies it to the next state.
    jtm.report_pause_type(collector_state()->young_gc_pause_type(_concurrent_operation_is_full_mark));

    policy()->record_young_collection_end(_concurrent_operation_is_full_mark, evacuation_alloc_failed());
  }
  TASKQUEUE_STATS_ONLY(_g1h->task_queues()->print_and_reset_taskqueue_stats("Oop Queue");)
}