1 /*
   2  * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "classfile/classLoaderDataGraph.inline.hpp"
  28 #include "classfile/javaClasses.inline.hpp"
  29 #include "compiler/oopMap.hpp"
  30 #include "gc/g1/g1Allocator.hpp"
  31 #include "gc/g1/g1CardSetMemory.hpp"
  32 #include "gc/g1/g1CollectedHeap.inline.hpp"
  33 #include "gc/g1/g1CollectionSetCandidates.inline.hpp"
  34 #include "gc/g1/g1CollectorState.hpp"
  35 #include "gc/g1/g1ConcurrentMark.hpp"
  36 #include "gc/g1/g1GCPhaseTimes.hpp"
  37 #include "gc/g1/g1EvacFailureRegions.inline.hpp"
  38 #include "gc/g1/g1EvacInfo.hpp"
  39 #include "gc/g1/g1HRPrinter.hpp"
  40 #include "gc/g1/g1MonitoringSupport.hpp"
  41 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  42 #include "gc/g1/g1Policy.hpp"
  43 #include "gc/g1/g1RedirtyCardsQueue.hpp"
  44 #include "gc/g1/g1RegionPinCache.inline.hpp"
  45 #include "gc/g1/g1RemSet.hpp"
  46 #include "gc/g1/g1RootProcessor.hpp"
  47 #include "gc/g1/g1Trace.hpp"
  48 #include "gc/g1/g1YoungCollector.hpp"
  49 #include "gc/g1/g1YoungGCAllocationFailureInjector.hpp"
  50 #include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
  51 #include "gc/g1/g1YoungGCPreEvacuateTasks.hpp"
  52 #include "gc/g1/g1_globals.hpp"
  53 #include "gc/shared/concurrentGCBreakpoints.hpp"
  54 #include "gc/shared/gcTraceTime.inline.hpp"
  55 #include "gc/shared/gcTimer.hpp"
  56 #include "gc/shared/preservedMarks.hpp"
  57 #include "gc/shared/referenceProcessor.hpp"
  58 #include "gc/shared/weakProcessor.inline.hpp"
  59 #include "gc/shared/workerPolicy.hpp"
  60 #include "gc/shared/workerThread.hpp"
  61 #include "jfr/jfrEvents.hpp"
  62 #include "memory/resourceArea.hpp"
  63 #include "runtime/threads.hpp"
  64 #include "utilities/ticks.hpp"
  65 
// GCTraceTime wrapper that constructs the message according to GC pause type and
// GC cause.
// The code relies on the fact that GCTraceTimeWrapper stores the string passed at
// construction only as a reference, so that we can modify the message as needed.
  70 class G1YoungGCTraceTime {
  71   G1YoungCollector* _collector;
  72 
  73   G1GCPauseType _pause_type;
  74   GCCause::Cause _pause_cause;
  75 
  76   static const uint MaxYoungGCNameLength = 128;
  77   char _young_gc_name_data[MaxYoungGCNameLength];
  78 
  79   GCTraceTime(Info, gc) _tt;
  80 
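  // Compose the pause name, e.g. something like "Pause Young (Normal) (G1 Evacuation
  // Pause)", appending an "(Evacuation Failure: ...)" suffix once a failure is known.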
  81   const char* update_young_gc_name() {
  82     char evacuation_failed_string[48];
  83     evacuation_failed_string[0] = '\0';
  84 
  85     if (_collector->evacuation_failed()) {
  86       snprintf(evacuation_failed_string,
  87                ARRAY_SIZE(evacuation_failed_string),
  88                " (Evacuation Failure: %s%s%s)",
  89                _collector->evacuation_alloc_failed() ? "Allocation" : "",
  90                _collector->evacuation_alloc_failed() && _collector->evacuation_pinned() ? " / " : "",
  91                _collector->evacuation_pinned() ? "Pinned" : "");
  92     }
  93     snprintf(_young_gc_name_data,
  94              MaxYoungGCNameLength,
  95              "Pause Young (%s) (%s)%s",
  96              G1GCPauseTypeHelper::to_string(_pause_type),
  97              GCCause::to_string(_pause_cause),
  98              evacuation_failed_string);
  99     return _young_gc_name_data;
 100   }
 101 
 102 public:
 103   G1YoungGCTraceTime(G1YoungCollector* collector, GCCause::Cause cause) :
 104     _collector(collector),
    // Take a snapshot of the current pause type at start as it may be modified during GC.
 106     // The strings for all Concurrent Start pauses are the same, so the parameter
 107     // does not matter here.
 108     _pause_type(_collector->collector_state()->young_gc_pause_type(false /* concurrent_operation_is_full_mark */)),
 109     _pause_cause(cause),
 110     // Fake a "no cause" and manually add the correct string in update_young_gc_name()
 111     // to make the string look more natural.
 112     _tt(update_young_gc_name(), nullptr, GCCause::_no_gc, true) {
 113   }
 114 
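  // Rebuild the name at the end of the pause: only now is it known whether evacuation
  // failed. The GCTraceTime member is destroyed after this destructor body runs and
  // prints the buffer it references, so it picks up the updated string.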
 115   ~G1YoungGCTraceTime() {
 116     update_young_gc_name();
 117   }
 118 };
 119 
 120 class G1YoungGCNotifyPauseMark : public StackObj {
 121   G1YoungCollector* _collector;
 122 
 123 public:
 124   G1YoungGCNotifyPauseMark(G1YoungCollector* collector) : _collector(collector) {
 125     G1CollectedHeap::heap()->policy()->record_young_gc_pause_start();
 126   }
 127 
 128   ~G1YoungGCNotifyPauseMark() {
 129     G1CollectedHeap::heap()->policy()->record_young_gc_pause_end(_collector->evacuation_failed());
 130   }
 131 };
 132 
 133 class G1YoungGCJFRTracerMark : public G1JFRTracerMark {
 134   G1EvacInfo _evacuation_info;
 135 
 136   G1NewTracer* tracer() const { return (G1NewTracer*)_tracer; }
 137 
 138 public:
 139 
 140   G1EvacInfo* evacuation_info() { return &_evacuation_info; }
 141 
 142   G1YoungGCJFRTracerMark(STWGCTimer* gc_timer_stw, G1NewTracer* gc_tracer_stw, GCCause::Cause cause) :
 143     G1JFRTracerMark(gc_timer_stw, gc_tracer_stw), _evacuation_info() { }
 144 
 145   void report_pause_type(G1GCPauseType type) {
 146     tracer()->report_young_gc_pause(type);
 147   }
 148 
 149   ~G1YoungGCJFRTracerMark() {
 150     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 151 
 152     tracer()->report_evacuation_info(&_evacuation_info);
 153     tracer()->report_tenuring_threshold(g1h->policy()->tenuring_threshold());
 154   }
 155 };
 156 
 157 class G1YoungGCVerifierMark : public StackObj {
 158   G1YoungCollector* _collector;
 159   G1HeapVerifier::G1VerifyType _type;
 160 
 161   static G1HeapVerifier::G1VerifyType young_collection_verify_type() {
 162     G1CollectorState* state = G1CollectedHeap::heap()->collector_state();
 163     if (state->in_concurrent_start_gc()) {
 164       return G1HeapVerifier::G1VerifyConcurrentStart;
 165     } else if (state->in_young_only_phase()) {
 166       return G1HeapVerifier::G1VerifyYoungNormal;
 167     } else {
 168       return G1HeapVerifier::G1VerifyMixed;
 169     }
 170   }
 171 
 172 public:
 173   G1YoungGCVerifierMark(G1YoungCollector* collector) : _collector(collector), _type(young_collection_verify_type()) {
 174     G1CollectedHeap::heap()->verify_before_young_collection(_type);
 175   }
 176 
 177   ~G1YoungGCVerifierMark() {
 178     // Inject evacuation failure tag into type if needed.
 179     G1HeapVerifier::G1VerifyType type = _type;
 180     if (_collector->evacuation_failed()) {
 181       type = (G1HeapVerifier::G1VerifyType)(type | G1HeapVerifier::G1VerifyYoungEvacFail);
 182     }
 183     G1CollectedHeap::heap()->verify_after_young_collection(type);
 184   }
 185 };
 186 
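// Convenience accessors that forward to the corresponding G1CollectedHeap components.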
 187 G1Allocator* G1YoungCollector::allocator() const {
 188   return _g1h->allocator();
 189 }
 190 
 191 G1CollectionSet* G1YoungCollector::collection_set() const {
 192   return _g1h->collection_set();
 193 }
 194 
 195 G1CollectorState* G1YoungCollector::collector_state() const {
 196   return _g1h->collector_state();
 197 }
 198 
 199 G1ConcurrentMark* G1YoungCollector::concurrent_mark() const {
 200   return _g1h->concurrent_mark();
 201 }
 202 
 203 STWGCTimer* G1YoungCollector::gc_timer_stw() const {
 204   return _g1h->gc_timer_stw();
 205 }
 206 
 207 G1NewTracer* G1YoungCollector::gc_tracer_stw() const {
 208   return _g1h->gc_tracer_stw();
 209 }
 210 
 211 G1Policy* G1YoungCollector::policy() const {
 212   return _g1h->policy();
 213 }
 214 
 215 G1GCPhaseTimes* G1YoungCollector::phase_times() const {
 216   return _g1h->phase_times();
 217 }
 218 
 219 G1HRPrinter* G1YoungCollector::hr_printer() const {
 220   return _g1h->hr_printer();
 221 }
 222 
 223 G1MonitoringSupport* G1YoungCollector::monitoring_support() const {
 224   return _g1h->monitoring_support();
 225 }
 226 
 227 G1RemSet* G1YoungCollector::rem_set() const {
 228   return _g1h->rem_set();
 229 }
 230 
 231 G1ScannerTasksQueueSet* G1YoungCollector::task_queues() const {
 232   return _g1h->task_queues();
 233 }
 234 
 235 G1SurvivorRegions* G1YoungCollector::survivor_regions() const {
 236   return _g1h->survivor();
 237 }
 238 
 239 ReferenceProcessor* G1YoungCollector::ref_processor_stw() const {
 240   return _g1h->ref_processor_stw();
 241 }
 242 
 243 WorkerThreads* G1YoungCollector::workers() const {
 244   return _g1h->workers();
 245 }
 246 
 247 G1YoungGCAllocationFailureInjector* G1YoungCollector::allocation_failure_injector() const {
 248   return _g1h->allocation_failure_injector();
 249 }
 250 
 251 
 252 void G1YoungCollector::wait_for_root_region_scanning() {
 253   Ticks start = Ticks::now();
 254   // We have to wait until the CM threads finish scanning the
 255   // root regions as it's the only way to ensure that all the
 256   // objects on them have been correctly scanned before we start
 257   // moving them during the GC.
 258   bool waited = concurrent_mark()->wait_until_root_region_scan_finished();
 259   Tickspan wait_time;
 260   if (waited) {
 261     wait_time = (Ticks::now() - start);
 262   }
 263   phase_times()->record_root_region_scan_wait_time(wait_time.seconds() * MILLIUNITS);
 264 }
 265 
 266 class G1PrintCollectionSetClosure : public HeapRegionClosure {
 267 private:
 268   G1HRPrinter* _hr_printer;
 269 public:
 270   G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
 271 
 272   virtual bool do_heap_region(HeapRegion* r) {
 273     _hr_printer->cset(r);
 274     return false;
 275   }
 276 };
 277 
 278 void G1YoungCollector::calculate_collection_set(G1EvacInfo* evacuation_info, double target_pause_time_ms) {
 279   // Forget the current allocation region (we might even choose it to be part
 280   // of the collection set!) before finalizing the collection set.
 281   allocator()->release_mutator_alloc_regions();
 282 
 283   collection_set()->finalize_initial_collection_set(target_pause_time_ms, survivor_regions());
 284   evacuation_info->set_collection_set_regions(collection_set()->region_length() +
 285                                               collection_set()->optional_region_length());
 286 
 287   concurrent_mark()->verify_no_collection_set_oops();
 288 
 289   if (hr_printer()->is_active()) {
 290     G1PrintCollectionSetClosure cl(hr_printer());
 291     collection_set()->iterate(&cl);
 292     collection_set()->iterate_optional(&cl);
 293   }
 294 }
 295 
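// Worker task run before evacuation: prepares each region for remembered set scanning,
// samples young/humongous card set sizes, and registers regions with the region
// attribute table, selecting humongous regions eligible for eager reclaim as candidates.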
 296 class G1PrepareEvacuationTask : public WorkerTask {
 297   class G1PrepareRegionsClosure : public HeapRegionClosure {
 298     G1CollectedHeap* _g1h;
 299     G1PrepareEvacuationTask* _parent_task;
 300     uint _worker_humongous_total;
 301     uint _worker_humongous_candidates;
 302 
 303     G1MonotonicArenaMemoryStats _card_set_stats;
 304 
 305     void sample_card_set_size(HeapRegion* hr) {
      // Sample card set sizes for young gen and humongous regions before GC: this makes
      // the policy that gives back memory to the OS keep the most recent amount of
      // memory for these regions.
 309       if (hr->is_young() || hr->is_starts_humongous()) {
 310         _card_set_stats.add(hr->rem_set()->card_set_memory_stats());
 311       }
 312     }
 313 
 314     bool humongous_region_is_candidate(HeapRegion* region) const {
 315       assert(region->is_starts_humongous(), "Must start a humongous object");
 316 
 317       oop obj = cast_to_oop(region->bottom());
 318 
 319       // Dead objects cannot be eager reclaim candidates. Due to class
 320       // unloading it is unsafe to query their classes so we return early.
 321       if (_g1h->is_obj_dead(obj, region)) {
 322         return false;
 323       }
 324 
      // If we do not have a complete remembered set for the region, then we cannot
      // be sure that we have all references to it.
 327       if (!region->rem_set()->is_complete()) {
 328         return false;
 329       }
 330       // We also cannot collect the humongous object if it is pinned.
 331       if (region->has_pinned_objects()) {
 332         return false;
 333       }
 334       // Candidate selection must satisfy the following constraints
 335       // while concurrent marking is in progress:
 336       //
 337       // * In order to maintain SATB invariants, an object must not be
 338       // reclaimed if it was allocated before the start of marking and
 339       // has not had its references scanned.  Such an object must have
 340       // its references (including type metadata) scanned to ensure no
 341       // live objects are missed by the marking process.  Objects
 342       // allocated after the start of concurrent marking don't need to
 343       // be scanned.
 344       //
 345       // * An object must not be reclaimed if it is on the concurrent
 346       // mark stack.  Objects allocated after the start of concurrent
 347       // marking are never pushed on the mark stack.
 348       //
 349       // Nominating only objects allocated after the start of concurrent
 350       // marking is sufficient to meet both constraints.  This may miss
 351       // some objects that satisfy the constraints, but the marking data
 352       // structures don't support efficiently performing the needed
 353       // additional tests or scrubbing of the mark stack.
 354       //
 355       // However, we presently only nominate is_typeArray() objects.
 356       // A humongous object containing references induces remembered
 357       // set entries on other regions.  In order to reclaim such an
 358       // object, those remembered sets would need to be cleaned up.
 359       //
 360       // We also treat is_typeArray() objects specially, allowing them
 361       // to be reclaimed even if allocated before the start of
 362       // concurrent mark.  For this we rely on mark stack insertion to
 363       // exclude is_typeArray() objects, preventing reclaiming an object
 364       // that is in the mark stack.  We also rely on the metadata for
 365       // such objects to be built-in and so ensured to be kept live.
 366       // Frequent allocation and drop of large binary blobs is an
 367       // important use case for eager reclaim, and this special handling
 368       // may reduce needed headroom.
 369 
 370       return obj->is_typeArray() &&
 371              _g1h->is_potential_eager_reclaim_candidate(region);
 372     }
 373 
 374   public:
 375     G1PrepareRegionsClosure(G1CollectedHeap* g1h, G1PrepareEvacuationTask* parent_task) :
 376       _g1h(g1h),
 377       _parent_task(parent_task),
 378       _worker_humongous_total(0),
 379       _worker_humongous_candidates(0) { }
 380 
 381     ~G1PrepareRegionsClosure() {
 382       _parent_task->add_humongous_candidates(_worker_humongous_candidates);
 383       _parent_task->add_humongous_total(_worker_humongous_total);
 384     }
 385 
 386     virtual bool do_heap_region(HeapRegion* hr) {
 387       // First prepare the region for scanning
 388       _g1h->rem_set()->prepare_region_for_scan(hr);
 389 
 390       sample_card_set_size(hr);
 391 
 392       // Now check if region is a humongous candidate
 393       if (!hr->is_starts_humongous()) {
 394         _g1h->register_region_with_region_attr(hr);
 395         return false;
 396       }
 397 
 398       uint index = hr->hrm_index();
 399       if (humongous_region_is_candidate(hr)) {
 400         _g1h->register_humongous_candidate_region_with_region_attr(index);
 401         _worker_humongous_candidates++;
 402         // We will later handle the remembered sets of these regions.
 403       } else {
 404         _g1h->register_region_with_region_attr(hr);
 405       }
 406       log_debug(gc, humongous)("Humongous region %u (object size %zu @ " PTR_FORMAT ") remset %zu code roots %zu "
 407                                "marked %d pinned count %zu reclaim candidate %d type array %d",
 408                                index,
 409                                cast_to_oop(hr->bottom())->size() * HeapWordSize,
 410                                p2i(hr->bottom()),
 411                                hr->rem_set()->occupied(),
 412                                hr->rem_set()->code_roots_list_length(),
 413                                _g1h->concurrent_mark()->mark_bitmap()->is_marked(hr->bottom()),
 414                                hr->pinned_count(),
 415                                _g1h->is_humongous_reclaim_candidate(index),
 416                                cast_to_oop(hr->bottom())->is_typeArray()
 417                               );
 418       _worker_humongous_total++;
 419 
 420       return false;
 421     }
 422 
 423     G1MonotonicArenaMemoryStats card_set_stats() const {
 424       return _card_set_stats;
 425     }
 426   };
 427 
 428   G1CollectedHeap* _g1h;
 429   HeapRegionClaimer _claimer;
 430   volatile uint _humongous_total;
 431   volatile uint _humongous_candidates;
 432 
 433   G1MonotonicArenaMemoryStats _all_card_set_stats;
 434 
 435 public:
 436   G1PrepareEvacuationTask(G1CollectedHeap* g1h) :
 437     WorkerTask("Prepare Evacuation"),
 438     _g1h(g1h),
 439     _claimer(_g1h->workers()->active_workers()),
 440     _humongous_total(0),
 441     _humongous_candidates(0) { }
 442 
 443   void work(uint worker_id) {
 444     G1PrepareRegionsClosure cl(_g1h, this);
 445     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_claimer, worker_id);
 446 
 447     MutexLocker x(G1RareEvent_lock, Mutex::_no_safepoint_check_flag);
 448     _all_card_set_stats.add(cl.card_set_stats());
 449   }
 450 
 451   void add_humongous_candidates(uint candidates) {
 452     Atomic::add(&_humongous_candidates, candidates);
 453   }
 454 
 455   void add_humongous_total(uint total) {
 456     Atomic::add(&_humongous_total, total);
 457   }
 458 
 459   uint humongous_candidates() {
 460     return _humongous_candidates;
 461   }
 462 
 463   uint humongous_total() {
 464     return _humongous_total;
 465   }
 466 
 467   const G1MonotonicArenaMemoryStats all_card_set_stats() const {
 468     return _all_card_set_stats;
 469   }
 470 };
 471 
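// Run the given task on the active workers and return its wall-clock duration.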
 472 Tickspan G1YoungCollector::run_task_timed(WorkerTask* task) {
 473   Ticks start = Ticks::now();
 474   workers()->run_task(task);
 475   return Ticks::now() - start;
 476 }
 477 
 478 void G1YoungCollector::set_young_collection_default_active_worker_threads(){
 479   uint active_workers = WorkerPolicy::calc_active_workers(workers()->max_workers(),
 480                                                           workers()->active_workers(),
 481                                                           Threads::number_of_non_daemon_threads());
 482   active_workers = workers()->set_active_workers(active_workers);
 483   log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->max_workers());
 484 }
 485 
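// Prepare for evacuation: flush thread-local buffers, finalize the collection set,
// start STW reference discovery, set up the GC alloc regions and remembered set
// scan state, and register regions (including humongous eager-reclaim candidates).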
 486 void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info) {
  // Flush various data in thread-local buffers to be able to determine the
  // collection set.
 489   {
 490     Ticks start = Ticks::now();
 491     G1PreEvacuateCollectionSetBatchTask cl;
 492     G1CollectedHeap::heap()->run_batch_task(&cl);
 493     phase_times()->record_pre_evacuate_prepare_time_ms((Ticks::now() - start).seconds() * 1000.0);
 494   }
 495 
 496   // Needs log buffers flushed.
 497   calculate_collection_set(evacuation_info, policy()->max_pause_time_ms());
 498 
 499   if (collector_state()->in_concurrent_start_gc()) {
 500     concurrent_mark()->pre_concurrent_start(_gc_cause);
 501   }
 502 
  // See the comment in g1CollectedHeap.hpp and
  // G1CollectedHeap::ref_processing_init() for how
  // reference processing currently works in G1.
 506   ref_processor_stw()->start_discovery(false /* always_clear */);
 507 
 508   _evac_failure_regions.pre_collection(_g1h->max_reserved_regions());
 509 
 510   _g1h->gc_prologue(false);
 511 
 512   // Initialize the GC alloc regions.
 513   allocator()->init_gc_alloc_regions(evacuation_info);
 514 
 515   {
 516     Ticks start = Ticks::now();
 517     rem_set()->prepare_for_scan_heap_roots();
 518     phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
 519   }
 520 
 521   {
 522     G1PrepareEvacuationTask g1_prep_task(_g1h);
 523     Tickspan task_time = run_task_timed(&g1_prep_task);
 524 
 525     _g1h->set_young_gen_card_set_stats(g1_prep_task.all_card_set_stats());
 526     _g1h->set_humongous_stats(g1_prep_task.humongous_total(), g1_prep_task.humongous_candidates());
 527 
 528     phase_times()->record_register_regions(task_time.seconds() * 1000.0);
 529   }
 530 
 531   assert(_g1h->verifier()->check_region_attr_table(), "Inconsistency in the region attributes table.");
 532 
 533 #if COMPILER2_OR_JVMCI
 534   DerivedPointerTable::clear();
 535 #endif
 536 
 537   allocation_failure_injector()->arm_if_needed();
 538 }
 539 
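// Closure that drains the calling worker's task queue and then repeatedly steals work
// from other queues until termination is agreed upon, tracking termination time and
// attempts for the phase times.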
 540 class G1ParEvacuateFollowersClosure : public VoidClosure {
 541   double _start_term;
 542   double _term_time;
 543   size_t _term_attempts;
 544 
 545   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
 546   void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
 547 
 548   G1CollectedHeap*              _g1h;
 549   G1ParScanThreadState*         _par_scan_state;
 550   G1ScannerTasksQueueSet*       _queues;
 551   TaskTerminator*               _terminator;
 552   G1GCPhaseTimes::GCParPhases   _phase;
 553 
 554   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
 555   G1ScannerTasksQueueSet* queues()         { return _queues; }
 556   TaskTerminator*         terminator()     { return _terminator; }
 557 
 558   inline bool offer_termination() {
 559     EventGCPhaseParallel event;
 560     G1ParScanThreadState* const pss = par_scan_state();
 561     start_term_time();
 562     const bool res = (terminator() == nullptr) ? true : terminator()->offer_termination();
 563     end_term_time();
 564     event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::Termination));
 565     return res;
 566   }
 567 
 568 public:
 569   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
 570                                 G1ParScanThreadState* par_scan_state,
 571                                 G1ScannerTasksQueueSet* queues,
 572                                 TaskTerminator* terminator,
 573                                 G1GCPhaseTimes::GCParPhases phase)
 574     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
 575       _g1h(g1h), _par_scan_state(par_scan_state),
 576       _queues(queues), _terminator(terminator), _phase(phase) {}
 577 
 578   void do_void() {
 579     EventGCPhaseParallel event;
 580     G1ParScanThreadState* const pss = par_scan_state();
 581     pss->trim_queue();
 582     event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
 583     do {
 584       EventGCPhaseParallel event;
 585       pss->steal_and_trim_queue(queues());
 586       event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
 587     } while (!offer_termination());
 588   }
 589 
 590   double term_time() const { return _term_time; }
 591   size_t term_attempts() const { return _term_attempts; }
 592 };
 593 
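// Common base for the initial and optional evacuation tasks: subclasses provide the
// root scanning step, while this class drives per-worker object copying, termination
// and the related timing bookkeeping.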
 594 class G1EvacuateRegionsBaseTask : public WorkerTask {
 595 protected:
 596   G1CollectedHeap* _g1h;
 597   G1ParScanThreadStateSet* _per_thread_states;
 598   G1ScannerTasksQueueSet* _task_queues;
 599   TaskTerminator _terminator;
 600   uint _num_workers;
 601 
 602   void evacuate_live_objects(G1ParScanThreadState* pss,
 603                              uint worker_id,
 604                              G1GCPhaseTimes::GCParPhases objcopy_phase,
 605                              G1GCPhaseTimes::GCParPhases termination_phase) {
 606     G1GCPhaseTimes* p = _g1h->phase_times();
 607 
 608     Ticks start = Ticks::now();
 609     G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, &_terminator, objcopy_phase);
 610     cl.do_void();
 611 
 612     assert(pss->queue_is_empty(), "should be empty");
 613 
 614     Tickspan evac_time = (Ticks::now() - start);
 615     p->record_or_add_time_secs(objcopy_phase, worker_id, evac_time.seconds() - cl.term_time());
 616 
 617     if (termination_phase == G1GCPhaseTimes::Termination) {
 618       p->record_time_secs(termination_phase, worker_id, cl.term_time());
 619       p->record_thread_work_item(termination_phase, worker_id, cl.term_attempts());
 620     } else {
 621       p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time());
 622       p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts());
 623     }
 624     assert(pss->trim_ticks().value() == 0,
 625            "Unexpected partial trimming during evacuation value " JLONG_FORMAT,
 626            pss->trim_ticks().value());
 627   }
 628 
 629   virtual void start_work(uint worker_id) { }
 630 
 631   virtual void end_work(uint worker_id) { }
 632 
 633   virtual void scan_roots(G1ParScanThreadState* pss, uint worker_id) = 0;
 634 
 635   virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;
 636 
 637 public:
 638   G1EvacuateRegionsBaseTask(const char* name,
 639                             G1ParScanThreadStateSet* per_thread_states,
 640                             G1ScannerTasksQueueSet* task_queues,
 641                             uint num_workers) :
 642     WorkerTask(name),
 643     _g1h(G1CollectedHeap::heap()),
 644     _per_thread_states(per_thread_states),
 645     _task_queues(task_queues),
 646     _terminator(num_workers, _task_queues),
 647     _num_workers(num_workers)
 648   { }
 649 
 650   void work(uint worker_id) {
 651     start_work(worker_id);
 652 
 653     {
 654       ResourceMark rm;
 655 
 656       G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
 657       pss->set_ref_discoverer(_g1h->ref_processor_stw());
 658 
 659       scan_roots(pss, worker_id);
 660       evacuate_live_objects(pss, worker_id);
 661     }
 662 
 663     end_work(worker_id);
 664   }
 665 };
 666 
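// Evacuation of the initial collection set: scans VM roots via the G1RootProcessor and
// the heap roots recorded in the remembered sets, then copies live objects.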
 667 class G1EvacuateRegionsTask : public G1EvacuateRegionsBaseTask {
 668   G1RootProcessor* _root_processor;
 669   bool _has_optional_evacuation_work;
 670 
 671   void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
 672     _root_processor->evacuate_roots(pss, worker_id);
 673     _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ObjCopy, _has_optional_evacuation_work);
 674     _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::CodeRoots, G1GCPhaseTimes::ObjCopy);
 675   }
 676 
 677   void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
 678     G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination);
 679   }
 680 
 681   void start_work(uint worker_id) {
 682     _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, Ticks::now().seconds());
 683   }
 684 
 685   void end_work(uint worker_id) {
 686     _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, Ticks::now().seconds());
 687   }
 688 
 689 public:
 690   G1EvacuateRegionsTask(G1CollectedHeap* g1h,
 691                         G1ParScanThreadStateSet* per_thread_states,
 692                         G1ScannerTasksQueueSet* task_queues,
 693                         G1RootProcessor* root_processor,
 694                         uint num_workers,
 695                         bool has_optional_evacuation_work) :
 696     G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
 697     _root_processor(root_processor),
 698     _has_optional_evacuation_work(has_optional_evacuation_work)
 699   { }
 700 };
 701 
 702 void G1YoungCollector::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states,
 703                                                       bool has_optional_evacuation_work) {
 704   G1GCPhaseTimes* p = phase_times();
 705 
 706   {
 707     Ticks start = Ticks::now();
 708     rem_set()->merge_heap_roots(true /* initial_evacuation */);
 709     p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
 710   }
 711 
 712   Tickspan task_time;
 713   const uint num_workers = workers()->active_workers();
 714 
 715   Ticks start_processing = Ticks::now();
 716   {
 717     G1RootProcessor root_processor(_g1h, num_workers);
 718     G1EvacuateRegionsTask g1_par_task(_g1h,
 719                                       per_thread_states,
 720                                       task_queues(),
 721                                       &root_processor,
 722                                       num_workers,
 723                                       has_optional_evacuation_work);
 724     task_time = run_task_timed(&g1_par_task);
 725     // Closing the inner scope will execute the destructor for the
    // G1RootProcessor object. By subtracting the time of the WorkerThreads task from
    // the total time of this scope, we get the "NMethod List Cleanup" time. This list
    // is constructed during "STW two-phase nmethod root processing"; see more in
    // nmethod.hpp.
 730   }
 731   Tickspan total_processing = Ticks::now() - start_processing;
 732 
 733   p->record_initial_evac_time(task_time.seconds() * 1000.0);
 734   p->record_or_add_nmethod_list_cleanup_time((total_processing - task_time).seconds() * 1000.0);
 735 
 736   rem_set()->complete_evac_phase(has_optional_evacuation_work);
 737 }
 738 
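// Evacuation of optional collection set regions: only the remembered set based heap
// roots need to be scanned here, as VM roots were already processed during the initial
// evacuation.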
 739 class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {
 740 
 741   void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
 742     _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy, true /* remember_already_scanned_cards */);
 743     _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
 744   }
 745 
 746   void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
 747     G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
 748   }
 749 
 750 public:
 751   G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
 752                                 G1ScannerTasksQueueSet* queues,
 753                                 uint num_workers) :
 754     G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
 755   }
 756 };
 757 
 758 void G1YoungCollector::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
 759   // To access the protected constructor/destructor
 760   class G1MarkScope : public MarkScope { };
 761 
 762   Tickspan task_time;
 763 
 764   Ticks start_processing = Ticks::now();
 765   {
 766     G1MarkScope code_mark_scope;
 767     G1EvacuateOptionalRegionsTask task(per_thread_states, task_queues(), workers()->active_workers());
 768     task_time = run_task_timed(&task);
 769     // See comment in evacuate_initial_collection_set() for the reason of the scope.
 770   }
 771   Tickspan total_processing = Ticks::now() - start_processing;
 772 
 773   G1GCPhaseTimes* p = phase_times();
 774   p->record_or_add_nmethod_list_cleanup_time((total_processing - task_time).seconds() * 1000.0);
 775 }
 776 
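// Evacuate the optional collection set in increments: each iteration finalizes as many
// optional regions as are expected to fit into the remaining pause time budget and
// evacuates them, stopping early on evacuation allocation failure or when no more
// regions fit.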
 777 void G1YoungCollector::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
 778   const double collection_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;
 779 
 780   while (!evacuation_alloc_failed() && collection_set()->optional_region_length() > 0) {
 781 
 782     double time_used_ms = os::elapsedTime() * 1000.0 - collection_start_time_ms;
 783     double time_left_ms = MaxGCPauseMillis - time_used_ms;
 784 
 785     if (time_left_ms < 0 ||
 786         !collection_set()->finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
 787       log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
 788                                 collection_set()->optional_region_length(), time_left_ms);
 789       break;
 790     }
 791 
 792     {
 793       Ticks start = Ticks::now();
 794       rem_set()->merge_heap_roots(false /* initial_evacuation */);
 795       phase_times()->record_or_add_optional_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
 796     }
 797 
 798     {
 799       Ticks start = Ticks::now();
 800       evacuate_next_optional_regions(per_thread_states);
 801       phase_times()->record_or_add_optional_evac_time((Ticks::now() - start).seconds() * 1000.0);
 802     }
 803 
 804     rem_set()->complete_evac_phase(true /* has_more_than_one_evacuation_phase */);
 805   }
 806 
 807   collection_set()->abandon_optional_collection_set(per_thread_states);
 808 }
 809 
 810 // Non Copying Keep Alive closure
 811 class G1KeepAliveClosure: public OopClosure {
 812   G1CollectedHeap*_g1h;
 813 public:
 814   G1KeepAliveClosure(G1CollectedHeap* g1h) :_g1h(g1h) {}
 815   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
 816   void do_oop(oop* p) {
 817     oop obj = *p;
 818     assert(obj != nullptr, "the caller should have filtered out null values");
 819 
 820     const G1HeapRegionAttr region_attr =_g1h->region_attr(obj);
 821     if (!region_attr.is_in_cset_or_humongous_candidate()) {
 822       return;
 823     }
 824     if (region_attr.is_in_cset()) {
 825       assert(obj->is_forwarded(), "invariant" );
 826       *p = obj->forwardee();
 827     } else {
 828       assert(!obj->is_forwarded(), "invariant" );
 829       assert(region_attr.is_humongous_candidate(),
 830              "Only allowed G1HeapRegionAttr state is IsHumongous, but is %d", region_attr.type());
 831      _g1h->set_humongous_is_live(obj);
 832     }
 833   }
 834 };
 835 
 836 // Copying Keep Alive closure - can be called from both
 837 // serial and parallel code as long as different worker
 838 // threads utilize different G1ParScanThreadState instances
 839 // and different queues.
 840 class G1CopyingKeepAliveClosure: public OopClosure {
 841   G1CollectedHeap* _g1h;
 842   G1ParScanThreadState*    _par_scan_state;
 843 
 844 public:
 845   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
 846                             G1ParScanThreadState* pss):
 847     _g1h(g1h),
 848     _par_scan_state(pss)
 849   {}
 850 
 851   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
 852   virtual void do_oop(      oop* p) { do_oop_work(p); }
 853 
 854   template <class T> void do_oop_work(T* p) {
 855     oop obj = RawAccess<>::oop_load(p);
 856 
 857     if (_g1h->is_in_cset_or_humongous_candidate(obj)) {
 858       // If the referent object has been forwarded (either copied
 859       // to a new location or to itself in the event of an
 860       // evacuation failure) then we need to update the reference
 861       // field and, if both reference and referent are in the G1
 862       // heap, update the RSet for the referent.
 863       //
 864       // If the referent has not been forwarded then we have to keep
      // it alive by policy. Therefore we have to copy the referent.
      //
      // When the queue is drained (after each phase of reference processing)
      // the object and its followers will be copied, the reference field set
 869       // to point to the new location, and the RSet updated.
 870       _par_scan_state->push_on_queue(ScannerTask(p));
 871     }
 872   }
 873 };
 874 
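// Proxy task running STW reference processing on the GC workers; referents that must
// be kept alive are copied using the regular evacuation machinery (the keep-alive
// closure pushes onto the scanner queues, which are then drained).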
 875 class G1STWRefProcProxyTask : public RefProcProxyTask {
 876   G1CollectedHeap& _g1h;
 877   G1ParScanThreadStateSet& _pss;
 878   TaskTerminator _terminator;
 879   G1ScannerTasksQueueSet& _task_queues;
 880 
 881   // Special closure for enqueuing discovered fields: during enqueue the card table
 882   // may not be in shape to properly handle normal barrier calls (e.g. card marks
 883   // in regions that failed evacuation, scribbling of various values by card table
 884   // scan code). Additionally the regular barrier enqueues into the "global"
 885   // DCQS, but during GC we need these to-be-refined entries in the GC local queue
 886   // so that after clearing the card table, the redirty cards phase will properly
 887   // mark all dirty cards to be picked up by refinement.
 888   class G1EnqueueDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
 889     G1CollectedHeap* _g1h;
 890     G1ParScanThreadState* _pss;
 891 
 892   public:
 893     G1EnqueueDiscoveredFieldClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) : _g1h(g1h), _pss(pss) { }
 894 
 895     void enqueue(HeapWord* discovered_field_addr, oop value) override {
 896       assert(_g1h->is_in(discovered_field_addr), PTR_FORMAT " is not in heap ", p2i(discovered_field_addr));
 897       // Store the value first, whatever it is.
 898       RawAccess<>::oop_store(discovered_field_addr, value);
 899       if (value == nullptr) {
 900         return;
 901       }
 902       _pss->write_ref_field_post(discovered_field_addr, value);
 903     }
 904   };
 905 
 906 public:
 907   G1STWRefProcProxyTask(uint max_workers, G1CollectedHeap& g1h, G1ParScanThreadStateSet& pss, G1ScannerTasksQueueSet& task_queues)
 908     : RefProcProxyTask("G1STWRefProcProxyTask", max_workers),
 909       _g1h(g1h),
 910       _pss(pss),
 911       _terminator(max_workers, &task_queues),
 912       _task_queues(task_queues) {}
 913 
 914   void work(uint worker_id) override {
 915     assert(worker_id < _max_workers, "sanity");
 916     uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
 917 
 918     G1ParScanThreadState* pss = _pss.state_for_worker(index);
 919     pss->set_ref_discoverer(nullptr);
 920 
 921     G1STWIsAliveClosure is_alive(&_g1h);
 922     G1CopyingKeepAliveClosure keep_alive(&_g1h, pss);
 923     G1EnqueueDiscoveredFieldClosure enqueue(&_g1h, pss);
 924     G1ParEvacuateFollowersClosure complete_gc(&_g1h, pss, &_task_queues, _tm == RefProcThreadModel::Single ? nullptr : &_terminator, G1GCPhaseTimes::ObjCopy);
 925     _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
 926 
 927     // We have completed copying any necessary live referent objects.
 928     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
 929   }
 930 
 931   void prepare_run_task_hook() override {
 932     _terminator.reset_for_reuse(_queue_count);
 933   }
 934 };
 935 
 936 void G1YoungCollector::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
 937   Ticks start = Ticks::now();
 938 
 939   ReferenceProcessor* rp = ref_processor_stw();
 940   assert(rp->discovery_enabled(), "should have been enabled");
 941 
 942   uint no_of_gc_workers = workers()->active_workers();
 943   rp->set_active_mt_degree(no_of_gc_workers);
 944 
 945   G1STWRefProcProxyTask task(rp->max_num_queues(), *_g1h, *per_thread_states, *task_queues());
 946   ReferenceProcessorPhaseTimes& pt = *phase_times()->ref_phase_times();
 947   ReferenceProcessorStats stats = rp->process_discovered_references(task, pt);
 948 
 949   gc_tracer_stw()->report_gc_reference_stats(stats);
 950 
 951   _g1h->make_pending_list_reachable();
 952 
 953   phase_times()->record_ref_proc_time((Ticks::now() - start).seconds() * MILLIUNITS);
 954 }
 955 
 956 void G1YoungCollector::post_evacuate_cleanup_1(G1ParScanThreadStateSet* per_thread_states) {
 957   Ticks start = Ticks::now();
 958   {
 959     G1PostEvacuateCollectionSetCleanupTask1 cl(per_thread_states, &_evac_failure_regions);
 960     _g1h->run_batch_task(&cl);
 961   }
 962   phase_times()->record_post_evacuate_cleanup_task_1_time((Ticks::now() - start).seconds() * 1000.0);
 963 }
 964 
 965 void G1YoungCollector::post_evacuate_cleanup_2(G1ParScanThreadStateSet* per_thread_states,
 966                                                G1EvacInfo* evacuation_info) {
 967   Ticks start = Ticks::now();
 968   {
 969     G1PostEvacuateCollectionSetCleanupTask2 cl(per_thread_states, evacuation_info, &_evac_failure_regions);
 970     _g1h->run_batch_task(&cl);
 971   }
 972   phase_times()->record_post_evacuate_cleanup_task_2_time((Ticks::now() - start).seconds() * 1000.0);
 973 }
 974 
 975 void G1YoungCollector::enqueue_candidates_as_root_regions() {
 976   assert(collector_state()->in_concurrent_start_gc(), "must be");
 977 
 978   G1CollectionSetCandidates* candidates = collection_set()->candidates();
 979   for (HeapRegion* r : *candidates) {
 980     _g1h->concurrent_mark()->add_root_region(r);
 981   }
 982 }
 983 
 984 void G1YoungCollector::post_evacuate_collection_set(G1EvacInfo* evacuation_info,
 985                                                     G1ParScanThreadStateSet* per_thread_states) {
 986   G1GCPhaseTimes* p = phase_times();
 987 
 988   // Process any discovered reference objects - we have
 989   // to do this _before_ we retire the GC alloc regions
 990   // as we may have to copy some 'reachable' referent
 991   // objects (and their reachable sub-graphs) that were
 992   // not copied during the pause.
 993   process_discovered_references(per_thread_states);
 994 
 995   G1STWIsAliveClosure is_alive(_g1h);
 996   G1KeepAliveClosure keep_alive(_g1h);
 997 
 998   WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive, p->weak_phase_times());
 999 
1000   allocator()->release_gc_alloc_regions(evacuation_info);
1001 
1002   post_evacuate_cleanup_1(per_thread_states);
1003 
1004   post_evacuate_cleanup_2(per_thread_states, evacuation_info);
1005 
  // Regions in the collection set candidates are roots for the marking (they are
  // not marked through, considering they are very likely to be reclaimed soon).
  // Unlike survivor regions, they need to be enqueued explicitly.
1009   if (collector_state()->in_concurrent_start_gc()) {
1010     enqueue_candidates_as_root_regions();
1011   }
1012 
1013   _evac_failure_regions.post_collection();
1014 
1015   assert_used_and_recalculate_used_equal(_g1h);
1016 
1017   _g1h->rebuild_free_region_list();
1018 
1019   _g1h->record_obj_copy_mem_stats();
1020 
1021   evacuation_info->set_bytes_used(_g1h->bytes_used_during_gc());
1022 
1023   _g1h->prepare_for_mutator_after_young_collection();
1024 
1025   _g1h->gc_epilogue(false);
1026 
1027   _g1h->expand_heap_after_young_collection();
1028 }
1029 
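// Evacuation failure accessors: evacuation_failed() reports any kind of failure, while
// the two accessors below distinguish failures due to pinned regions from failures to
// allocate space for an object copy.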
1030 bool G1YoungCollector::evacuation_failed() const {
1031   return _evac_failure_regions.has_regions_evac_failed();
1032 }
1033 
1034 bool G1YoungCollector::evacuation_pinned() const {
1035   return _evac_failure_regions.has_regions_evac_pinned();
1036 }
1037 
1038 bool G1YoungCollector::evacuation_alloc_failed() const {
1039   return _evac_failure_regions.has_regions_alloc_failed();
1040 }
1041 
1042 G1YoungCollector::G1YoungCollector(GCCause::Cause gc_cause) :
1043   _g1h(G1CollectedHeap::heap()),
1044   _gc_cause(gc_cause),
1045   _concurrent_operation_is_full_mark(false),
1046   _evac_failure_regions()
1047 {
1048 }
1049 
1050 void G1YoungCollector::collect() {
1051   // Do timing/tracing/statistics/pre- and post-logging/verification work not
1052   // directly related to the collection. They should not be accounted for in
1053   // collection work timing.
1054 
1055   // The G1YoungGCTraceTime message depends on collector state, so must come after
1056   // determining collector state.
1057   G1YoungGCTraceTime tm(this, _gc_cause);
1058 
1059   // JFR
1060   G1YoungGCJFRTracerMark jtm(gc_timer_stw(), gc_tracer_stw(), _gc_cause);
1061   // JStat/MXBeans
1062   G1YoungGCMonitoringScope ms(monitoring_support(),
1063                               !collection_set()->candidates()->is_empty() /* all_memory_pools_affected */);
1064   // Create the heap printer before internal pause timing to have
1065   // heap information printed as last part of detailed GC log.
1066   G1HeapPrinterMark hpm(_g1h);
1067   // Young GC internal pause timing
1068   G1YoungGCNotifyPauseMark npm(this);
1069 
1070   // Verification may use the workers, so they must be set up before.
1071   // Individual parallel phases may override this.
1072   set_young_collection_default_active_worker_threads();
1073 
  // Wait for the root region scan here to make sure that it is done before any
  // use of the STW workers; this maximizes CPU use because all cores are still
  // available just for that scan.
1077   wait_for_root_region_scanning();
1078 
1079   G1YoungGCVerifierMark vm(this);
1080   {
1081     // Actual collection work starts and is executed (only) in this scope.
1082 
1083     // Young GC internal collection timing. The elapsed time recorded in the
1084     // policy for the collection deliberately elides verification (and some
1085     // other trivial setup above).
1086     policy()->record_young_collection_start();
1087 
1088     pre_evacuate_collection_set(jtm.evacuation_info());
1089 
1090     G1ParScanThreadStateSet per_thread_states(_g1h,
1091                                               workers()->active_workers(),
1092                                               collection_set(),
1093                                               &_evac_failure_regions);
1094 
1095     bool may_do_optional_evacuation = collection_set()->optional_region_length() != 0;
1096     // Actually do the work...
1097     evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);
1098 
1099     if (may_do_optional_evacuation) {
1100       evacuate_optional_collection_set(&per_thread_states);
1101     }
1102     post_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);
1103 
    // Refine the type of the concurrent mark operation now that the evacuation is
    // done, potentially aborting it.
1106     _concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP");
1107 
    // Need to report the collection pause type now since record_young_collection_end()
    // below advances the collector state that the pause type is derived from.
1110     jtm.report_pause_type(collector_state()->young_gc_pause_type(_concurrent_operation_is_full_mark));
1111 
1112     policy()->record_young_collection_end(_concurrent_operation_is_full_mark, evacuation_alloc_failed());
1113   }
1114   TASKQUEUE_STATS_ONLY(_g1h->task_queues()->print_and_reset_taskqueue_stats("Oop Queue");)
1115 }