/*
 * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/classLoaderDataGraph.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1CardSetMemory.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSetCandidates.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1EvacInfo.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RedirtyCardsQueue.hpp"
#include "gc/g1/g1RegionPinCache.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/g1/g1YoungCollector.hpp"
#include "gc/g1/g1YoungGCAllocationFailureInjector.hpp"
#include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
#include "gc/g1/g1YoungGCPreEvacuateTasks.hpp"
#include "gc/shared/concurrentGCBreakpoints.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/threads.hpp"
#include "utilities/ticks.hpp"

// GCTraceTime wrapper that constructs the message according to GC pause type and
// GC cause.
// The code relies on the fact that GCTraceTimeWrapper stores the string passed
// initially as a reference only, so that we can modify it as needed.
class G1YoungGCTraceTime {
  G1YoungCollector* _collector;

  G1GCPauseType _pause_type;
  GCCause::Cause _pause_cause;

  static const uint MaxYoungGCNameLength = 128;
  char _young_gc_name_data[MaxYoungGCNameLength];

  GCTraceTime(Info, gc) _tt;

  const char* update_young_gc_name() {
    char evacuation_failed_string[48];
    evacuation_failed_string[0] = '\0';

    if (_collector->evacuation_failed()) {
      snprintf(evacuation_failed_string,
               ARRAY_SIZE(evacuation_failed_string),
               " (Evacuation Failure: %s%s%s)",
               _collector->evacuation_alloc_failed() ? "Allocation" : "",
               _collector->evacuation_alloc_failed() && _collector->evacuation_pinned() ? " / " : "",
               _collector->evacuation_pinned() ? "Pinned" : "");
    }
    snprintf(_young_gc_name_data,
             MaxYoungGCNameLength,
             "Pause Young (%s) (%s)%s",
             G1GCPauseTypeHelper::to_string(_pause_type),
             GCCause::to_string(_pause_cause),
             evacuation_failed_string);
    return _young_gc_name_data;
  }

public:
  G1YoungGCTraceTime(G1YoungCollector* collector, GCCause::Cause cause) :
    _collector(collector),
    // Take snapshot of current pause type at start as it may be modified during gc.
    // The strings for all Concurrent Start pauses are the same, so the parameter
    // does not matter here.
    _pause_type(_collector->collector_state()->young_gc_pause_type(false /* concurrent_operation_is_full_mark */)),
    _pause_cause(cause),
    // Fake a "no cause" and manually add the correct string in update_young_gc_name()
    // to make the string look more natural.
    _tt(update_young_gc_name(), nullptr, GCCause::_no_gc, true) {
  }

  ~G1YoungGCTraceTime() {
    update_young_gc_name();
  }
};

class G1YoungGCNotifyPauseMark : public StackObj {
  G1YoungCollector* _collector;

public:
  G1YoungGCNotifyPauseMark(G1YoungCollector* collector) : _collector(collector) {
    G1CollectedHeap::heap()->policy()->record_young_gc_pause_start();
  }

  ~G1YoungGCNotifyPauseMark() {
    G1CollectedHeap::heap()->policy()->record_young_gc_pause_end(_collector->evacuation_failed());
  }
};

class G1YoungGCJFRTracerMark : public G1JFRTracerMark {
  G1EvacInfo _evacuation_info;

  G1NewTracer* tracer() const { return (G1NewTracer*)_tracer; }

public:

  G1EvacInfo* evacuation_info() { return &_evacuation_info; }

  G1YoungGCJFRTracerMark(STWGCTimer* gc_timer_stw, G1NewTracer* gc_tracer_stw, GCCause::Cause cause) :
    G1JFRTracerMark(gc_timer_stw, gc_tracer_stw), _evacuation_info() { }

  void report_pause_type(G1GCPauseType type) {
    tracer()->report_young_gc_pause(type);
  }

  ~G1YoungGCJFRTracerMark() {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    tracer()->report_evacuation_info(&_evacuation_info);
    tracer()->report_tenuring_threshold(g1h->policy()->tenuring_threshold());
  }
};

class G1YoungGCVerifierMark : public StackObj {
  G1YoungCollector* _collector;
  G1HeapVerifier::G1VerifyType _type;

  static G1HeapVerifier::G1VerifyType young_collection_verify_type() {
    G1CollectorState* state = G1CollectedHeap::heap()->collector_state();
    if (state->in_concurrent_start_gc()) {
      return G1HeapVerifier::G1VerifyConcurrentStart;
    } else if (state->in_young_only_phase()) {
      return G1HeapVerifier::G1VerifyYoungNormal;
    } else {
      return G1HeapVerifier::G1VerifyMixed;
    }
  }

public:
  G1YoungGCVerifierMark(G1YoungCollector* collector) : _collector(collector), _type(young_collection_verify_type()) {
    G1CollectedHeap::heap()->verify_before_young_collection(_type);
  }

  ~G1YoungGCVerifierMark() {
    // Inject evacuation failure tag into type if needed.
    G1HeapVerifier::G1VerifyType type = _type;
    if (_collector->evacuation_failed()) {
      type = (G1HeapVerifier::G1VerifyType)(type | G1HeapVerifier::G1VerifyYoungEvacFail);
    }
    G1CollectedHeap::heap()->verify_after_young_collection(type);
  }
};

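// Convenience accessors that forward to the owning G1CollectedHeap.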
G1Allocator* G1YoungCollector::allocator() const {
  return _g1h->allocator();
}

G1CollectionSet* G1YoungCollector::collection_set() const {
  return _g1h->collection_set();
}

G1CollectorState* G1YoungCollector::collector_state() const {
  return _g1h->collector_state();
}

G1ConcurrentMark* G1YoungCollector::concurrent_mark() const {
  return _g1h->concurrent_mark();
}

STWGCTimer* G1YoungCollector::gc_timer_stw() const {
  return _g1h->gc_timer_stw();
}

G1NewTracer* G1YoungCollector::gc_tracer_stw() const {
  return _g1h->gc_tracer_stw();
}

G1Policy* G1YoungCollector::policy() const {
  return _g1h->policy();
}

G1GCPhaseTimes* G1YoungCollector::phase_times() const {
  return _g1h->phase_times();
}

G1MonitoringSupport* G1YoungCollector::monitoring_support() const {
  return _g1h->monitoring_support();
}

G1RemSet* G1YoungCollector::rem_set() const {
  return _g1h->rem_set();
}

G1ScannerTasksQueueSet* G1YoungCollector::task_queues() const {
  return _g1h->task_queues();
}

G1SurvivorRegions* G1YoungCollector::survivor_regions() const {
  return _g1h->survivor();
}

ReferenceProcessor* G1YoungCollector::ref_processor_stw() const {
  return _g1h->ref_processor_stw();
}

WorkerThreads* G1YoungCollector::workers() const {
  return _g1h->workers();
}

G1YoungGCAllocationFailureInjector* G1YoungCollector::allocation_failure_injector() const {
  return _g1h->allocation_failure_injector();
}

void G1YoungCollector::wait_for_root_region_scanning() {
  Ticks start = Ticks::now();
  // We have to wait until the CM threads finish scanning the
  // root regions as it's the only way to ensure that all the
  // objects on them have been correctly scanned before we start
  // moving them during the GC.
  bool waited = concurrent_mark()->wait_until_root_region_scan_finished();
  Tickspan wait_time;
  if (waited) {
    wait_time = (Ticks::now() - start);
  }
  phase_times()->record_root_region_scan_wait_time(wait_time.seconds() * MILLIUNITS);
}

class G1PrintCollectionSetClosure : public HeapRegionClosure {
public:
  virtual bool do_heap_region(G1HeapRegion* r) {
    G1HeapRegionPrinter::cset(r);
    return false;
  }
};

void G1YoungCollector::calculate_collection_set(G1EvacInfo* evacuation_info, double target_pause_time_ms) {
  // Forget the current allocation region (we might even choose it to be part
  // of the collection set!) before finalizing the collection set.
  allocator()->release_mutator_alloc_regions();

  collection_set()->finalize_initial_collection_set(target_pause_time_ms, survivor_regions());
  evacuation_info->set_collection_set_regions(collection_set()->region_length() +
                                              collection_set()->optional_region_length());

  concurrent_mark()->verify_no_collection_set_oops();

  if (G1HeapRegionPrinter::is_active()) {
    G1PrintCollectionSetClosure cl;
    collection_set()->iterate(&cl);
    collection_set()->iterate_optional(&cl);
  }
}

class G1PrepareEvacuationTask : public WorkerTask {
  class G1PrepareRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1PrepareEvacuationTask* _parent_task;
    uint _worker_humongous_total;
    uint _worker_humongous_candidates;

    G1MonotonicArenaMemoryStats _card_set_stats;

    void sample_card_set_size(G1HeapRegion* hr) {
      // Sample card set sizes for young gen and humongous regions before GC, so
      // that the policy that gives back memory to the OS keeps the most recent
      // amount of memory for these regions.
      if (hr->is_young() || hr->is_starts_humongous()) {
        _card_set_stats.add(hr->rem_set()->card_set_memory_stats());
      }
    }

    bool humongous_region_is_candidate(G1HeapRegion* region) const {
      assert(region->is_starts_humongous(), "Must start a humongous object");

      oop obj = cast_to_oop(region->bottom());

      // Dead objects cannot be eager reclaim candidates. Due to class
      // unloading it is unsafe to query their classes so we return early.
      if (_g1h->is_obj_dead(obj, region)) {
        return false;
      }

      // If we do not have a complete remembered set for the region, then we
      // cannot be sure that we have all references to it.
      if (!region->rem_set()->is_complete()) {
        return false;
      }
      // We also cannot collect the humongous object if it is pinned.
      if (region->has_pinned_objects()) {
        return false;
      }
      // Candidate selection must satisfy the following constraints
      // while concurrent marking is in progress:
      //
      // * In order to maintain SATB invariants, an object must not be
      // reclaimed if it was allocated before the start of marking and
      // has not had its references scanned.  Such an object must have
      // its references (including type metadata) scanned to ensure no
      // live objects are missed by the marking process.  Objects
      // allocated after the start of concurrent marking don't need to
      // be scanned.
      //
      // * An object must not be reclaimed if it is on the concurrent
      // mark stack.  Objects allocated after the start of concurrent
      // marking are never pushed on the mark stack.
      //
      // Nominating only objects allocated after the start of concurrent
      // marking is sufficient to meet both constraints.  This may miss
      // some objects that satisfy the constraints, but the marking data
      // structures don't support efficiently performing the needed
      // additional tests or scrubbing of the mark stack.
      //
      // However, we presently only nominate is_typeArray() objects.
      // A humongous object containing references induces remembered
      // set entries on other regions.  In order to reclaim such an
      // object, those remembered sets would need to be cleaned up.
      //
      // We also treat is_typeArray() objects specially, allowing them
      // to be reclaimed even if allocated before the start of
      // concurrent mark.  For this we rely on mark stack insertion to
      // exclude is_typeArray() objects, preventing reclaiming an object
      // that is in the mark stack.  We also rely on the metadata for
      // such objects to be built-in and so ensured to be kept live.
      // Frequent allocation and drop of large binary blobs is an
      // important use case for eager reclaim, and this special handling
      // may reduce needed headroom.

      return obj->is_typeArray() &&
             _g1h->is_potential_eager_reclaim_candidate(region);
    }

  public:
    G1PrepareRegionsClosure(G1CollectedHeap* g1h, G1PrepareEvacuationTask* parent_task) :
      _g1h(g1h),
      _parent_task(parent_task),
      _worker_humongous_total(0),
      _worker_humongous_candidates(0) { }

    ~G1PrepareRegionsClosure() {
      _parent_task->add_humongous_candidates(_worker_humongous_candidates);
      _parent_task->add_humongous_total(_worker_humongous_total);
    }

    virtual bool do_heap_region(G1HeapRegion* hr) {
      // First prepare the region for scanning
      _g1h->rem_set()->prepare_region_for_scan(hr);

      sample_card_set_size(hr);

      // Now check if region is a humongous candidate
      if (!hr->is_starts_humongous()) {
        _g1h->register_region_with_region_attr(hr);
        return false;
      }

      uint index = hr->hrm_index();
      if (humongous_region_is_candidate(hr)) {
        _g1h->register_humongous_candidate_region_with_region_attr(index);
        _worker_humongous_candidates++;
        // We will later handle the remembered sets of these regions.
      } else {
        _g1h->register_region_with_region_attr(hr);
      }
      log_debug(gc, humongous)("Humongous region %u (object size %zu @ " PTR_FORMAT ") remset %zu code roots %zu "
                               "marked %d pinned count %zu reclaim candidate %d type array %d",
                               index,
                               cast_to_oop(hr->bottom())->size() * HeapWordSize,
                               p2i(hr->bottom()),
                               hr->rem_set()->occupied(),
                               hr->rem_set()->code_roots_list_length(),
                               _g1h->concurrent_mark()->mark_bitmap()->is_marked(hr->bottom()),
                               hr->pinned_count(),
                               _g1h->is_humongous_reclaim_candidate(index),
                               cast_to_oop(hr->bottom())->is_typeArray()
                              );
      _worker_humongous_total++;

      return false;
    }

    G1MonotonicArenaMemoryStats card_set_stats() const {
      return _card_set_stats;
    }
  };

  G1CollectedHeap* _g1h;
  HeapRegionClaimer _claimer;
  volatile uint _humongous_total;
  volatile uint _humongous_candidates;

  G1MonotonicArenaMemoryStats _all_card_set_stats;

public:
  G1PrepareEvacuationTask(G1CollectedHeap* g1h) :
    WorkerTask("Prepare Evacuation"),
    _g1h(g1h),
    _claimer(_g1h->workers()->active_workers()),
    _humongous_total(0),
    _humongous_candidates(0) { }

  void work(uint worker_id) {
    G1PrepareRegionsClosure cl(_g1h, this);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_claimer, worker_id);

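    // Merge this worker's card set statistics into the task-wide total under a lock.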
    MutexLocker x(G1RareEvent_lock, Mutex::_no_safepoint_check_flag);
    _all_card_set_stats.add(cl.card_set_stats());
  }

  void add_humongous_candidates(uint candidates) {
    Atomic::add(&_humongous_candidates, candidates);
  }

  void add_humongous_total(uint total) {
    Atomic::add(&_humongous_total, total);
  }

  uint humongous_candidates() {
    return _humongous_candidates;
  }

  uint humongous_total() {
    return _humongous_total;
  }

  const G1MonotonicArenaMemoryStats all_card_set_stats() const {
    return _all_card_set_stats;
  }
};

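// Run the given task on the active workers, returning the wall time it took.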
Tickspan G1YoungCollector::run_task_timed(WorkerTask* task) {
  Ticks start = Ticks::now();
  workers()->run_task(task);
  return Ticks::now() - start;
}

void G1YoungCollector::set_young_collection_default_active_worker_threads() {
  uint active_workers = WorkerPolicy::calc_active_workers(workers()->max_workers(),
                                                          workers()->active_workers(),
                                                          Threads::number_of_non_daemon_threads());
  active_workers = workers()->set_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for evacuation", active_workers, workers()->max_workers());
}

void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info) {
  // Flush various data in thread-local buffers to be able to determine the
  // collection set.
  {
    Ticks start = Ticks::now();
    G1PreEvacuateCollectionSetBatchTask cl;
    G1CollectedHeap::heap()->run_batch_task(&cl);
    phase_times()->record_pre_evacuate_prepare_time_ms((Ticks::now() - start).seconds() * 1000.0);
  }

  // Needs log buffers flushed.
  calculate_collection_set(evacuation_info, policy()->max_pause_time_ms());

  if (collector_state()->in_concurrent_start_gc()) {
    concurrent_mark()->pre_concurrent_start(_gc_cause);
  }

  // Please see comment in g1CollectedHeap.hpp and
  // G1CollectedHeap::ref_processing_init() to see how
  // reference processing currently works in G1.
  ref_processor_stw()->start_discovery(false /* always_clear */);

  _evac_failure_regions.pre_collection(_g1h->max_reserved_regions());

  _g1h->gc_prologue(false);

  // Initialize the GC alloc regions.
  allocator()->init_gc_alloc_regions(evacuation_info);

  {
    Ticks start = Ticks::now();
    rem_set()->prepare_for_scan_heap_roots();
    phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
  }

  {
    G1PrepareEvacuationTask g1_prep_task(_g1h);
    Tickspan task_time = run_task_timed(&g1_prep_task);

    _g1h->set_young_gen_card_set_stats(g1_prep_task.all_card_set_stats());
    _g1h->set_humongous_stats(g1_prep_task.humongous_total(), g1_prep_task.humongous_candidates());

    phase_times()->record_register_regions(task_time.seconds() * 1000.0);
  }

  assert(_g1h->verifier()->check_region_attr_table(), "Inconsistency in the region attributes table.");

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  allocation_failure_injector()->arm_if_needed();
}

class G1ParEvacuateFollowersClosure : public VoidClosure {
  double _start_term;
  double _term_time;
  size_t _term_attempts;

  void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
  void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }

  G1CollectedHeap*              _g1h;
  G1ParScanThreadState*         _par_scan_state;
  G1ScannerTasksQueueSet*       _queues;
  TaskTerminator*               _terminator;
  G1GCPhaseTimes::GCParPhases   _phase;

  G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  G1ScannerTasksQueueSet* queues()         { return _queues; }
  TaskTerminator*         terminator()     { return _terminator; }

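  // Time one termination attempt and report it as a JFR event. A null
  // terminator (serial processing) terminates immediately.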
  inline bool offer_termination() {
    EventGCPhaseParallel event;
    G1ParScanThreadState* const pss = par_scan_state();
    start_term_time();
    const bool res = (terminator() == nullptr) ? true : terminator()->offer_termination();
    end_term_time();
    event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::Termination));
    return res;
  }

public:
  G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
                                G1ParScanThreadState* par_scan_state,
                                G1ScannerTasksQueueSet* queues,
                                TaskTerminator* terminator,
                                G1GCPhaseTimes::GCParPhases phase)
    : _start_term(0.0), _term_time(0.0), _term_attempts(0),
      _g1h(g1h), _par_scan_state(par_scan_state),
      _queues(queues), _terminator(terminator), _phase(phase) {}

  void do_void() {
    EventGCPhaseParallel event;
    G1ParScanThreadState* const pss = par_scan_state();
    pss->trim_queue();
    event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
    do {
      EventGCPhaseParallel event;
      pss->steal_and_trim_queue(queues());
      event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
    } while (!offer_termination());
  }

  double term_time() const { return _term_time; }
  size_t term_attempts() const { return _term_attempts; }
};

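// Base class for the collection set evacuation tasks: subclasses provide the
// root scanning step, after which live objects are evacuated by draining and
// stealing from the per-worker task queues until termination.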
class G1EvacuateRegionsBaseTask : public WorkerTask {
protected:
  G1CollectedHeap* _g1h;
  G1ParScanThreadStateSet* _per_thread_states;
  G1ScannerTasksQueueSet* _task_queues;
  TaskTerminator _terminator;
  uint _num_workers;

  void evacuate_live_objects(G1ParScanThreadState* pss,
                             uint worker_id,
                             G1GCPhaseTimes::GCParPhases objcopy_phase,
                             G1GCPhaseTimes::GCParPhases termination_phase) {
    G1GCPhaseTimes* p = _g1h->phase_times();

    Ticks start = Ticks::now();
    G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, &_terminator, objcopy_phase);
    cl.do_void();

    assert(pss->queue_is_empty(), "should be empty");

    Tickspan evac_time = (Ticks::now() - start);
    p->record_or_add_time_secs(objcopy_phase, worker_id, evac_time.seconds() - cl.term_time());

    if (termination_phase == G1GCPhaseTimes::Termination) {
      p->record_time_secs(termination_phase, worker_id, cl.term_time());
      p->record_thread_work_item(termination_phase, worker_id, cl.term_attempts());
    } else {
      p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time());
      p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts());
    }
    assert(pss->trim_ticks().value() == 0,
           "Unexpected partial trimming during evacuation value " JLONG_FORMAT,
           pss->trim_ticks().value());
  }

  virtual void start_work(uint worker_id) { }

  virtual void end_work(uint worker_id) { }

  virtual void scan_roots(G1ParScanThreadState* pss, uint worker_id) = 0;

  virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;

public:
  G1EvacuateRegionsBaseTask(const char* name,
                            G1ParScanThreadStateSet* per_thread_states,
                            G1ScannerTasksQueueSet* task_queues,
                            uint num_workers) :
    WorkerTask(name),
    _g1h(G1CollectedHeap::heap()),
    _per_thread_states(per_thread_states),
    _task_queues(task_queues),
    _terminator(num_workers, _task_queues),
    _num_workers(num_workers)
  { }

  void work(uint worker_id) {
    start_work(worker_id);

    {
      ResourceMark rm;

      G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
      pss->set_ref_discoverer(_g1h->ref_processor_stw());

      scan_roots(pss, worker_id);
      evacuate_live_objects(pss, worker_id);
    }

    end_work(worker_id);
  }
};

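// Evacuation task for the initial collection set: evacuates VM roots and heap
// roots before draining the task queues.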
class G1EvacuateRegionsTask : public G1EvacuateRegionsBaseTask {
  G1RootProcessor* _root_processor;
  bool _has_optional_evacuation_work;

  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    _root_processor->evacuate_roots(pss, worker_id);
    _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ObjCopy, _has_optional_evacuation_work);
    _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::CodeRoots, G1GCPhaseTimes::ObjCopy);
  }

  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination);
  }

  void start_work(uint worker_id) {
    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, Ticks::now().seconds());
  }

  void end_work(uint worker_id) {
    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, Ticks::now().seconds());
  }

public:
  G1EvacuateRegionsTask(G1CollectedHeap* g1h,
                        G1ParScanThreadStateSet* per_thread_states,
                        G1ScannerTasksQueueSet* task_queues,
                        G1RootProcessor* root_processor,
                        uint num_workers,
                        bool has_optional_evacuation_work) :
    G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
    _root_processor(root_processor),
    _has_optional_evacuation_work(has_optional_evacuation_work)
  { }
};

void G1YoungCollector::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states,
                                                       bool has_optional_evacuation_work) {
  G1GCPhaseTimes* p = phase_times();

  {
    Ticks start = Ticks::now();
    rem_set()->merge_heap_roots(true /* initial_evacuation */);
    p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
  }

  Tickspan task_time;
  const uint num_workers = workers()->active_workers();

  Ticks start_processing = Ticks::now();
  {
    G1RootProcessor root_processor(_g1h, num_workers);
    G1EvacuateRegionsTask g1_par_task(_g1h,
                                      per_thread_states,
                                      task_queues(),
                                      &root_processor,
                                      num_workers,
                                      has_optional_evacuation_work);
    task_time = run_task_timed(&g1_par_task);
    // Closing the inner scope will execute the destructor for the
    // G1RootProcessor object. By subtracting the WorkerThreads task from the total
    // time of this scope, we get the "NMethod List Cleanup" time. This list is
    // constructed during "STW two-phase nmethod root processing", see more in
    // nmethod.hpp
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  p->record_initial_evac_time(task_time.seconds() * 1000.0);
  p->record_or_add_nmethod_list_cleanup_time((total_processing - task_time).seconds() * 1000.0);

  rem_set()->complete_evac_phase(has_optional_evacuation_work);
}

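// Evacuation task for optional collection set regions: VM roots have already
// been processed in the initial evacuation, so only heap roots remain.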
class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {

  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy, true /* remember_already_scanned_cards */);
    _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
  }

  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
  }

public:
  G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
                                G1ScannerTasksQueueSet* queues,
                                uint num_workers) :
    G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
  }
};

void G1YoungCollector::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
  // To access the protected constructor/destructor
  class G1MarkScope : public MarkScope { };

  Tickspan task_time;

  Ticks start_processing = Ticks::now();
  {
    G1MarkScope code_mark_scope;
    G1EvacuateOptionalRegionsTask task(per_thread_states, task_queues(), workers()->active_workers());
    task_time = run_task_timed(&task);
    // See comment in evacuate_initial_collection_set() for the reason of the scope.
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  G1GCPhaseTimes* p = phase_times();
  p->record_or_add_nmethod_list_cleanup_time((total_processing - task_time).seconds() * 1000.0);
}

void G1YoungCollector::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
  const double collection_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;

  while (!evacuation_alloc_failed() && collection_set()->optional_region_length() > 0) {

    double time_used_ms = os::elapsedTime() * 1000.0 - collection_start_time_ms;
    double time_left_ms = MaxGCPauseMillis - time_used_ms;

    if (time_left_ms < 0 ||
        !collection_set()->finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
      log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
                                collection_set()->optional_region_length(), time_left_ms);
      break;
    }

    {
      Ticks start = Ticks::now();
      rem_set()->merge_heap_roots(false /* initial_evacuation */);
      phase_times()->record_or_add_optional_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
    }

    {
      Ticks start = Ticks::now();
      evacuate_next_optional_regions(per_thread_states);
      phase_times()->record_or_add_optional_evac_time((Ticks::now() - start).seconds() * 1000.0);
    }

    rem_set()->complete_evac_phase(true /* has_more_than_one_evacuation_phase */);
  }

  collection_set()->abandon_optional_collection_set(per_thread_states);
}

// Non Copying Keep Alive closure
class G1KeepAliveClosure : public OopClosure {
  G1CollectedHeap* _g1h;
public:
  G1KeepAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  void do_oop(oop* p) {
    oop obj = *p;
    assert(obj != nullptr, "the caller should have filtered out null values");

    const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
    if (!region_attr.is_in_cset_or_humongous_candidate()) {
      return;
    }
    if (region_attr.is_in_cset()) {
      assert(obj->is_forwarded(), "invariant");
      *p = obj->forwardee();
    } else {
      assert(!obj->is_forwarded(), "invariant");
      assert(region_attr.is_humongous_candidate(),
             "Only allowed G1HeapRegionAttr state is IsHumongous, but is %d", region_attr.type());
      _g1h->set_humongous_is_live(obj);
    }
  }
};

// Copying Keep Alive closure - can be called from both
// serial and parallel code as long as different worker
// threads utilize different G1ParScanThreadState instances
// and different queues.
class G1CopyingKeepAliveClosure : public OopClosure {
  G1CollectedHeap* _g1h;
  G1ParScanThreadState* _par_scan_state;

public:
  G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
                            G1ParScanThreadState* pss) :
    _g1h(g1h),
    _par_scan_state(pss)
  {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    oop obj = RawAccess<>::oop_load(p);

    if (_g1h->is_in_cset_or_humongous_candidate(obj)) {
      // If the referent object has been forwarded (either copied
      // to a new location or to itself in the event of an
      // evacuation failure) then we need to update the reference
      // field and, if both reference and referent are in the G1
      // heap, update the RSet for the referent.
      //
      // If the referent has not been forwarded then we have to keep
      // it alive by policy. Therefore we have to copy the referent.
      //
      // When the queue is drained (after each phase of reference processing)
      // the object and its followers will be copied, the reference field set
      // to point to the new location, and the RSet updated.
      _par_scan_state->push_on_queue(ScannerTask(p));
    }
  }
};

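// Proxy task that performs STW reference processing on the worker threads.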
class G1STWRefProcProxyTask : public RefProcProxyTask {
  G1CollectedHeap& _g1h;
  G1ParScanThreadStateSet& _pss;
  TaskTerminator _terminator;
  G1ScannerTasksQueueSet& _task_queues;

  // Special closure for enqueuing discovered fields: during enqueue the card table
  // may not be in shape to properly handle normal barrier calls (e.g. card marks
  // in regions that failed evacuation, scribbling of various values by card table
  // scan code). Additionally the regular barrier enqueues into the "global"
  // DCQS, but during GC we need these to-be-refined entries in the GC local queue
  // so that after clearing the card table, the redirty cards phase will properly
  // mark all dirty cards to be picked up by refinement.
  class G1EnqueueDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
    G1CollectedHeap* _g1h;
    G1ParScanThreadState* _pss;

  public:
    G1EnqueueDiscoveredFieldClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) : _g1h(g1h), _pss(pss) { }

    void enqueue(HeapWord* discovered_field_addr, oop value) override {
      assert(_g1h->is_in(discovered_field_addr), PTR_FORMAT " is not in heap ", p2i(discovered_field_addr));
      // Store the value first, whatever it is.
      RawAccess<>::oop_store(discovered_field_addr, value);
      if (value == nullptr) {
        return;
      }
      _pss->write_ref_field_post(discovered_field_addr, value);
    }
  };

public:
  G1STWRefProcProxyTask(uint max_workers, G1CollectedHeap& g1h, G1ParScanThreadStateSet& pss, G1ScannerTasksQueueSet& task_queues)
    : RefProcProxyTask("G1STWRefProcProxyTask", max_workers),
      _g1h(g1h),
      _pss(pss),
      _terminator(max_workers, &task_queues),
      _task_queues(task_queues) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;

    G1ParScanThreadState* pss = _pss.state_for_worker(index);
    pss->set_ref_discoverer(nullptr);

    G1STWIsAliveClosure is_alive(&_g1h);
    G1CopyingKeepAliveClosure keep_alive(&_g1h, pss);
    G1EnqueueDiscoveredFieldClosure enqueue(&_g1h, pss);
    G1ParEvacuateFollowersClosure complete_gc(&_g1h, pss, &_task_queues, _tm == RefProcThreadModel::Single ? nullptr : &_terminator, G1GCPhaseTimes::ObjCopy);
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);

    // We have completed copying any necessary live referent objects.
    assert(pss->queue_is_empty(), "both queue and overflow should be empty");
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

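// Process the references discovered during evacuation, keeping their referents
// alive as needed; see the G1CopyingKeepAliveClosure above.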
void G1YoungCollector::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
  Ticks start = Ticks::now();

  ReferenceProcessor* rp = ref_processor_stw();
  assert(rp->discovery_enabled(), "should have been enabled");

  uint no_of_gc_workers = workers()->active_workers();
  rp->set_active_mt_degree(no_of_gc_workers);

  G1STWRefProcProxyTask task(rp->max_num_queues(), *_g1h, *per_thread_states, *task_queues());
  ReferenceProcessorPhaseTimes& pt = *phase_times()->ref_phase_times();
  ReferenceProcessorStats stats = rp->process_discovered_references(task, pt);

  gc_tracer_stw()->report_gc_reference_stats(stats);

  _g1h->make_pending_list_reachable();

  phase_times()->record_ref_proc_time((Ticks::now() - start).seconds() * MILLIUNITS);
}

void G1YoungCollector::post_evacuate_cleanup_1(G1ParScanThreadStateSet* per_thread_states) {
  Ticks start = Ticks::now();
  {
    G1PostEvacuateCollectionSetCleanupTask1 cl(per_thread_states, &_evac_failure_regions);
    _g1h->run_batch_task(&cl);
  }
  phase_times()->record_post_evacuate_cleanup_task_1_time((Ticks::now() - start).seconds() * 1000.0);
}

void G1YoungCollector::post_evacuate_cleanup_2(G1ParScanThreadStateSet* per_thread_states,
                                               G1EvacInfo* evacuation_info) {
  Ticks start = Ticks::now();
  {
    G1PostEvacuateCollectionSetCleanupTask2 cl(per_thread_states, evacuation_info, &_evac_failure_regions);
    _g1h->run_batch_task(&cl);
  }
  phase_times()->record_post_evacuate_cleanup_task_2_time((Ticks::now() - start).seconds() * 1000.0);
}

void G1YoungCollector::enqueue_candidates_as_root_regions() {
  assert(collector_state()->in_concurrent_start_gc(), "must be");

  G1CollectionSetCandidates* candidates = collection_set()->candidates();
  for (G1HeapRegion* r : *candidates) {
    _g1h->concurrent_mark()->add_root_region(r);
  }
}

void G1YoungCollector::post_evacuate_collection_set(G1EvacInfo* evacuation_info,
                                                    G1ParScanThreadStateSet* per_thread_states) {
  G1GCPhaseTimes* p = phase_times();

  // Process any discovered reference objects - we have
  // to do this _before_ we retire the GC alloc regions
  // as we may have to copy some 'reachable' referent
  // objects (and their reachable sub-graphs) that were
  // not copied during the pause.
  process_discovered_references(per_thread_states);

  G1STWIsAliveClosure is_alive(_g1h);
  G1KeepAliveClosure keep_alive(_g1h);

  WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive, p->weak_phase_times());

  allocator()->release_gc_alloc_regions(evacuation_info);

  post_evacuate_cleanup_1(per_thread_states);

  post_evacuate_cleanup_2(per_thread_states, evacuation_info);

  // Regions in the collection set candidates are roots for the marking (they are
  // not marked through, considering they are very likely to be reclaimed soon).
  // Unlike survivor regions, they need to be enqueued explicitly.
  if (collector_state()->in_concurrent_start_gc()) {
    enqueue_candidates_as_root_regions();
  }

  _evac_failure_regions.post_collection();

  assert_used_and_recalculate_used_equal(_g1h);

  _g1h->rebuild_free_region_list();

  _g1h->record_obj_copy_mem_stats();

  evacuation_info->set_bytes_used(_g1h->bytes_used_during_gc());

  _g1h->prepare_for_mutator_after_young_collection();

  _g1h->gc_epilogue(false);

  _g1h->expand_heap_after_young_collection();
}

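// Whether any region in the collection set failed evacuation, for any reason
// (allocation failure or pinned regions).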
bool G1YoungCollector::evacuation_failed() const {
  return _evac_failure_regions.has_regions_evac_failed();
}

bool G1YoungCollector::evacuation_pinned() const {
  return _evac_failure_regions.has_regions_evac_pinned();
}

bool G1YoungCollector::evacuation_alloc_failed() const {
  return _evac_failure_regions.has_regions_alloc_failed();
}

G1YoungCollector::G1YoungCollector(GCCause::Cause gc_cause) :
  _g1h(G1CollectedHeap::heap()),
  _gc_cause(gc_cause),
  _concurrent_operation_is_full_mark(false),
  _evac_failure_regions()
{
}

void G1YoungCollector::collect() {
  // Do timing/tracing/statistics/pre- and post-logging/verification work not
  // directly related to the collection. They should not be accounted for in
  // collection work timing.

  // The G1YoungGCTraceTime message depends on collector state, so must come after
  // determining collector state.
  G1YoungGCTraceTime tm(this, _gc_cause);

  // JFR
  G1YoungGCJFRTracerMark jtm(gc_timer_stw(), gc_tracer_stw(), _gc_cause);
  // JStat/MXBeans
  G1YoungGCMonitoringScope ms(monitoring_support(),
                              !collection_set()->candidates()->is_empty() /* all_memory_pools_affected */);
  // Create the heap printer before internal pause timing to have
  // heap information printed as last part of detailed GC log.
  G1HeapPrinterMark hpm(_g1h);
  // Young GC internal pause timing
  G1YoungGCNotifyPauseMark npm(this);

  // Verification may use the workers, so they must be set up before.
  // Individual parallel phases may override this.
  set_young_collection_default_active_worker_threads();

  // Wait for root region scan here to make sure that it is done before any
  // use of the STW workers to maximize cpu use (i.e. all cores are available
  // just to do that).
  wait_for_root_region_scanning();

  G1YoungGCVerifierMark vm(this);
  {
    // Actual collection work starts and is executed (only) in this scope.

    // Young GC internal collection timing. The elapsed time recorded in the
    // policy for the collection deliberately elides verification (and some
    // other trivial setup above).
    policy()->record_young_collection_start();

    pre_evacuate_collection_set(jtm.evacuation_info());

    G1ParScanThreadStateSet per_thread_states(_g1h,
                                              workers()->active_workers(),
                                              collection_set(),
                                              &_evac_failure_regions);

    bool may_do_optional_evacuation = collection_set()->optional_region_length() != 0;
    // Actually do the work...
    evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);

    if (may_do_optional_evacuation) {
      evacuate_optional_collection_set(&per_thread_states);
    }
    post_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);

    // Refine the type of a concurrent mark operation now that we did the
    // evacuation, possibly aborting it.
    _concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP");

    // Need to report the collection pause now since record_young_collection_end()
    // modifies it to the next state.
    jtm.report_pause_type(collector_state()->young_gc_pause_type(_concurrent_operation_is_full_mark));

    policy()->record_young_collection_end(_concurrent_operation_is_full_mark, evacuation_alloc_failed());
  }
  TASKQUEUE_STATS_ONLY(_g1h->task_queues()->print_and_reset_taskqueue_stats("Oop Queue");)
}