/*
 * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "classfile/classLoaderDataGraph.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "code/nmethod.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1CardSetMemory.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSetCandidates.inline.hpp"
#include "gc/g1/g1CollectorState.inline.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1EvacInfo.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionPinCache.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/g1/g1YoungCollector.hpp"
#include "gc/g1/g1YoungGCAllocationFailureInjector.hpp"
#include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
#include "gc/g1/g1YoungGCPreEvacuateTasks.hpp"
#include "gc/shared/concurrentGCBreakpoints.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/threads.hpp"
#include "utilities/ticks.hpp"

// GCTraceTime wrapper that constructs the message according to GC pause type and
// GC cause.
// The code relies on the fact that GCTraceTimeWrapper stores the string passed
// initially as a reference only, so that we can modify it as needed.
class G1YoungGCTraceTime {
  G1YoungCollector* _collector;

  G1CollectorState::Pause _pause_type;
  GCCause::Cause _pause_cause;

  static const uint MaxYoungGCNameLength = 128;
  char _young_gc_name_data[MaxYoungGCNameLength];

  GCTraceTime(Info, gc) _tt;

  const char* update_young_gc_name() {
    char evacuation_failed_string[48];
    evacuation_failed_string[0] = '\0';

    if (_collector->evacuation_failed()) {
      os::snprintf_checked(evacuation_failed_string,
                           ARRAY_SIZE(evacuation_failed_string),
                           " (Evacuation Failure: %s%s%s)",
                           _collector->evacuation_alloc_failed() ? "Allocation" : "",
                           _collector->evacuation_alloc_failed() && _collector->evacuation_pinned() ? " / " : "",
                           _collector->evacuation_pinned() ? "Pinned" : "");
    }
    os::snprintf_checked(_young_gc_name_data,
                         MaxYoungGCNameLength,
                         "Pause Young (%s) (%s)%s",
                         G1CollectorState::to_string(_pause_type),
                         GCCause::to_string(_pause_cause),
                         evacuation_failed_string);
    return _young_gc_name_data;
  }

public:
  G1YoungGCTraceTime(G1YoungCollector* collector, GCCause::Cause cause) :
    _collector(collector),
    // Take snapshot of current pause type at start as it may be modified during gc.
    // The strings for all Concurrent Start pauses are the same, so the parameter
    // does not matter here.
    _pause_type(_collector->collector_state()->gc_pause_type(false /* concurrent_operation_is_full_mark */)),
    _pause_cause(cause),
    // Fake a "no cause" and manually add the correct string in update_young_gc_name()
    // to make the string look more natural.
    _tt(update_young_gc_name(), nullptr, GCCause::_no_gc, true) {
  }

  ~G1YoungGCTraceTime() {
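    // Regenerate the name one last time so that the message printed when _tt is
    // destroyed reflects any evacuation failure observed during the pause.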
    update_young_gc_name();
  }
};

class G1YoungGCNotifyPauseMark : public StackObj {
  G1YoungCollector* _collector;

public:
  G1YoungGCNotifyPauseMark(G1YoungCollector* collector) : _collector(collector) {
    G1CollectedHeap::heap()->policy()->record_young_gc_pause_start();
  }

  ~G1YoungGCNotifyPauseMark() {
    G1CollectedHeap::heap()->policy()->record_young_gc_pause_end(_collector->evacuation_failed());
  }
};

class G1YoungGCJFRTracerMark : public G1JFRTracerMark {
  G1EvacInfo _evacuation_info;

  G1NewTracer* tracer() const { return (G1NewTracer*)_tracer; }

public:

  G1EvacInfo* evacuation_info() { return &_evacuation_info; }

  G1YoungGCJFRTracerMark(STWGCTimer* gc_timer_stw, G1NewTracer* gc_tracer_stw, GCCause::Cause cause) :
    G1JFRTracerMark(gc_timer_stw, gc_tracer_stw), _evacuation_info() { }

  void report_pause_type(G1CollectorState::Pause type) {
    tracer()->report_young_gc_pause(type);
  }

  ~G1YoungGCJFRTracerMark() {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    tracer()->report_evacuation_info(&_evacuation_info);
    tracer()->report_tenuring_threshold(g1h->policy()->tenuring_threshold());
  }
};

class G1YoungGCVerifierMark : public StackObj {
  G1YoungCollector* _collector;
  G1HeapVerifier::G1VerifyType _type;

  static G1HeapVerifier::G1VerifyType young_collection_verify_type() {
    G1CollectorState* state = G1CollectedHeap::heap()->collector_state();
    if (state->is_in_concurrent_start_gc()) {
      return G1HeapVerifier::G1VerifyConcurrentStart;
    } else if (state->is_in_young_only_phase()) {
      return G1HeapVerifier::G1VerifyYoungNormal;
    } else {
      return G1HeapVerifier::G1VerifyMixed;
    }
  }

public:
  G1YoungGCVerifierMark(G1YoungCollector* collector) : _collector(collector), _type(young_collection_verify_type()) {
    G1CollectedHeap::heap()->verify_before_young_collection(_type);
  }

  ~G1YoungGCVerifierMark() {
    // Inject evacuation failure tag into type if needed.
    G1HeapVerifier::G1VerifyType type = _type;
    if (_collector->evacuation_failed()) {
      type = (G1HeapVerifier::G1VerifyType)(type | G1HeapVerifier::G1VerifyYoungEvacFail);
    }
    G1CollectedHeap::heap()->verify_after_young_collection(type);
  }
};

G1Allocator* G1YoungCollector::allocator() const {
  return _g1h->allocator();
}

G1CollectionSet* G1YoungCollector::collection_set() const {
  return _g1h->collection_set();
}

G1CollectorState* G1YoungCollector::collector_state() const {
  return _g1h->collector_state();
}

G1ConcurrentMark* G1YoungCollector::concurrent_mark() const {
  return _g1h->concurrent_mark();
}

STWGCTimer* G1YoungCollector::gc_timer_stw() const {
  return _g1h->gc_timer_stw();
}

G1NewTracer* G1YoungCollector::gc_tracer_stw() const {
  return _g1h->gc_tracer_stw();
}

G1Policy* G1YoungCollector::policy() const {
  return _g1h->policy();
}

G1GCPhaseTimes* G1YoungCollector::phase_times() const {
  return _g1h->phase_times();
}

G1MonitoringSupport* G1YoungCollector::monitoring_support() const {
  return _g1h->monitoring_support();
}

G1RemSet* G1YoungCollector::rem_set() const {
  return _g1h->rem_set();
}

G1ScannerTasksQueueSet* G1YoungCollector::task_queues() const {
  return _g1h->task_queues();
}

G1SurvivorRegions* G1YoungCollector::survivor_regions() const {
  return _g1h->survivor();
}

ReferenceProcessor* G1YoungCollector::ref_processor_stw() const {
  return _g1h->ref_processor_stw();
}

WorkerThreads* G1YoungCollector::workers() const {
  return _g1h->workers();
}

G1YoungGCAllocationFailureInjector* G1YoungCollector::allocation_failure_injector() const {
  return _g1h->allocation_failure_injector();
}

void G1YoungCollector::complete_root_region_scan() {
  Ticks start = Ticks::now();
  // We have to complete root region scan as it's the only way to ensure that all the
  // objects on them have been correctly scanned before we start moving them during the GC.
  if (concurrent_mark()->complete_root_regions_scan_in_safepoint()) {
    phase_times()->record_root_region_scan_time((Ticks::now() - start).seconds() * MILLIUNITS);
  }
}

class G1PrintCollectionSetClosure : public G1HeapRegionClosure {
public:
  virtual bool do_heap_region(G1HeapRegion* r) {
    G1HeapRegionPrinter::cset(r);
    return false;
  }
};

void G1YoungCollector::calculate_collection_set(G1EvacInfo* evacuation_info, double target_pause_time_ms) {
  // Forget the current allocation region (we might even choose it to be part
  // of the collection set!) before finalizing the collection set.
  allocator()->release_mutator_alloc_regions();

  collection_set()->finalize_initial_collection_set(target_pause_time_ms, survivor_regions());
  evacuation_info->set_collection_set_regions(collection_set()->initial_region_length() +
                                              collection_set()->num_optional_regions());

  concurrent_mark()->verify_no_collection_set_oops();

  if (G1HeapRegionPrinter::is_active()) {
    G1PrintCollectionSetClosure cl;
    collection_set()->iterate(&cl);
    collection_set()->iterate_optional(&cl);
  }
}

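// Worker task run before evacuation: prepares every region for heap root scanning
// and selects which humongous regions are candidates for eager reclaim.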
class G1PrepareEvacuationTask : public WorkerTask {
  class G1PrepareRegionsClosure : public G1HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1PrepareEvacuationTask* _parent_task;
    uint _worker_humongous_total;
    uint _worker_humongous_candidates;

    G1MonotonicArenaMemoryStats _humongous_card_set_stats;

    bool humongous_region_is_candidate(G1HeapRegion* region) const {
      assert(region->is_starts_humongous(), "Must start a humongous object");

      oop obj = cast_to_oop(region->bottom());

      // Dead objects cannot be eager reclaim candidates. Due to class
      // unloading it is unsafe to query their classes so we return early.
      if (_g1h->is_obj_dead(obj, region)) {
        return false;
      }

      // If we do not have a complete remembered set for the region, then we
      // cannot be sure that we have all references to it.
      if (!region->rem_set()->is_complete()) {
        return false;
      }
      // We also cannot collect the humongous object if it is pinned.
      if (region->has_pinned_objects()) {
        return false;
      }
      // Candidate selection must satisfy the following constraints
      // while concurrent marking is in progress:
      //
      // * In order to maintain SATB invariants, an object must not be
      // reclaimed if it was allocated before the start of marking and
      // has not had its references scanned.  Such an object must have
      // its references (including type metadata) scanned to ensure no
      // live objects are missed by the marking process.  Objects
      // allocated after the start of concurrent marking don't need to
      // be scanned.
      //
      // * An object must not be reclaimed if it is on the concurrent
      // mark stack.  Objects allocated after the start of concurrent
      // marking are never pushed on the mark stack.
      //
      // Nominating only objects allocated after the start of concurrent
      // marking is sufficient to meet both constraints.  This may miss
      // some objects that satisfy the constraints, but the marking data
      // structures don't support efficiently performing the needed
      // additional tests or scrubbing of the mark stack.
      //
      // We handle humongous objects specially, because frequent allocation and
      // dropping of large binary blobs is an important use case for eager reclaim,
      // and this special handling increases needed headroom.
      // It also helps with G1 allocating humongous objects as old generation
      // objects although they might also die quite quickly.
      //
      // TypeArray objects are allowed to be reclaimed even if allocated before
      // the start of concurrent mark.  For this we rely on mark stack insertion
      // to exclude is_typeArray() objects, preventing reclaiming an object
      // that is in the mark stack.  We also rely on the metadata for
      // such objects to be built-in and so ensured to be kept live.
      //
      // Non-typeArrays that were allocated before marking are excluded from
      // eager reclaim during marking.  One issue is the problem described
      // above with scrubbing the mark stack, but there is also a problem
      // that can cause these humongous objects to be collected incorrectly:
      //
      // E.g. if the mutator is running, we may have objects o1 and o2 in the same
      // region, where o1 has already been scanned and o2 is only reachable by
      // the candidate object h, which is humongous.
      //
      // If the mutator read the reference to o2 from h and installed it into o1,
      // no remembered set entry would be created for keeping alive o2, as o1 and
      // o2 are in the same region.  Object h might be reclaimed by the next
      // garbage collection. o1 still has the reference to o2, but since o1 had
      // already been scanned we do not detect o2 to be still live and reclaim it.
      //
      // There is another minor problem with non-typeArray regions being the source
      // of remembered set entries in other regions' remembered sets.  There are
      // two cases: first, the remembered set entry is in a Free region after reclaim.
      // We handle this case by ignoring these cards when merging the remembered
      // sets.
      //
      // Second, there may be cases where eagerly reclaimed regions were already
      // reallocated.  This may cause scanning of these outdated remembered set
      // entries, containing some objects. But apart from extra work this does
      // not cause correctness issues.
      // There is no difference between scanning cards covering an effectively
      // dead humongous object vs. some other objects in reallocated regions.
      //
      // TAMSes are only reset after completing the entire mark cycle, during
      // bitmap clearing. It is worthwhile not to wait until then, and to allow
      // reclamation outside of actual (concurrent) SATB marking.
      // This also applies to the concurrent start pause - we only set
      // mark_in_progress() at the end of that GC: no mutator is running that can
      // sneakily install a new reference to the potentially reclaimed humongous
      // object.
      // During the concurrent start pause the situation described above where we
      // miss a reference cannot happen. No mutator is modifying the object
      // graph to install such an overlooked reference.
      //
      // After the pause, having reclaimed h, obviously the mutator can't fetch
      // the reference from h any more.
      if (!obj->is_typeArray()) {
        // All regions that were allocated before marking have a TAMS != bottom.
        bool allocated_before_mark_start = region->bottom() != _g1h->concurrent_mark()->top_at_mark_start(region);
        bool mark_in_progress = _g1h->collector_state()->is_in_marking();

        if (allocated_before_mark_start && mark_in_progress) {
          return false;
        }
      }
      return _g1h->is_potential_eager_reclaim_candidate(region);
    }

  public:
    G1PrepareRegionsClosure(G1CollectedHeap* g1h, G1PrepareEvacuationTask* parent_task) :
      _g1h(g1h),
      _parent_task(parent_task),
      _worker_humongous_total(0),
      _worker_humongous_candidates(0),
      _humongous_card_set_stats() { }

    ~G1PrepareRegionsClosure() {
      _parent_task->add_humongous_candidates(_worker_humongous_candidates);
      _parent_task->add_humongous_total(_worker_humongous_total);
    }

    virtual bool do_heap_region(G1HeapRegion* hr) {
      // First prepare the region for scanning
      _g1h->rem_set()->prepare_region_for_scan(hr);

      // Now check if the region is a humongous candidate
      if (!hr->is_starts_humongous()) {
        _g1h->update_region_attr(hr);
        return false;
      }

      uint index = hr->hrm_index();
      if (humongous_region_is_candidate(hr)) {
        _g1h->register_humongous_candidate_region_with_region_attr(index);
        _worker_humongous_candidates++;
        // We will later handle the remembered sets of these regions.
      } else {
        _g1h->update_region_attr(hr);
      }

      // Sample card set sizes for humongous regions before GC: this makes the policy
      // that gives back memory to the OS use the most recent amount of memory for these regions.
      _humongous_card_set_stats.add(hr->rem_set()->card_set_memory_stats());

      log_debug(gc, humongous)("Humongous region %u (object size %zu @ " PTR_FORMAT ") remset %zu code roots %zu "
                               "marked %d pinned count %zu reclaim candidate %d type %s",
                               index,
                               cast_to_oop(hr->bottom())->size() * HeapWordSize,
                               p2i(hr->bottom()),
                               hr->rem_set()->occupied(),
                               hr->rem_set()->code_roots_list_length(),
                               _g1h->concurrent_mark()->mark_bitmap()->is_marked(hr->bottom()),
                               hr->pinned_count(),
                               _g1h->is_humongous_reclaim_candidate(index),
                               cast_to_oop(hr->bottom())->is_typeArray() ? "tA"
                                                                         : (cast_to_oop(hr->bottom())->is_objArray() ? "oA" : "ob")
                              );
      _worker_humongous_total++;

      return false;
    }

    G1MonotonicArenaMemoryStats humongous_card_set_stats() const {
      return _humongous_card_set_stats;
    }
  };

  G1CollectedHeap* _g1h;
  G1HeapRegionClaimer _claimer;
  Atomic<uint> _humongous_total;
  Atomic<uint> _humongous_candidates;

  G1MonotonicArenaMemoryStats _all_card_set_stats;

public:
  G1PrepareEvacuationTask(G1CollectedHeap* g1h) :
    WorkerTask("Prepare Evacuation"),
    _g1h(g1h),
    _claimer(_g1h->workers()->active_workers()),
    _humongous_total(0),
    _humongous_candidates(0) { }

  void work(uint worker_id) {
    G1PrepareRegionsClosure cl(_g1h, this);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_claimer, worker_id);

    MutexLocker x(G1RareEvent_lock, Mutex::_no_safepoint_check_flag);
    _all_card_set_stats.add(cl.humongous_card_set_stats());
  }

  void add_humongous_candidates(uint candidates) {
    _humongous_candidates.add_then_fetch(candidates);
  }

  void add_humongous_total(uint total) {
    _humongous_total.add_then_fetch(total);
  }

  uint humongous_candidates() {
    return _humongous_candidates.load_relaxed();
  }

  uint humongous_total() {
    return _humongous_total.load_relaxed();
  }

  const G1MonotonicArenaMemoryStats all_card_set_stats() const {
    return _all_card_set_stats;
  }
};

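// Run the given task on all active workers and return the elapsed wall clock time.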
Tickspan G1YoungCollector::run_task_timed(WorkerTask* task) {
  Ticks start = Ticks::now();
  workers()->run_task(task);
  return Ticks::now() - start;
}

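// Determine and set the default number of active worker threads for this pause
// using the worker policy heuristics; individual parallel phases may override this.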
void G1YoungCollector::set_young_collection_default_active_worker_threads() {
  uint active_workers = WorkerPolicy::calc_active_workers(workers()->max_workers(),
                                                          workers()->active_workers(),
                                                          Threads::number_of_non_daemon_threads());
  active_workers = workers()->set_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for evacuation", active_workers, workers()->max_workers());
}

void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info) {
  // Flush various data in thread-local buffers to be able to determine the
  // collection set.
  {
    Ticks start = Ticks::now();
    G1PreEvacuateCollectionSetBatchTask cl;
    G1CollectedHeap::heap()->run_batch_task(&cl);
    phase_times()->record_pre_evacuate_prepare_time_ms((Ticks::now() - start).seconds() * 1000.0);
  }

  // Needs log buffers flushed.
  calculate_collection_set(evacuation_info, policy()->max_pause_time_ms());

  if (collector_state()->is_in_concurrent_start_gc()) {
    Ticks start = Ticks::now();
    concurrent_mark()->pre_concurrent_start(_gc_cause);
    phase_times()->record_prepare_concurrent_task_time_ms((Ticks::now() - start).seconds() * 1000.0);
  }

  // Please see comment in g1CollectedHeap.hpp and
  // G1CollectedHeap::ref_processing_init() to see how
  // reference processing currently works in G1.
  ref_processor_stw()->start_discovery(false /* always_clear */);

  _evac_failure_regions.pre_collection(_g1h->max_num_regions());

  _g1h->gc_prologue(false);

  // Initialize the GC alloc regions.
  allocator()->init_gc_alloc_regions(evacuation_info);

  {
    Ticks start = Ticks::now();
    rem_set()->prepare_for_scan_heap_roots();

    _g1h->collection_set()->prepare_for_scan();

    phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
  }

  {
    G1PrepareEvacuationTask g1_prep_task(_g1h);
    Tickspan task_time = run_task_timed(&g1_prep_task);

    G1MonotonicArenaMemoryStats sampled_card_set_stats = g1_prep_task.all_card_set_stats();
    sampled_card_set_stats.add(_g1h->young_regions_cset_group()->card_set_memory_stats());
    _g1h->set_young_gen_card_set_stats(sampled_card_set_stats);
    _g1h->set_humongous_stats(g1_prep_task.humongous_total(), g1_prep_task.humongous_candidates());

    phase_times()->record_register_regions(task_time.seconds() * 1000.0);
  }

  assert(_g1h->verifier()->check_region_attr_table(), "Inconsistency in the region attributes table.");

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  allocation_failure_injector()->arm_if_needed();
}

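// Closure that drains the per-thread task queue and steals work from other
// queues until the terminator signals that all workers are done. Termination
// time is recorded separately from object copy time.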
class G1ParEvacuateFollowersClosure : public VoidClosure {
  double _start_term;
  double _term_time;
  size_t _term_attempts;

  void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
  void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }

  G1CollectedHeap*              _g1h;
  G1ParScanThreadState*         _par_scan_state;
  G1ScannerTasksQueueSet*       _queues;
  TaskTerminator*               _terminator;
  G1GCPhaseTimes::GCParPhases   _phase;

  G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  G1ScannerTasksQueueSet* queues()         { return _queues; }
  TaskTerminator*         terminator()     { return _terminator; }

  inline bool offer_termination() {
    EventGCPhaseParallel event;
    G1ParScanThreadState* const pss = par_scan_state();
    start_term_time();
    const bool res = (terminator() == nullptr) ? true : terminator()->offer_termination();
    end_term_time();
    event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::Termination));
    return res;
  }

public:
  G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
                                G1ParScanThreadState* par_scan_state,
                                G1ScannerTasksQueueSet* queues,
                                TaskTerminator* terminator,
                                G1GCPhaseTimes::GCParPhases phase)
    : _start_term(0.0), _term_time(0.0), _term_attempts(0),
      _g1h(g1h), _par_scan_state(par_scan_state),
      _queues(queues), _terminator(terminator), _phase(phase) {}

  void do_void() {
    EventGCPhaseParallel event;
    G1ParScanThreadState* const pss = par_scan_state();
    pss->trim_queue();
    event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
    do {
      EventGCPhaseParallel event;
      pss->steal_and_trim_queue(queues());
      event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
    } while (!offer_termination());
  }

  double term_time() const { return _term_time; }
  size_t term_attempts() const { return _term_attempts; }
};

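// Common base for the initial and optional evacuation tasks: each worker scans
// its share of roots and then evacuates live objects by draining and stealing
// task queues until termination.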
class G1EvacuateRegionsBaseTask : public WorkerTask {

  // All pinned regions in the collection set must be registered as evacuation
  // failure regions up front: there is no guarantee that there is a reference
  // reachable by Java code (the object may only be reachable by native code)
  // whose evacuation would add them to the evacuation failed regions.
  void record_pinned_regions(G1ParScanThreadState* pss, uint worker_id) {
    class RecordPinnedRegionClosure : public G1HeapRegionClosure {
      G1ParScanThreadState* _pss;
      uint _worker_id;

    public:
      RecordPinnedRegionClosure(G1ParScanThreadState* pss, uint worker_id) : _pss(pss), _worker_id(worker_id) { }

      bool do_heap_region(G1HeapRegion* r) {
        if (r->has_pinned_objects()) {
          _pss->record_evacuation_failed_region(r, _worker_id, true /* cause_pinned */);
        }
        return false;
      }
    } cl(pss, worker_id);

    _g1h->collection_set_iterate_increment_from(&cl, worker_id);
  }

protected:
  G1CollectedHeap* _g1h;
  G1ParScanThreadStateSet* _per_thread_states;

  G1ScannerTasksQueueSet* _task_queues;
  TaskTerminator _terminator;

  void evacuate_live_objects(G1ParScanThreadState* pss,
                             uint worker_id,
                             G1GCPhaseTimes::GCParPhases objcopy_phase,
                             G1GCPhaseTimes::GCParPhases termination_phase) {
    G1GCPhaseTimes* p = _g1h->phase_times();

    Ticks start = Ticks::now();
    G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, &_terminator, objcopy_phase);
    cl.do_void();

    assert(pss->queue_is_empty(), "should be empty");

    Tickspan evac_time = (Ticks::now() - start);
    p->record_or_add_time_secs(objcopy_phase, worker_id, evac_time.seconds() - cl.term_time());

    if (termination_phase == G1GCPhaseTimes::Termination) {
      p->record_time_secs(termination_phase, worker_id, cl.term_time());
      p->record_thread_work_item(termination_phase, worker_id, cl.term_attempts());
    } else {
      p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time());
      p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts());
    }
    assert(pss->trim_ticks().value() == 0,
           "Unexpected partial trimming during evacuation value " JLONG_FORMAT,
           pss->trim_ticks().value());
  }

  virtual void start_work(uint worker_id) { }

  virtual void end_work(uint worker_id) { }

  virtual void scan_roots(G1ParScanThreadState* pss, uint worker_id) = 0;

  virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;

private:
  Atomic<bool> _pinned_regions_recorded;

public:
  G1EvacuateRegionsBaseTask(const char* name,
                            G1ParScanThreadStateSet* per_thread_states,
                            G1ScannerTasksQueueSet* task_queues,
                            uint num_workers) :
    WorkerTask(name),
    _g1h(G1CollectedHeap::heap()),
    _per_thread_states(per_thread_states),
    _task_queues(task_queues),
    _terminator(num_workers, _task_queues),
    _pinned_regions_recorded(false)
  { }

  void work(uint worker_id) {
    start_work(worker_id);

    {
      ResourceMark rm;

      G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
      pss->set_ref_discoverer(_g1h->ref_processor_stw());

      if (_pinned_regions_recorded.compare_set(false, true)) {
        record_pinned_regions(pss, worker_id);
      }
      scan_roots(pss, worker_id);
      evacuate_live_objects(pss, worker_id);
    }

    end_work(worker_id);
  }
};

class G1EvacuateRegionsTask : public G1EvacuateRegionsBaseTask {
  G1RootProcessor* _root_processor;
  bool _has_optional_evacuation_work;

  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    _root_processor->evacuate_roots(pss, worker_id);
    _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ObjCopy, _has_optional_evacuation_work);
    _g1h->rem_set()->scan_collection_set_code_roots(pss, worker_id, G1GCPhaseTimes::CodeRoots, G1GCPhaseTimes::ObjCopy);
    // There are no optional roots to scan right now.
#ifdef ASSERT
    class VerifyOptionalCollectionSetRootsEmptyClosure : public G1HeapRegionClosure {
      G1ParScanThreadState* _pss;

    public:
      VerifyOptionalCollectionSetRootsEmptyClosure(G1ParScanThreadState* pss) : _pss(pss) { }

      bool do_heap_region(G1HeapRegion* r) override {
        assert(!r->has_index_in_opt_cset(), "must be");
        return false;
      }
    } cl(pss);
    _g1h->collection_set_iterate_increment_from(&cl, worker_id);
#endif
  }

  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination);
  }

  void start_work(uint worker_id) {
    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, Ticks::now().seconds());
  }

  void end_work(uint worker_id) {
    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, Ticks::now().seconds());
  }

public:
  G1EvacuateRegionsTask(G1CollectedHeap* g1h,
                        G1ParScanThreadStateSet* per_thread_states,
                        G1ScannerTasksQueueSet* task_queues,
                        G1RootProcessor* root_processor,
                        uint num_workers,
                        bool has_optional_evacuation_work) :
    G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
    _root_processor(root_processor),
    _has_optional_evacuation_work(has_optional_evacuation_work)
  { }
};

void G1YoungCollector::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states,
                                                       bool has_optional_evacuation_work) {
  G1GCPhaseTimes* p = phase_times();

  rem_set()->merge_heap_roots(true /* initial_evacuation */);

  Tickspan task_time;
  const uint num_workers = workers()->active_workers();

  Ticks start_processing = Ticks::now();
  {
    G1RootProcessor root_processor(_g1h, num_workers > 1 /* is_parallel */);
    G1EvacuateRegionsTask g1_par_task(_g1h,
                                      per_thread_states,
                                      task_queues(),
                                      &root_processor,
                                      num_workers,
                                      has_optional_evacuation_work);
    task_time = run_task_timed(&g1_par_task);
    // Closing the inner scope will execute the destructor for the
    // G1RootProcessor object. By subtracting the WorkerThreads task from the total
    // time of this scope, we get the "NMethod List Cleanup" time. This list is
    // constructed during "STW two-phase nmethod root processing", see more in
    // nmethod.hpp
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  p->record_initial_evac_time(task_time.seconds() * 1000.0);
  p->record_or_add_nmethod_list_cleanup_time((total_processing - task_time).seconds() * 1000.0);

  rem_set()->complete_evac_phase(has_optional_evacuation_work);
}

class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {

  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy, true /* remember_already_scanned_cards */);
    _g1h->rem_set()->scan_collection_set_code_roots(pss, worker_id, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
    _g1h->rem_set()->scan_collection_set_optional_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ObjCopy);
  }

  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
  }

public:
  G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
                                G1ScannerTasksQueueSet* queues,
                                uint num_workers) :
    G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
  }
};

void G1YoungCollector::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
  Tickspan task_time;

  Ticks start_processing = Ticks::now();
  {
    NMethodMarkingScope nmethod_marking_scope;
    G1EvacuateOptionalRegionsTask task(per_thread_states, task_queues(), workers()->active_workers());
    task_time = run_task_timed(&task);
    // See comment in evacuate_initial_collection_set() for the reason of the scope.
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  G1GCPhaseTimes* p = phase_times();
  p->record_or_add_optional_evac_time(task_time.seconds() * 1000.0);
  p->record_or_add_nmethod_list_cleanup_time((total_processing - task_time).seconds() * 1000.0);
}

void G1YoungCollector::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
  const double pause_start_time_ms = policy()->cur_pause_start_sec() * 1000.0;
  double target_pause_time_ms = MaxGCPauseMillis;

  if (G1ForceOptionalEvacuation) {
    target_pause_time_ms = DBL_MAX;
  }

  while (!evacuation_alloc_failed() && collection_set()->num_optional_regions() > 0) {

    double time_used_ms = os::elapsedTime() * 1000.0 - pause_start_time_ms;
    double time_left_ms = target_pause_time_ms - time_used_ms;

    if (time_left_ms <= 0 ||
        !collection_set()->finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
      log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
                                collection_set()->num_optional_regions(), time_left_ms);
      break;
    }

    rem_set()->merge_heap_roots(false /* initial_evacuation */);

    evacuate_next_optional_regions(per_thread_states);

    rem_set()->complete_evac_phase(true /* has_more_than_one_evacuation_phase */);
  }

  collection_set()->abandon_optional_collection_set(per_thread_states);
}

// Non-copying Keep Alive closure.
class G1KeepAliveClosure : public OopClosure {
  G1CollectedHeap* _g1h;
public:
  G1KeepAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  void do_oop(oop* p) {
    oop obj = *p;
    assert(obj != nullptr, "the caller should have filtered out null values");

    const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
    assert(!region_attr.is_humongous_candidate(), "Humongous candidates should never be considered alive");
    if (region_attr.is_in_cset()) {
      assert(obj->is_forwarded(), "invariant");
      *p = obj->forwardee();
    }
  }
};

// Copying Keep Alive closure - can be called from both
// serial and parallel code as long as different worker
// threads utilize different G1ParScanThreadState instances
// and different queues.
class G1CopyingKeepAliveClosure : public OopClosure {
  G1CollectedHeap* _g1h;
  G1ParScanThreadState*    _par_scan_state;

public:
  G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
                            G1ParScanThreadState* pss):
    _g1h(g1h),
    _par_scan_state(pss)
  {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    oop obj = RawAccess<>::oop_load(p);

    assert(!_g1h->region_attr(obj).is_humongous_candidate(), "Humongous candidates should never be considered alive");
    if (_g1h->is_in_cset(obj)) {
      // If the referent object has been forwarded (either copied
      // to a new location or to itself in the event of an
      // evacuation failure) then we need to update the reference
      // field and, if both reference and referent are in the G1
      // heap, update the RSet for the referent.
      //
      // If the referent has not been forwarded then we have to keep
      // it alive by policy. Therefore we have to copy the referent.
      //
      // When the queue is drained (after each phase of reference processing)
      // the object and its followers will be copied, the reference field set
      // to point to the new location, and the RSet updated.
      _par_scan_state->push_on_queue(ScannerTask(p));
    }
  }
};

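// Proxy task handed to the reference processor for STW reference processing.
// Each worker uses the copying keep alive closure above and drains its task
// queue to completion after processing its share of discovered references.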
class G1STWRefProcProxyTask : public RefProcProxyTask {
  G1CollectedHeap& _g1h;
  G1ParScanThreadStateSet& _pss;
  TaskTerminator _terminator;
  G1ScannerTasksQueueSet& _task_queues;

  // G1 specific closure for marking discovered fields. Need to mark the card in the
  // refinement table as the card table is in use by garbage collection.
  class G1EnqueueDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
    G1CollectedHeap* _g1h;
    G1ParScanThreadState* _pss;

  public:
    G1EnqueueDiscoveredFieldClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) : _g1h(g1h), _pss(pss) { }

    void enqueue(HeapWord* discovered_field_addr, oop value) override {
      assert(_g1h->is_in(discovered_field_addr), PTR_FORMAT " is not in heap ", p2i(discovered_field_addr));
      // Store the value first, whatever it is.
      RawAccess<>::oop_store(discovered_field_addr, value);
      if (value == nullptr) {
        return;
      }
      _pss->write_ref_field_post(discovered_field_addr, value);
    }
  };

public:
  G1STWRefProcProxyTask(uint max_workers, G1CollectedHeap& g1h, G1ParScanThreadStateSet& pss, G1ScannerTasksQueueSet& task_queues)
    : RefProcProxyTask("G1STWRefProcProxyTask", max_workers),
      _g1h(g1h),
      _pss(pss),
      _terminator(max_workers, &task_queues),
      _task_queues(task_queues) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;

    G1ParScanThreadState* pss = _pss.state_for_worker(index);
    pss->set_ref_discoverer(nullptr);

    G1STWIsAliveClosure is_alive(&_g1h);
    G1CopyingKeepAliveClosure keep_alive(&_g1h, pss);
    G1EnqueueDiscoveredFieldClosure enqueue(&_g1h, pss);
    G1ParEvacuateFollowersClosure complete_gc(&_g1h, pss, &_task_queues, _tm == RefProcThreadModel::Single ? nullptr : &_terminator, G1GCPhaseTimes::ObjCopy);
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);

    // We have completed copying any necessary live referent objects.
    assert(pss->queue_is_empty(), "both queue and overflow should be empty");
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

void G1YoungCollector::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
  Ticks start = Ticks::now();

  ReferenceProcessor* rp = ref_processor_stw();
  assert(rp->discovery_enabled(), "should have been enabled");

  G1STWRefProcProxyTask task(rp->max_num_queues(), *_g1h, *per_thread_states, *task_queues());
  ReferenceProcessorPhaseTimes& pt = *phase_times()->ref_phase_times();
  ReferenceProcessorStats stats = rp->process_discovered_references(task, _g1h->workers(), pt);

  gc_tracer_stw()->report_gc_reference_stats(stats);

  _g1h->make_pending_list_reachable();

  phase_times()->record_ref_proc_time((Ticks::now() - start).seconds() * MILLIUNITS);
}

void G1YoungCollector::post_evacuate_cleanup_1(G1ParScanThreadStateSet* per_thread_states) {
  Ticks start = Ticks::now();
  {
    G1PostEvacuateCollectionSetCleanupTask1 cl(per_thread_states, &_evac_failure_regions);
    _g1h->run_batch_task(&cl);
  }
  phase_times()->record_post_evacuate_cleanup_task_1_time((Ticks::now() - start).seconds() * 1000.0);
}

void G1YoungCollector::post_evacuate_cleanup_2(G1ParScanThreadStateSet* per_thread_states,
                                               G1EvacInfo* evacuation_info) {
  Ticks start = Ticks::now();
  {
    G1PostEvacuateCollectionSetCleanupTask2 cl(per_thread_states, evacuation_info, &_evac_failure_regions);
    _g1h->run_batch_task(&cl);
  }
  phase_times()->record_post_evacuate_cleanup_task_2_time((Ticks::now() - start).seconds() * 1000.0);
}

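// Enqueue all remaining collection set candidate regions as root regions for concurrent marking.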
void G1YoungCollector::enqueue_candidates_as_root_regions() {
  assert(collector_state()->is_in_concurrent_start_gc(), "must be");

  G1CollectionSetCandidates* candidates = collection_set()->candidates();
  candidates->iterate_regions([&] (G1HeapRegion* r) {
    _g1h->concurrent_mark()->add_root_region(r);
  });
}

void G1YoungCollector::post_evacuate_collection_set(G1EvacInfo* evacuation_info,
                                                    G1ParScanThreadStateSet* per_thread_states) {
  G1GCPhaseTimes* p = phase_times();

  // Process any discovered reference objects - we have
  // to do this _before_ we retire the GC alloc regions
  // as we may have to copy some 'reachable' referent
  // objects (and their reachable sub-graphs) that were
  // not copied during the pause.
  process_discovered_references(per_thread_states);

  G1STWIsAliveClosure is_alive(_g1h);
  G1KeepAliveClosure keep_alive(_g1h);

  WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive, p->weak_phase_times());

  allocator()->release_gc_alloc_regions(evacuation_info);

#if TASKQUEUE_STATS
  _g1h->task_queues()->print_and_reset_taskqueue_stats("Young GC");
  // Logging uses thread states, which are deleted by cleanup, so this must
  // be done before cleanup.
  per_thread_states->print_partial_array_task_stats();
#endif // TASKQUEUE_STATS

  post_evacuate_cleanup_1(per_thread_states);

  post_evacuate_cleanup_2(per_thread_states, evacuation_info);

  // Regions in the collection set candidates are roots for the marking: they are
  // not marked through because they are very likely to be reclaimed soon.
  // Unlike survivor regions, they need to be enqueued explicitly.
  if (collector_state()->is_in_concurrent_start_gc()) {
    enqueue_candidates_as_root_regions();
  }

  _evac_failure_regions.post_collection();

  assert_used_and_recalculate_used_equal(_g1h);

  _g1h->rebuild_free_region_list();

  _g1h->record_obj_copy_mem_stats();

  evacuation_info->set_bytes_used(_g1h->bytes_used_during_gc());

  _g1h->prepare_for_mutator_after_young_collection();

  _g1h->gc_epilogue(false);

  _g1h->resize_heap_after_young_collection(_allocation_word_size);
}

bool G1YoungCollector::evacuation_failed() const {
  return _evac_failure_regions.has_regions_evac_failed();
}

bool G1YoungCollector::evacuation_pinned() const {
  return _evac_failure_regions.has_regions_evac_pinned();
}

bool G1YoungCollector::evacuation_alloc_failed() const {
  return _evac_failure_regions.has_regions_alloc_failed();
}

G1YoungCollector::G1YoungCollector(GCCause::Cause gc_cause,
                                   size_t allocation_word_size) :
  _g1h(G1CollectedHeap::heap()),
  _gc_cause(gc_cause),
  _allocation_word_size(allocation_word_size),
  _concurrent_operation_is_full_mark(false),
  _evac_failure_regions()
{
}

void G1YoungCollector::collect() {
  // Do timing/tracing/statistics/pre- and post-logging/verification work not
  // directly related to the collection. They should not be accounted for in
  // collection work timing.

  // The G1YoungGCTraceTime message depends on collector state, so must come after
  // determining collector state.
  G1YoungGCTraceTime tm(this, _gc_cause);

  // JFR
  G1YoungGCJFRTracerMark jtm(gc_timer_stw(), gc_tracer_stw(), _gc_cause);
  // JStat/MXBeans
  G1YoungGCMonitoringScope ms(monitoring_support(),
                              !collection_set()->candidates()->is_empty() /* all_memory_pools_affected */);
  // Create the heap printer before internal pause timing to have
  // heap information printed as last part of detailed GC log.
  G1HeapPrinterMark hpm(_g1h);
  // Young GC internal pause timing
  G1YoungGCNotifyPauseMark npm(this);

  // Verification may use the workers, so they must be set up before.
  // Individual parallel phases may override this.
  set_young_collection_default_active_worker_threads();

  // Wait for root region scan here to make sure that it is done before any
  // use of the STW workers to maximize cpu use (i.e. all cores are available
  // just to do that).
  complete_root_region_scan();

  G1YoungGCVerifierMark vm(this);
  {
    // Actual collection work starts and is executed (only) in this scope.

    // Young GC internal collection timing. The elapsed time recorded in the
    // policy for the collection deliberately elides verification (and some
    // other trivial setup above).
    policy()->record_young_collection_start();

    pre_evacuate_collection_set(jtm.evacuation_info());

    G1ParScanThreadStateSet per_thread_states(_g1h,
                                              workers()->active_workers(),
                                              collection_set(),
                                              &_evac_failure_regions);

    bool may_do_optional_evacuation = collection_set()->num_optional_regions() != 0;
    // Actually do the work...
    evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);

    if (may_do_optional_evacuation) {
      evacuate_optional_collection_set(&per_thread_states);
    }
    post_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);

    // Refine the type of a concurrent mark operation now that we did the
    // evacuation, possibly aborting it.
    _concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP", _allocation_word_size);

    // Need to report the collection pause now since record_young_collection_end()
    // modifies it to the next state.
    jtm.report_pause_type(collector_state()->gc_pause_type(_concurrent_operation_is_full_mark));

    policy()->record_young_collection_end(_concurrent_operation_is_full_mark, evacuation_alloc_failed(), _allocation_word_size);
  }
}