1 /*
2 * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
26 #include "classfile/classLoaderDataGraph.inline.hpp"
27 #include "classfile/javaClasses.inline.hpp"
28 #include "code/nmethod.hpp"
29 #include "compiler/oopMap.hpp"
30 #include "gc/g1/g1Allocator.hpp"
31 #include "gc/g1/g1CardSetMemory.hpp"
32 #include "gc/g1/g1CollectedHeap.inline.hpp"
33 #include "gc/g1/g1CollectionSetCandidates.inline.hpp"
34 #include "gc/g1/g1CollectorState.inline.hpp"
35 #include "gc/g1/g1ConcurrentMark.hpp"
36 #include "gc/g1/g1EvacFailureRegions.inline.hpp"
37 #include "gc/g1/g1EvacInfo.hpp"
38 #include "gc/g1/g1GCPhaseTimes.hpp"
39 #include "gc/g1/g1HeapRegion.inline.hpp"
40 #include "gc/g1/g1HeapRegionPrinter.hpp"
41 #include "gc/g1/g1MonitoringSupport.hpp"
42 #include "gc/g1/g1ParScanThreadState.inline.hpp"
43 #include "gc/g1/g1Policy.hpp"
44 #include "gc/g1/g1RegionPinCache.inline.hpp"
45 #include "gc/g1/g1RemSet.hpp"
46 #include "gc/g1/g1RootProcessor.hpp"
47 #include "gc/g1/g1Trace.hpp"
48 #include "gc/g1/g1YoungCollector.hpp"
49 #include "gc/g1/g1YoungGCAllocationFailureInjector.hpp"
50 #include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
51 #include "gc/g1/g1YoungGCPreEvacuateTasks.hpp"
52 #include "gc/shared/concurrentGCBreakpoints.hpp"
53 #include "gc/shared/gc_globals.hpp"
54 #include "gc/shared/gcTimer.hpp"
55 #include "gc/shared/gcTraceTime.inline.hpp"
56 #include "gc/shared/referenceProcessor.hpp"
57 #include "gc/shared/weakProcessor.inline.hpp"
58 #include "gc/shared/workerPolicy.hpp"
59 #include "gc/shared/workerThread.hpp"
60 #include "jfr/jfrEvents.hpp"
61 #include "memory/resourceArea.hpp"
62 #include "runtime/atomic.hpp"
63 #include "runtime/threads.hpp"
64 #include "utilities/ticks.hpp"
65
66 // GCTraceTime wrapper that constructs the message according to GC pause type and
67 // GC cause.
// The code relies on the fact that GCTraceTimeWrapper keeps only a pointer to the
// message passed at construction, so that we can update the buffer contents later
// and the message logged on destruction reflects those updates.
70 class G1YoungGCTraceTime {
71 G1YoungCollector* _collector;
72
73 G1CollectorState::Pause _pause_type;
74 GCCause::Cause _pause_cause;
75
76 static const uint MaxYoungGCNameLength = 128;
77 char _young_gc_name_data[MaxYoungGCNameLength];
78
79 GCTraceTime(Info, gc) _tt;
80
81 const char* update_young_gc_name() {
82 char evacuation_failed_string[48];
83 evacuation_failed_string[0] = '\0';
84
85 if (_collector->evacuation_failed()) {
86 os::snprintf_checked(evacuation_failed_string,
87 ARRAY_SIZE(evacuation_failed_string),
88 " (Evacuation Failure: %s%s%s)",
89 _collector->evacuation_alloc_failed() ? "Allocation" : "",
90 _collector->evacuation_alloc_failed() && _collector->evacuation_pinned() ? " / " : "",
91 _collector->evacuation_pinned() ? "Pinned" : "");
92 }
93 os::snprintf_checked(_young_gc_name_data,
94 MaxYoungGCNameLength,
95 "Pause Young (%s) (%s)%s",
96 G1CollectorState::to_string(_pause_type),
97 GCCause::to_string(_pause_cause),
98 evacuation_failed_string);
99 return _young_gc_name_data;
100 }
101
102 public:
103 G1YoungGCTraceTime(G1YoungCollector* collector, GCCause::Cause cause) :
104 _collector(collector),
105 // Take snapshot of current pause type at start as it may be modified during gc.
106 // The strings for all Concurrent Start pauses are the same, so the parameter
107 // does not matter here.
108 _pause_type(_collector->collector_state()->gc_pause_type(false /* concurrent_operation_is_full_mark */)),
109 _pause_cause(cause),
110 // Fake a "no cause" and manually add the correct string in update_young_gc_name()
111 // to make the string look more natural.
112 _tt(update_young_gc_name(), nullptr, GCCause::_no_gc, true) {
113 }
114
115 ~G1YoungGCTraceTime() {
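    // Re-generate the message now that the pause is done: member destructors run
    // after this body, so the GCTraceTime member logs a message that includes any
    // evacuation failure information gathered during the pause.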
116 update_young_gc_name();
117 }
118 };
119
120 class G1YoungGCNotifyPauseMark : public StackObj {
121 G1YoungCollector* _collector;
122
123 public:
124 G1YoungGCNotifyPauseMark(G1YoungCollector* collector) : _collector(collector) {
125 G1CollectedHeap::heap()->policy()->record_young_gc_pause_start();
126 }
127
128 ~G1YoungGCNotifyPauseMark() {
129 G1CollectedHeap::heap()->policy()->record_young_gc_pause_end(_collector->evacuation_failed());
130 }
131 };
132
133 class G1YoungGCJFRTracerMark : public G1JFRTracerMark {
134 G1EvacInfo _evacuation_info;
135
136 G1NewTracer* tracer() const { return (G1NewTracer*)_tracer; }
137
138 public:
139
140 G1EvacInfo* evacuation_info() { return &_evacuation_info; }
141
142 G1YoungGCJFRTracerMark(STWGCTimer* gc_timer_stw, G1NewTracer* gc_tracer_stw, GCCause::Cause cause) :
143 G1JFRTracerMark(gc_timer_stw, gc_tracer_stw), _evacuation_info() { }
144
145 void report_pause_type(G1CollectorState::Pause type) {
146 tracer()->report_young_gc_pause(type);
147 }
148
149 ~G1YoungGCJFRTracerMark() {
150 G1CollectedHeap* g1h = G1CollectedHeap::heap();
151
152 tracer()->report_evacuation_info(&_evacuation_info);
153 tracer()->report_tenuring_threshold(g1h->policy()->tenuring_threshold());
154 }
155 };
156
157 class G1YoungGCVerifierMark : public StackObj {
158 G1YoungCollector* _collector;
159 G1HeapVerifier::G1VerifyType _type;
160
161 static G1HeapVerifier::G1VerifyType young_collection_verify_type() {
162 G1CollectorState* state = G1CollectedHeap::heap()->collector_state();
163 if (state->is_in_concurrent_start_gc()) {
164 return G1HeapVerifier::G1VerifyConcurrentStart;
165 } else if (state->is_in_young_only_phase()) {
166 return G1HeapVerifier::G1VerifyYoungNormal;
167 } else {
168 return G1HeapVerifier::G1VerifyMixed;
169 }
170 }
171
172 public:
173 G1YoungGCVerifierMark(G1YoungCollector* collector) : _collector(collector), _type(young_collection_verify_type()) {
174 G1CollectedHeap::heap()->verify_before_young_collection(_type);
175 }
176
177 ~G1YoungGCVerifierMark() {
178 // Inject evacuation failure tag into type if needed.
179 G1HeapVerifier::G1VerifyType type = _type;
180 if (_collector->evacuation_failed()) {
181 type = (G1HeapVerifier::G1VerifyType)(type | G1HeapVerifier::G1VerifyYoungEvacFail);
182 }
183 G1CollectedHeap::heap()->verify_after_young_collection(type);
184 }
185 };
186
187 G1Allocator* G1YoungCollector::allocator() const {
188 return _g1h->allocator();
189 }
190
191 G1CollectionSet* G1YoungCollector::collection_set() const {
192 return _g1h->collection_set();
193 }
194
195 G1CollectorState* G1YoungCollector::collector_state() const {
196 return _g1h->collector_state();
197 }
198
199 G1ConcurrentMark* G1YoungCollector::concurrent_mark() const {
200 return _g1h->concurrent_mark();
201 }
202
203 STWGCTimer* G1YoungCollector::gc_timer_stw() const {
204 return _g1h->gc_timer_stw();
205 }
206
207 G1NewTracer* G1YoungCollector::gc_tracer_stw() const {
208 return _g1h->gc_tracer_stw();
209 }
210
211 G1Policy* G1YoungCollector::policy() const {
212 return _g1h->policy();
213 }
214
215 G1GCPhaseTimes* G1YoungCollector::phase_times() const {
216 return _g1h->phase_times();
217 }
218
219 G1MonitoringSupport* G1YoungCollector::monitoring_support() const {
220 return _g1h->monitoring_support();
221 }
222
223 G1RemSet* G1YoungCollector::rem_set() const {
224 return _g1h->rem_set();
225 }
226
227 G1ScannerTasksQueueSet* G1YoungCollector::task_queues() const {
228 return _g1h->task_queues();
229 }
230
231 G1SurvivorRegions* G1YoungCollector::survivor_regions() const {
232 return _g1h->survivor();
233 }
234
235 ReferenceProcessor* G1YoungCollector::ref_processor_stw() const {
236 return _g1h->ref_processor_stw();
237 }
238
239 WorkerThreads* G1YoungCollector::workers() const {
240 return _g1h->workers();
241 }
242
243 G1YoungGCAllocationFailureInjector* G1YoungCollector::allocation_failure_injector() const {
244 return _g1h->allocation_failure_injector();
245 }
246
247 void G1YoungCollector::complete_root_region_scan() {
248 Ticks start = Ticks::now();
  // We have to complete the root region scan before evacuation starts: this is the
  // only way to ensure that all objects in those regions have been scanned by
  // concurrent mark before this GC starts moving them.
251 if (concurrent_mark()->complete_root_regions_scan_in_safepoint()) {
252 phase_times()->record_root_region_scan_time((Ticks::now() - start).seconds() * MILLIUNITS);
253 }
254 }
255
256 class G1PrintCollectionSetClosure : public G1HeapRegionClosure {
257 public:
258 virtual bool do_heap_region(G1HeapRegion* r) {
259 G1HeapRegionPrinter::cset(r);
260 return false;
261 }
262 };
263
264 void G1YoungCollector::calculate_collection_set(G1EvacInfo* evacuation_info, double target_pause_time_ms) {
265 // Forget the current allocation region (we might even choose it to be part
266 // of the collection set!) before finalizing the collection set.
267 allocator()->release_mutator_alloc_regions();
268
269 collection_set()->finalize_initial_collection_set(target_pause_time_ms, survivor_regions());
270 evacuation_info->set_collection_set_regions(collection_set()->initial_region_length() +
271 collection_set()->num_optional_regions());
272
273 concurrent_mark()->verify_no_collection_set_oops();
274
275 if (G1HeapRegionPrinter::is_active()) {
276 G1PrintCollectionSetClosure cl;
277 collection_set()->iterate(&cl);
278 collection_set()->iterate_optional(&cl);
279 }
280 }
281
282 class G1PrepareEvacuationTask : public WorkerTask {
283 class G1PrepareRegionsClosure : public G1HeapRegionClosure {
284 G1CollectedHeap* _g1h;
285 G1PrepareEvacuationTask* _parent_task;
286 uint _worker_humongous_total;
287 uint _worker_humongous_candidates;
288
289 G1MonotonicArenaMemoryStats _humongous_card_set_stats;
290
291 bool humongous_region_is_candidate(G1HeapRegion* region) const {
292 assert(region->is_starts_humongous(), "Must start a humongous object");
293
294 oop obj = cast_to_oop(region->bottom());
295
296 // Dead objects cannot be eager reclaim candidates. Due to class
297 // unloading it is unsafe to query their classes so we return early.
298 if (_g1h->is_obj_dead(obj, region)) {
299 return false;
300 }
301
      // If we do not have a complete remembered set for the region, then we cannot
      // be sure that we have all references to it.
304 if (!region->rem_set()->is_complete()) {
305 return false;
306 }
307 // We also cannot collect the humongous object if it is pinned.
308 if (region->has_pinned_objects()) {
309 return false;
310 }
311 // Candidate selection must satisfy the following constraints
312 // while concurrent marking is in progress:
313 //
314 // * In order to maintain SATB invariants, an object must not be
315 // reclaimed if it was allocated before the start of marking and
316 // has not had its references scanned. Such an object must have
317 // its references (including type metadata) scanned to ensure no
318 // live objects are missed by the marking process. Objects
319 // allocated after the start of concurrent marking don't need to
320 // be scanned.
321 //
322 // * An object must not be reclaimed if it is on the concurrent
323 // mark stack. Objects allocated after the start of concurrent
324 // marking are never pushed on the mark stack.
325 //
326 // Nominating only objects allocated after the start of concurrent
327 // marking is sufficient to meet both constraints. This may miss
328 // some objects that satisfy the constraints, but the marking data
329 // structures don't support efficiently performing the needed
330 // additional tests or scrubbing of the mark stack.
331 //
      // We handle humongous objects specially, because frequent allocation and
      // dropping of large binary blobs is an important use case for eager reclaim;
      // without it, this usage pattern increases the needed headroom.
      // Eager reclaim also helps because G1 allocates humongous objects directly
      // as old generation objects even though they might die quite quickly.
337 //
      // Humongous objects without oops (typeArrays, and flatArrays without oops in
      // their elements) are allowed to be reclaimed even if allocated before the
      // start of concurrent mark. For this we rely on mark stack insertion to
      // exclude them, which prevents reclaiming an object that is on the mark
      // stack. That code also ensures that the metadata (klass) is kept live.
344 //
      // Other humongous objects that were allocated before marking are excluded from
      // eager reclaim during marking. One issue is the problem described above with
      // scrubbing the mark stack, but there is also a problem that could cause these
      // humongous objects to be reclaimed incorrectly:
349 //
350 // E.g. if the mutator is running, we may have objects o1 and o2 in the same
351 // region, where o1 has already been scanned and o2 is only reachable by
352 // the candidate object h, which is humongous.
353 //
354 // If the mutator read the reference to o2 from h and installed it into o1,
355 // no remembered set entry would be created for keeping alive o2, as o1 and
356 // o2 are in the same region. Object h might be reclaimed by the next
      // garbage collection. o1 still has the reference to o2, but since o1 has
      // already been scanned, marking never detects that o2 is still live, and o2
      // is reclaimed incorrectly.
359 //
      // There is another, minor problem with these humongous objects with oops: they
      // are the source of remembered set entries in other regions' remembered sets.
      // There are two cases: first, after reclaim such a remembered set entry refers
      // into a now Free region. We handle this case by ignoring these cards when
      // merging the remembered sets.
365 //
      // Second, a region previously containing an eagerly reclaimed object may
      // already have been allocated into again. Scanning such outdated remembered
      // set entries then covers whatever objects are there now, but apart from the
      // extra work this does not cause correctness issues: there is no difference
      // between scanning cards covering an effectively dead humongous object and
      // scanning cards covering other objects in a reallocated region.
373 //
      // TAMSes are only reset after completing the entire mark cycle, during bitmap
      // clearing. It is worthwhile not to wait until then, and to allow reclamation
      // outside of actual (concurrent) SATB marking.
      // This also applies to the concurrent start pause: we only set
      // mark_in_progress() at the end of that GC, and no mutator is running during
      // the pause that could sneakily install a new reference to the potentially
      // reclaimed humongous object. So the missed-reference situation described
      // above cannot happen during the concurrent start pause either.
      //
      // After the pause, having reclaimed h, the mutator obviously cannot fetch the
      // reference from h any more.
387 bool marked_immediately = _g1h->can_be_marked_through_immediately(obj);
388 if (!marked_immediately) {
389 // All regions that were allocated before marking have a TAMS != bottom.
390 bool allocated_before_mark_start = region->bottom() != _g1h->concurrent_mark()->top_at_mark_start(region);
391 bool mark_in_progress = _g1h->collector_state()->is_in_marking();
392
393 if (allocated_before_mark_start && mark_in_progress) {
394 return false;
395 }
396 }
397 return _g1h->is_potential_eager_reclaim_candidate(region);
398 }
399
400 public:
401 G1PrepareRegionsClosure(G1CollectedHeap* g1h, G1PrepareEvacuationTask* parent_task) :
402 _g1h(g1h),
403 _parent_task(parent_task),
404 _worker_humongous_total(0),
405 _worker_humongous_candidates(0),
406 _humongous_card_set_stats() { }
407
408 ~G1PrepareRegionsClosure() {
409 _parent_task->add_humongous_candidates(_worker_humongous_candidates);
410 _parent_task->add_humongous_total(_worker_humongous_total);
411 }
412
413 virtual bool do_heap_region(G1HeapRegion* hr) {
414 // First prepare the region for scanning
415 _g1h->rem_set()->prepare_region_for_scan(hr);
416
417 // Now check if region is a humongous candidate
418 if (!hr->is_starts_humongous()) {
419 _g1h->update_region_attr(hr);
420 return false;
421 }
422
423 uint index = hr->hrm_index();
424 if (humongous_region_is_candidate(hr)) {
425 _g1h->register_humongous_candidate_region_with_region_attr(index);
426 _worker_humongous_candidates++;
427 // We will later handle the remembered sets of these regions.
428 } else {
429 _g1h->update_region_attr(hr);
430 }
431
      // Sample card set sizes for humongous regions before the GC: this way the
      // policy that gives memory back to the OS bases its decisions on the most
      // recent memory usage of these regions.
434 _humongous_card_set_stats.add(hr->rem_set()->card_set_memory_stats());
435
436 log_debug(gc, humongous)("Humongous region %u (object size %zu @ " PTR_FORMAT ") remset %zu code roots %zu "
437 "marked %d pinned count %zu reclaim candidate %d type %s",
438 index,
439 cast_to_oop(hr->bottom())->size() * HeapWordSize,
440 p2i(hr->bottom()),
441 hr->rem_set()->occupied(),
442 hr->rem_set()->code_roots_list_length(),
443 _g1h->concurrent_mark()->mark_bitmap()->is_marked(hr->bottom()),
444 hr->pinned_count(),
445 _g1h->is_humongous_reclaim_candidate(index),
446 cast_to_oop(hr->bottom())->is_typeArray() ? "tA"
447 : (cast_to_oop(hr->bottom())->is_objArray() ? "oA" : "ob")
448 );
449 _worker_humongous_total++;
450
451 return false;
452 }
453
454 G1MonotonicArenaMemoryStats humongous_card_set_stats() const {
455 return _humongous_card_set_stats;
456 }
457 };
458
459 G1CollectedHeap* _g1h;
460 G1HeapRegionClaimer _claimer;
461 Atomic<uint> _humongous_total;
462 Atomic<uint> _humongous_candidates;
463
464 G1MonotonicArenaMemoryStats _all_card_set_stats;
465
466 public:
467 G1PrepareEvacuationTask(G1CollectedHeap* g1h) :
468 WorkerTask("Prepare Evacuation"),
469 _g1h(g1h),
470 _claimer(_g1h->workers()->active_workers()),
471 _humongous_total(0),
472 _humongous_candidates(0) { }
473
474 void work(uint worker_id) {
475 G1PrepareRegionsClosure cl(_g1h, this);
476 _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_claimer, worker_id);
477
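    // Merge this worker's humongous card set statistics into the task-wide total;
    // the lock serializes updates from concurrently running workers.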
478 MutexLocker x(G1RareEvent_lock, Mutex::_no_safepoint_check_flag);
479 _all_card_set_stats.add(cl.humongous_card_set_stats());
480 }
481
482 void add_humongous_candidates(uint candidates) {
483 _humongous_candidates.add_then_fetch(candidates);
484 }
485
486 void add_humongous_total(uint total) {
487 _humongous_total.add_then_fetch(total);
488 }
489
490 uint humongous_candidates() {
491 return _humongous_candidates.load_relaxed();
492 }
493
494 uint humongous_total() {
495 return _humongous_total.load_relaxed();
496 }
497
498 const G1MonotonicArenaMemoryStats all_card_set_stats() const {
499 return _all_card_set_stats;
500 }
501 };
502
503 Tickspan G1YoungCollector::run_task_timed(WorkerTask* task) {
504 Ticks start = Ticks::now();
505 workers()->run_task(task);
506 return Ticks::now() - start;
507 }
508
void G1YoungCollector::set_young_collection_default_active_worker_threads() {
510 uint active_workers = WorkerPolicy::calc_active_workers(workers()->max_workers(),
511 workers()->active_workers(),
512 Threads::number_of_non_daemon_threads());
513 active_workers = workers()->set_active_workers(active_workers);
514 log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->max_workers());
515 }
516
517 void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info) {
  // Flush various data in thread-local buffers to be able to determine the
  // collection set.
520 {
521 Ticks start = Ticks::now();
522 G1PreEvacuateCollectionSetBatchTask cl;
523 G1CollectedHeap::heap()->run_batch_task(&cl);
524 phase_times()->record_pre_evacuate_prepare_time_ms((Ticks::now() - start).seconds() * 1000.0);
525 }
526
527 // Needs log buffers flushed.
528 calculate_collection_set(evacuation_info, policy()->max_pause_time_ms());
529
530 if (collector_state()->is_in_concurrent_start_gc()) {
531 Ticks start = Ticks::now();
532 concurrent_mark()->pre_concurrent_start(_gc_cause);
533 phase_times()->record_prepare_concurrent_task_time_ms((Ticks::now() - start).seconds() * 1000.0);
534 }
535
  // See the comment in g1CollectedHeap.hpp and G1CollectedHeap::ref_processing_init()
  // for how reference processing currently works in G1.
539 ref_processor_stw()->start_discovery(false /* always_clear */);
540
541 _evac_failure_regions.pre_collection(_g1h->max_num_regions());
542
  _g1h->gc_prologue(false /* full */);
544
545 // Initialize the GC alloc regions.
546 allocator()->init_gc_alloc_regions(evacuation_info);
547
548 {
549 Ticks start = Ticks::now();
550 rem_set()->prepare_for_scan_heap_roots();
551
552 _g1h->collection_set()->prepare_for_scan();
553
554 phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
555 }
556
557 {
558 G1PrepareEvacuationTask g1_prep_task(_g1h);
559 Tickspan task_time = run_task_timed(&g1_prep_task);
560
561 G1MonotonicArenaMemoryStats sampled_card_set_stats = g1_prep_task.all_card_set_stats();
562 sampled_card_set_stats.add(_g1h->young_regions_cset_group()->card_set_memory_stats());
563 _g1h->set_young_gen_card_set_stats(sampled_card_set_stats);
564 _g1h->set_humongous_stats(g1_prep_task.humongous_total(), g1_prep_task.humongous_candidates());
565
566 phase_times()->record_register_regions(task_time.seconds() * 1000.0);
567 }
568
569 assert(_g1h->verifier()->check_region_attr_table(), "Inconsistency in the region attributes table.");
570
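  // Reset the table recording derived pointers in compiled frames; entries are
  // collected during root scanning and updated once evacuation has moved the
  // base oops.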
571 #if COMPILER2_OR_JVMCI
572 DerivedPointerTable::clear();
573 #endif
574
575 allocation_failure_injector()->arm_if_needed();
576 }
577
578 class G1ParEvacuateFollowersClosure : public VoidClosure {
579 double _start_term;
580 double _term_time;
581 size_t _term_attempts;
582
583 void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
584 void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
585
586 G1CollectedHeap* _g1h;
587 G1ParScanThreadState* _par_scan_state;
588 G1ScannerTasksQueueSet* _queues;
589 TaskTerminator* _terminator;
590 G1GCPhaseTimes::GCParPhases _phase;
591
592 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
593 G1ScannerTasksQueueSet* queues() { return _queues; }
594 TaskTerminator* terminator() { return _terminator; }
595
596 inline bool offer_termination() {
597 EventGCPhaseParallel event;
598 G1ParScanThreadState* const pss = par_scan_state();
599 start_term_time();
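    // A null terminator means this closure runs single-threaded (e.g. during serial
    // reference processing), so we can terminate immediately.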
600 const bool res = (terminator() == nullptr) ? true : terminator()->offer_termination();
601 end_term_time();
602 event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::Termination));
603 return res;
604 }
605
606 public:
607 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
608 G1ParScanThreadState* par_scan_state,
609 G1ScannerTasksQueueSet* queues,
610 TaskTerminator* terminator,
611 G1GCPhaseTimes::GCParPhases phase)
612 : _start_term(0.0), _term_time(0.0), _term_attempts(0),
613 _g1h(g1h), _par_scan_state(par_scan_state),
614 _queues(queues), _terminator(terminator), _phase(phase) {}
615
616 void do_void() {
617 EventGCPhaseParallel event;
618 G1ParScanThreadState* const pss = par_scan_state();
619 pss->trim_queue();
620 event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
621 do {
622 EventGCPhaseParallel event;
623 pss->steal_and_trim_queue(queues());
624 event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
625 } while (!offer_termination());
626 }
627
628 double term_time() const { return _term_time; }
629 size_t term_attempts() const { return _term_attempts; }
630 };
631
632 class G1EvacuateRegionsBaseTask : public WorkerTask {
633
  // All pinned regions in the collection set must be registered as evacuation
  // failed regions up front: a pinned region may be reachable only from native
  // code (not from Java code), in which case evacuation never encounters a
  // reference into it and would never add it to the evacuation failed regions.
638 void record_pinned_regions(G1ParScanThreadState* pss, uint worker_id) {
639 class RecordPinnedRegionClosure : public G1HeapRegionClosure {
640 G1ParScanThreadState* _pss;
641 uint _worker_id;
642
643 public:
644 RecordPinnedRegionClosure(G1ParScanThreadState* pss, uint worker_id) : _pss(pss), _worker_id(worker_id) { }
645
646 bool do_heap_region(G1HeapRegion* r) {
647 if (r->has_pinned_objects()) {
648 _pss->record_evacuation_failed_region(r, _worker_id, true /* cause_pinned */);
649 }
650 return false;
651 }
652 } cl(pss, worker_id);
653
654 _g1h->collection_set_iterate_increment_from(&cl, worker_id);
655 }
656
657 protected:
658 G1CollectedHeap* _g1h;
659 G1ParScanThreadStateSet* _per_thread_states;
660
661 G1ScannerTasksQueueSet* _task_queues;
662 TaskTerminator _terminator;
663
664 void evacuate_live_objects(G1ParScanThreadState* pss,
665 uint worker_id,
666 G1GCPhaseTimes::GCParPhases objcopy_phase,
667 G1GCPhaseTimes::GCParPhases termination_phase) {
668 G1GCPhaseTimes* p = _g1h->phase_times();
669
670 Ticks start = Ticks::now();
671 G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, &_terminator, objcopy_phase);
672 cl.do_void();
673
674 assert(pss->queue_is_empty(), "should be empty");
675
676 Tickspan evac_time = (Ticks::now() - start);
677 p->record_or_add_time_secs(objcopy_phase, worker_id, evac_time.seconds() - cl.term_time());
678
679 if (termination_phase == G1GCPhaseTimes::Termination) {
680 p->record_time_secs(termination_phase, worker_id, cl.term_time());
681 p->record_thread_work_item(termination_phase, worker_id, cl.term_attempts());
682 } else {
683 p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time());
684 p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts());
685 }
686 assert(pss->trim_ticks().value() == 0,
687 "Unexpected partial trimming during evacuation value " JLONG_FORMAT,
688 pss->trim_ticks().value());
689 }
690
691 virtual void start_work(uint worker_id) { }
692
693 virtual void end_work(uint worker_id) { }
694
695 virtual void scan_roots(G1ParScanThreadState* pss, uint worker_id) = 0;
696
697 virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;
698
699 private:
700 Atomic<bool> _pinned_regions_recorded;
701
702 public:
703 G1EvacuateRegionsBaseTask(const char* name,
704 G1ParScanThreadStateSet* per_thread_states,
705 G1ScannerTasksQueueSet* task_queues,
706 uint num_workers) :
707 WorkerTask(name),
708 _g1h(G1CollectedHeap::heap()),
709 _per_thread_states(per_thread_states),
710 _task_queues(task_queues),
711 _terminator(num_workers, _task_queues),
712 _pinned_regions_recorded(false)
713 { }
714
715 void work(uint worker_id) {
716 start_work(worker_id);
717
718 {
719 ResourceMark rm;
720
721 G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
722 pss->set_ref_discoverer(_g1h->ref_processor_stw());
723
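      // Record pinned regions exactly once; the first worker to win this CAS does
      // it on behalf of all workers.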
724 if (_pinned_regions_recorded.compare_set(false, true)) {
725 record_pinned_regions(pss, worker_id);
726 }
727 scan_roots(pss, worker_id);
728 evacuate_live_objects(pss, worker_id);
729 }
730
731 end_work(worker_id);
732 }
733 };
734
735 class G1EvacuateRegionsTask : public G1EvacuateRegionsBaseTask {
736 G1RootProcessor* _root_processor;
737 bool _has_optional_evacuation_work;
738
739 void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
740 _root_processor->evacuate_roots(pss, worker_id);
741 _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ObjCopy, _has_optional_evacuation_work);
742 _g1h->rem_set()->scan_collection_set_code_roots(pss, worker_id, G1GCPhaseTimes::CodeRoots, G1GCPhaseTimes::ObjCopy);
743 // There are no optional roots to scan right now.
744 #ifdef ASSERT
745 class VerifyOptionalCollectionSetRootsEmptyClosure : public G1HeapRegionClosure {
746 G1ParScanThreadState* _pss;
747
748 public:
749 VerifyOptionalCollectionSetRootsEmptyClosure(G1ParScanThreadState* pss) : _pss(pss) { }
750
751 bool do_heap_region(G1HeapRegion* r) override {
752 assert(!r->has_index_in_opt_cset(), "must be");
753 return false;
754 }
755 } cl(pss);
756 _g1h->collection_set_iterate_increment_from(&cl, worker_id);
757 #endif
758 }
759
760 void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
761 G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination);
762 }
763
764 void start_work(uint worker_id) {
765 _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, Ticks::now().seconds());
766 }
767
768 void end_work(uint worker_id) {
769 _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, Ticks::now().seconds());
770 }
771
772 public:
773 G1EvacuateRegionsTask(G1CollectedHeap* g1h,
774 G1ParScanThreadStateSet* per_thread_states,
775 G1ScannerTasksQueueSet* task_queues,
776 G1RootProcessor* root_processor,
777 uint num_workers,
778 bool has_optional_evacuation_work) :
779 G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
780 _root_processor(root_processor),
781 _has_optional_evacuation_work(has_optional_evacuation_work)
782 { }
783 };
784
785 void G1YoungCollector::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states,
786 bool has_optional_evacuation_work) {
787 G1GCPhaseTimes* p = phase_times();
788
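  // Merge remembered set entries for the collection set regions into the card table
  // so that the heap root scan below works off the card table.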
789 rem_set()->merge_heap_roots(true /* initial_evacuation */);
790
791 Tickspan task_time;
792 const uint num_workers = workers()->active_workers();
793
794 Ticks start_processing = Ticks::now();
795 {
796 G1RootProcessor root_processor(_g1h, num_workers > 1 /* is_parallel */);
797 G1EvacuateRegionsTask g1_par_task(_g1h,
798 per_thread_states,
799 task_queues(),
800 &root_processor,
801 num_workers,
802 has_optional_evacuation_work);
803 task_time = run_task_timed(&g1_par_task);
804 // Closing the inner scope will execute the destructor for the
805 // G1RootProcessor object. By subtracting the WorkerThreads task from the total
806 // time of this scope, we get the "NMethod List Cleanup" time. This list is
807 // constructed during "STW two-phase nmethod root processing", see more in
808 // nmethod.hpp
809 }
810 Tickspan total_processing = Ticks::now() - start_processing;
811
812 p->record_initial_evac_time(task_time.seconds() * 1000.0);
813 p->record_or_add_nmethod_list_cleanup_time((total_processing - task_time).seconds() * 1000.0);
814
815 rem_set()->complete_evac_phase(has_optional_evacuation_work);
816 }
817
818 class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {
819
820 void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
821 _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy, true /* remember_already_scanned_cards */);
822 _g1h->rem_set()->scan_collection_set_code_roots(pss, worker_id, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
823 _g1h->rem_set()->scan_collection_set_optional_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ObjCopy);
824 }
825
826 void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
827 G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
828 }
829
830 public:
831 G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
832 G1ScannerTasksQueueSet* queues,
833 uint num_workers) :
834 G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
835 }
836 };
837
838 void G1YoungCollector::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
839 Tickspan task_time;
840
841 Ticks start_processing = Ticks::now();
842 {
843 NMethodMarkingScope nmethod_marking_scope;
844 G1EvacuateOptionalRegionsTask task(per_thread_states, task_queues(), workers()->active_workers());
845 task_time = run_task_timed(&task);
846 // See comment in evacuate_initial_collection_set() for the reason of the scope.
847 }
848 Tickspan total_processing = Ticks::now() - start_processing;
849
850 G1GCPhaseTimes* p = phase_times();
851 p->record_or_add_optional_evac_time(task_time.seconds() * 1000.0);
852 p->record_or_add_nmethod_list_cleanup_time((total_processing - task_time).seconds() * 1000.0);
853 }
854
855 void G1YoungCollector::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
856 const double pause_start_time_ms = policy()->cur_pause_start_sec() * 1000.0;
857 double target_pause_time_ms = MaxGCPauseMillis;
858
859 if (G1ForceOptionalEvacuation) {
860 target_pause_time_ms = DBL_MAX;
861 }
862
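  // Evacuate optional regions in batches as long as there is pause time budget left
  // and evacuation has not yet failed due to an allocation failure.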
863 while (!evacuation_alloc_failed() && collection_set()->num_optional_regions() > 0) {
864
865 double time_used_ms = os::elapsedTime() * 1000.0 - pause_start_time_ms;
866 double time_left_ms = target_pause_time_ms - time_used_ms;
867
868 if (time_left_ms <= 0 ||
869 !collection_set()->finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
870 log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
871 collection_set()->num_optional_regions(), time_left_ms);
872 break;
873 }
874
875 rem_set()->merge_heap_roots(false /* initial_evacuation */);
876
877 evacuate_next_optional_regions(per_thread_states);
878
879 rem_set()->complete_evac_phase(true /* has_more_than_one_evacuation_phase */);
880 }
881
882 collection_set()->abandon_optional_collection_set(per_thread_states);
883 }
884
// Non-copying keep alive closure.
886 class G1KeepAliveClosure: public OopClosure {
887 G1CollectedHeap*_g1h;
888 public:
889 G1KeepAliveClosure(G1CollectedHeap* g1h) :_g1h(g1h) {}
890 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
891 void do_oop(oop* p) {
892 oop obj = *p;
893 assert(obj != nullptr, "the caller should have filtered out null values");
894
895 const G1HeapRegionAttr region_attr =_g1h->region_attr(obj);
896 assert(!region_attr.is_humongous_candidate(), "Humongous candidates should never be considered alive");
897 if (region_attr.is_in_cset()) {
898 assert(obj->is_forwarded(), "invariant" );
899 *p = obj->forwardee();
900 }
901 }
902 };
903
904 // Copying Keep Alive closure - can be called from both
905 // serial and parallel code as long as different worker
906 // threads utilize different G1ParScanThreadState instances
907 // and different queues.
908 class G1CopyingKeepAliveClosure: public OopClosure {
909 G1CollectedHeap* _g1h;
910 G1ParScanThreadState* _par_scan_state;
911
912 public:
913 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
914 G1ParScanThreadState* pss):
915 _g1h(g1h),
916 _par_scan_state(pss)
917 {}
918
919 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
920 virtual void do_oop( oop* p) { do_oop_work(p); }
921
922 template <class T> void do_oop_work(T* p) {
923 oop obj = RawAccess<>::oop_load(p);
924
925 assert(!_g1h->region_attr(obj).is_humongous_candidate(), "Humongous candidates should never be considered alive");
926 if (_g1h->is_in_cset(obj)) {
927 // If the referent object has been forwarded (either copied
928 // to a new location or to itself in the event of an
929 // evacuation failure) then we need to update the reference
930 // field and, if both reference and referent are in the G1
931 // heap, update the RSet for the referent.
932 //
      // If the referent has not been forwarded then we have to keep
      // it alive by policy. Therefore we have to copy the referent.
      //
      // When the queue is drained (after each phase of reference processing)
      // the object and its followers will be copied, the reference field set
      // to point to the new location, and the RSet updated.
939 _par_scan_state->push_on_queue(ScannerTask(p));
940 }
941 }
942 };
943
944 class G1STWRefProcProxyTask : public RefProcProxyTask {
945 G1CollectedHeap& _g1h;
946 G1ParScanThreadStateSet& _pss;
947 TaskTerminator _terminator;
948 G1ScannerTasksQueueSet& _task_queues;
949
  // G1-specific closure for enqueueing discovered field references. We need to mark the
  // card in the refinement table because the card table itself is in use by the garbage
  // collection.
952 class G1EnqueueDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
953 G1CollectedHeap* _g1h;
954 G1ParScanThreadState* _pss;
955
956 public:
957 G1EnqueueDiscoveredFieldClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) : _g1h(g1h), _pss(pss) { }
958
959 void enqueue(HeapWord* discovered_field_addr, oop value) override {
960 assert(_g1h->is_in(discovered_field_addr), PTR_FORMAT " is not in heap ", p2i(discovered_field_addr));
961 // Store the value first, whatever it is.
962 RawAccess<>::oop_store(discovered_field_addr, value);
963 if (value == nullptr) {
964 return;
965 }
966 _pss->write_ref_field_post(discovered_field_addr, value);
967 }
968 };
969
970 public:
971 G1STWRefProcProxyTask(uint max_workers, G1CollectedHeap& g1h, G1ParScanThreadStateSet& pss, G1ScannerTasksQueueSet& task_queues)
972 : RefProcProxyTask("G1STWRefProcProxyTask", max_workers),
973 _g1h(g1h),
974 _pss(pss),
975 _terminator(max_workers, &task_queues),
976 _task_queues(task_queues) {}
977
978 void work(uint worker_id) override {
979 assert(worker_id < _max_workers, "sanity");
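    // In the single-threaded model all work uses the per-thread state of worker 0,
    // regardless of the calling worker's id.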
980 uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
981
982 G1ParScanThreadState* pss = _pss.state_for_worker(index);
983 pss->set_ref_discoverer(nullptr);
984
985 G1STWIsAliveClosure is_alive(&_g1h);
986 G1CopyingKeepAliveClosure keep_alive(&_g1h, pss);
987 G1EnqueueDiscoveredFieldClosure enqueue(&_g1h, pss);
988 G1ParEvacuateFollowersClosure complete_gc(&_g1h, pss, &_task_queues, _tm == RefProcThreadModel::Single ? nullptr : &_terminator, G1GCPhaseTimes::ObjCopy);
989 _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
990
991 // We have completed copying any necessary live referent objects.
992 assert(pss->queue_is_empty(), "both queue and overflow should be empty");
993 }
994
995 void prepare_run_task_hook() override {
996 _terminator.reset_for_reuse(_queue_count);
997 }
998 };
999
1000 void G1YoungCollector::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
1001 Ticks start = Ticks::now();
1002
1003 ReferenceProcessor* rp = ref_processor_stw();
1004 assert(rp->discovery_enabled(), "should have been enabled");
1005
1006 G1STWRefProcProxyTask task(rp->max_num_queues(), *_g1h, *per_thread_states, *task_queues());
1007 ReferenceProcessorPhaseTimes& pt = *phase_times()->ref_phase_times();
1008 ReferenceProcessorStats stats = rp->process_discovered_references(task, _g1h->workers(), pt);
1009
1010 gc_tracer_stw()->report_gc_reference_stats(stats);
1011
1012 _g1h->make_pending_list_reachable();
1013
1014 phase_times()->record_ref_proc_time((Ticks::now() - start).seconds() * MILLIUNITS);
1015 }
1016
1017 void G1YoungCollector::post_evacuate_cleanup_1(G1ParScanThreadStateSet* per_thread_states) {
1018 Ticks start = Ticks::now();
1019 {
1020 G1PostEvacuateCollectionSetCleanupTask1 cl(per_thread_states, &_evac_failure_regions);
1021 _g1h->run_batch_task(&cl);
1022 }
1023 phase_times()->record_post_evacuate_cleanup_task_1_time((Ticks::now() - start).seconds() * 1000.0);
1024 }
1025
1026 void G1YoungCollector::post_evacuate_cleanup_2(G1ParScanThreadStateSet* per_thread_states,
1027 G1EvacInfo* evacuation_info) {
1028 Ticks start = Ticks::now();
1029 {
1030 G1PostEvacuateCollectionSetCleanupTask2 cl(per_thread_states, evacuation_info, &_evac_failure_regions);
1031 _g1h->run_batch_task(&cl);
1032 }
1033 phase_times()->record_post_evacuate_cleanup_task_2_time((Ticks::now() - start).seconds() * 1000.0);
1034 }
1035
1036 void G1YoungCollector::enqueue_candidates_as_root_regions() {
1037 assert(collector_state()->is_in_concurrent_start_gc(), "must be");
1038
1039 G1CollectionSetCandidates* candidates = collection_set()->candidates();
1040 candidates->iterate_regions([&] (G1HeapRegion* r) {
1041 _g1h->concurrent_mark()->add_root_region(r);
1042 });
1043 }
1044
1045 void G1YoungCollector::post_evacuate_collection_set(G1EvacInfo* evacuation_info,
1046 G1ParScanThreadStateSet* per_thread_states) {
1047 G1GCPhaseTimes* p = phase_times();
1048
1049 // Process any discovered reference objects - we have
1050 // to do this _before_ we retire the GC alloc regions
1051 // as we may have to copy some 'reachable' referent
1052 // objects (and their reachable sub-graphs) that were
1053 // not copied during the pause.
1054 process_discovered_references(per_thread_states);
1055
1056 G1STWIsAliveClosure is_alive(_g1h);
1057 G1KeepAliveClosure keep_alive(_g1h);
1058
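  // Process weak (non-strong) oop storages: clear oops whose referents are dead and
  // update live oops that point into the collection set to their new locations.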
1059 WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive, p->weak_phase_times());
1060
1061 allocator()->release_gc_alloc_regions(evacuation_info);
1062
1063 #if TASKQUEUE_STATS
1064 _g1h->task_queues()->print_and_reset_taskqueue_stats("Young GC");
1065 // Logging uses thread states, which are deleted by cleanup, so this must
1066 // be done before cleanup.
1067 per_thread_states->print_partial_array_task_stats();
1068 #endif // TASKQUEUE_STATS
1069
1070 post_evacuate_cleanup_1(per_thread_states);
1071
1072 post_evacuate_cleanup_2(per_thread_states, evacuation_info);
1073
  // Regions in the collection set candidates are roots for the marking: they are
  // not marked through (as they are very likely to be reclaimed soon anyway), so
  // unlike survivor regions they need to be enqueued as root regions explicitly.
1077 if (collector_state()->is_in_concurrent_start_gc()) {
1078 enqueue_candidates_as_root_regions();
1079 }
1080
1081 _evac_failure_regions.post_collection();
1082
1083 assert_used_and_recalculate_used_equal(_g1h);
1084
1085 _g1h->rebuild_free_region_list();
1086
1087 _g1h->record_obj_copy_mem_stats();
1088
1089 evacuation_info->set_bytes_used(_g1h->bytes_used_during_gc());
1090
1091 _g1h->prepare_for_mutator_after_young_collection();
1092
  _g1h->gc_epilogue(false /* full */);
1094
1095 _g1h->resize_heap_after_young_collection(_allocation_word_size);
1096 }
1097
1098 bool G1YoungCollector::evacuation_failed() const {
1099 return _evac_failure_regions.has_regions_evac_failed();
1100 }
1101
1102 bool G1YoungCollector::evacuation_pinned() const {
1103 return _evac_failure_regions.has_regions_evac_pinned();
1104 }
1105
1106 bool G1YoungCollector::evacuation_alloc_failed() const {
1107 return _evac_failure_regions.has_regions_alloc_failed();
1108 }
1109
1110 G1YoungCollector::G1YoungCollector(GCCause::Cause gc_cause,
1111 size_t allocation_word_size) :
1112 _g1h(G1CollectedHeap::heap()),
1113 _gc_cause(gc_cause),
1114 _allocation_word_size(allocation_word_size),
1115 _concurrent_operation_is_full_mark(false),
1116 _evac_failure_regions()
1117 {
1118 }
1119
1120 void G1YoungCollector::collect() {
1121 // Do timing/tracing/statistics/pre- and post-logging/verification work not
1122 // directly related to the collection. They should not be accounted for in
1123 // collection work timing.
1124
1125 // The G1YoungGCTraceTime message depends on collector state, so must come after
1126 // determining collector state.
1127 G1YoungGCTraceTime tm(this, _gc_cause);
1128
1129 // JFR
1130 G1YoungGCJFRTracerMark jtm(gc_timer_stw(), gc_tracer_stw(), _gc_cause);
1131 // JStat/MXBeans
1132 G1YoungGCMonitoringScope ms(monitoring_support(),
1133 !collection_set()->candidates()->is_empty() /* all_memory_pools_affected */);
1134 // Create the heap printer before internal pause timing to have
1135 // heap information printed as last part of detailed GC log.
1136 G1HeapPrinterMark hpm(_g1h);
1137 // Young GC internal pause timing
1138 G1YoungGCNotifyPauseMark npm(this);
1139
1140 // Verification may use the workers, so they must be set up before.
1141 // Individual parallel phases may override this.
1142 set_young_collection_default_active_worker_threads();
1143
  // Wait for the root region scan here, before any other use of the STW workers,
  // to maximize cpu use (i.e. all cores are available just for completing that
  // scan).
1147 complete_root_region_scan();
1148
1149 G1YoungGCVerifierMark vm(this);
1150 {
1151 // Actual collection work starts and is executed (only) in this scope.
1152
1153 // Young GC internal collection timing. The elapsed time recorded in the
1154 // policy for the collection deliberately elides verification (and some
1155 // other trivial setup above).
1156 policy()->record_young_collection_start();
1157
1158 pre_evacuate_collection_set(jtm.evacuation_info());
1159
1160 G1ParScanThreadStateSet per_thread_states(_g1h,
1161 workers()->active_workers(),
1162 collection_set(),
1163 &_evac_failure_regions);
1164
1165 bool may_do_optional_evacuation = collection_set()->num_optional_regions() != 0;
1166 // Actually do the work...
1167 evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);
1168
1169 if (may_do_optional_evacuation) {
1170 evacuate_optional_collection_set(&per_thread_states);
1171 }
1172 post_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);
1173
    // Refine the type of the concurrent mark operation now that the evacuation is
    // done, possibly aborting it.
1176 _concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP", _allocation_word_size);
1177
    // Need to report the pause type now since record_young_collection_end() below
    // advances the collector state to the next state.
1180 jtm.report_pause_type(collector_state()->gc_pause_type(_concurrent_operation_is_full_mark));
1181
1182 policy()->record_young_collection_end(_concurrent_operation_is_full_mark, evacuation_alloc_failed(), _allocation_word_size);
1183 }
1184 }