1 /*
2 * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "classfile/classLoaderDataGraph.inline.hpp"
28 #include "classfile/javaClasses.inline.hpp"
29 #include "compiler/oopMap.hpp"
30 #include "gc/g1/g1Allocator.hpp"
31 #include "gc/g1/g1CardSetMemory.hpp"
32 #include "gc/g1/g1CollectedHeap.inline.hpp"
33 #include "gc/g1/g1CollectionSetCandidates.inline.hpp"
34 #include "gc/g1/g1CollectorState.hpp"
35 #include "gc/g1/g1ConcurrentMark.hpp"
36 #include "gc/g1/g1EvacFailureRegions.inline.hpp"
37 #include "gc/g1/g1EvacInfo.hpp"
38 #include "gc/g1/g1GCPhaseTimes.hpp"
39 #include "gc/g1/g1HeapRegionPrinter.hpp"
40 #include "gc/g1/g1MonitoringSupport.hpp"
41 #include "gc/g1/g1ParScanThreadState.inline.hpp"
42 #include "gc/g1/g1Policy.hpp"
43 #include "gc/g1/g1RedirtyCardsQueue.hpp"
44 #include "gc/g1/g1RegionPinCache.inline.hpp"
45 #include "gc/g1/g1RemSet.hpp"
46 #include "gc/g1/g1RootProcessor.hpp"
47 #include "gc/g1/g1Trace.hpp"
48 #include "gc/g1/g1YoungCollector.hpp"
49 #include "gc/g1/g1YoungGCAllocationFailureInjector.hpp"
50 #include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
51 #include "gc/g1/g1YoungGCPreEvacuateTasks.hpp"
52 #include "gc/shared/concurrentGCBreakpoints.hpp"
53 #include "gc/shared/gcTimer.hpp"
54 #include "gc/shared/gcTraceTime.inline.hpp"
55 #include "gc/shared/gc_globals.hpp"
56 #include "gc/shared/preservedMarks.hpp"
57 #include "gc/shared/referenceProcessor.hpp"
58 #include "gc/shared/weakProcessor.inline.hpp"
59 #include "gc/shared/workerPolicy.hpp"
60 #include "gc/shared/workerThread.hpp"
61 #include "jfr/jfrEvents.hpp"
62 #include "memory/resourceArea.hpp"
63 #include "runtime/threads.hpp"
64 #include "utilities/ticks.hpp"
65
66 // GCTraceTime wrapper that constructs the message according to GC pause type and
67 // GC cause.
68 // The code relies on the fact that GCTraceTimeWrapper stores only a reference
69 // to the string passed in initially, so that we can modify it as needed.
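// The resulting message looks like, e.g.,
//   "Pause Young (Normal) (G1 Evacuation Pause) (Evacuation Failure: Allocation)"
// where the evacuation failure part is only present if evacuation failed.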
70 class G1YoungGCTraceTime {
71 G1YoungCollector* _collector;
72
73 G1GCPauseType _pause_type;
74 GCCause::Cause _pause_cause;
75
76 static const uint MaxYoungGCNameLength = 128;
77 char _young_gc_name_data[MaxYoungGCNameLength];
78
79 GCTraceTime(Info, gc) _tt;
80
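  // (Re)builds the pause name from the current pause type, cause and evacuation
  // failure state. Called again from the destructor so that the final log
  // message reflects any evacuation failure detected during the pause.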
81 const char* update_young_gc_name() {
82 char evacuation_failed_string[48];
83 evacuation_failed_string[0] = '\0';
84
85 if (_collector->evacuation_failed()) {
86 snprintf(evacuation_failed_string,
87 ARRAY_SIZE(evacuation_failed_string),
88 " (Evacuation Failure: %s%s%s)",
89 _collector->evacuation_alloc_failed() ? "Allocation" : "",
90 _collector->evacuation_alloc_failed() && _collector->evacuation_pinned() ? " / " : "",
91 _collector->evacuation_pinned() ? "Pinned" : "");
92 }
93 snprintf(_young_gc_name_data,
94 MaxYoungGCNameLength,
95 "Pause Young (%s) (%s)%s",
96 G1GCPauseTypeHelper::to_string(_pause_type),
97 GCCause::to_string(_pause_cause),
98 evacuation_failed_string);
99 return _young_gc_name_data;
100 }
101
102 public:
103 G1YoungGCTraceTime(G1YoungCollector* collector, GCCause::Cause cause) :
104 _collector(collector),
105 // Take snapshot of current pause type at start as it may be modified during gc.
106 // The strings for all Concurrent Start pauses are the same, so the parameter
107 // does not matter here.
108 _pause_type(_collector->collector_state()->young_gc_pause_type(false /* concurrent_operation_is_full_mark */)),
109 _pause_cause(cause),
110 // Fake a "no cause" and manually add the correct string in update_young_gc_name()
111 // to make the string look more natural.
112 _tt(update_young_gc_name(), nullptr, GCCause::_no_gc, true) {
113 }
114
115 ~G1YoungGCTraceTime() {
116 update_young_gc_name();
117 }
118 };
119
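// Scoped object that notifies the policy of the start and end of the young GC
// pause; the end notification includes whether evacuation failed.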
120 class G1YoungGCNotifyPauseMark : public StackObj {
121 G1YoungCollector* _collector;
122
123 public:
124 G1YoungGCNotifyPauseMark(G1YoungCollector* collector) : _collector(collector) {
125 G1CollectedHeap::heap()->policy()->record_young_gc_pause_start();
126 }
127
128 ~G1YoungGCNotifyPauseMark() {
129 G1CollectedHeap::heap()->policy()->record_young_gc_pause_end(_collector->evacuation_failed());
130 }
131 };
132
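// JFR tracer mark for young GCs. Owns the evacuation info that is filled in
// during the pause and reported to the tracer, together with the tenuring
// threshold, when the mark goes out of scope.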
133 class G1YoungGCJFRTracerMark : public G1JFRTracerMark {
134 G1EvacInfo _evacuation_info;
135
136 G1NewTracer* tracer() const { return (G1NewTracer*)_tracer; }
137
138 public:
139
140 G1EvacInfo* evacuation_info() { return &_evacuation_info; }
141
142 G1YoungGCJFRTracerMark(STWGCTimer* gc_timer_stw, G1NewTracer* gc_tracer_stw, GCCause::Cause cause) :
143 G1JFRTracerMark(gc_timer_stw, gc_tracer_stw), _evacuation_info() { }
144
145 void report_pause_type(G1GCPauseType type) {
146 tracer()->report_young_gc_pause(type);
147 }
148
149 ~G1YoungGCJFRTracerMark() {
150 G1CollectedHeap* g1h = G1CollectedHeap::heap();
151
152 tracer()->report_evacuation_info(&_evacuation_info);
153 tracer()->report_tenuring_threshold(g1h->policy()->tenuring_threshold());
154 }
155 };
156
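// Verifies the heap before and after the young collection. The verification
// type is derived from the collector state; an evacuation failure tag is added
// to the type for the verification after the pause if needed.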
157 class G1YoungGCVerifierMark : public StackObj {
158 G1YoungCollector* _collector;
159 G1HeapVerifier::G1VerifyType _type;
160
161 static G1HeapVerifier::G1VerifyType young_collection_verify_type() {
162 G1CollectorState* state = G1CollectedHeap::heap()->collector_state();
163 if (state->in_concurrent_start_gc()) {
164 return G1HeapVerifier::G1VerifyConcurrentStart;
165 } else if (state->in_young_only_phase()) {
166 return G1HeapVerifier::G1VerifyYoungNormal;
167 } else {
168 return G1HeapVerifier::G1VerifyMixed;
169 }
170 }
171
172 public:
173 G1YoungGCVerifierMark(G1YoungCollector* collector) : _collector(collector), _type(young_collection_verify_type()) {
174 G1CollectedHeap::heap()->verify_before_young_collection(_type);
175 }
176
177 ~G1YoungGCVerifierMark() {
178 // Inject evacuation failure tag into type if needed.
179 G1HeapVerifier::G1VerifyType type = _type;
180 if (_collector->evacuation_failed()) {
181 type = (G1HeapVerifier::G1VerifyType)(type | G1HeapVerifier::G1VerifyYoungEvacFail);
182 }
183 G1CollectedHeap::heap()->verify_after_young_collection(type);
184 }
185 };
186
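// Convenience accessors forwarding to the corresponding G1CollectedHeap
// components.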
187 G1Allocator* G1YoungCollector::allocator() const {
188 return _g1h->allocator();
189 }
190
191 G1CollectionSet* G1YoungCollector::collection_set() const {
192 return _g1h->collection_set();
193 }
194
195 G1CollectorState* G1YoungCollector::collector_state() const {
196 return _g1h->collector_state();
197 }
198
199 G1ConcurrentMark* G1YoungCollector::concurrent_mark() const {
200 return _g1h->concurrent_mark();
201 }
202
203 STWGCTimer* G1YoungCollector::gc_timer_stw() const {
204 return _g1h->gc_timer_stw();
205 }
206
207 G1NewTracer* G1YoungCollector::gc_tracer_stw() const {
208 return _g1h->gc_tracer_stw();
209 }
210
211 G1Policy* G1YoungCollector::policy() const {
212 return _g1h->policy();
213 }
214
215 G1GCPhaseTimes* G1YoungCollector::phase_times() const {
216 return _g1h->phase_times();
217 }
218
219 G1MonitoringSupport* G1YoungCollector::monitoring_support() const {
220 return _g1h->monitoring_support();
221 }
222
223 G1RemSet* G1YoungCollector::rem_set() const {
224 return _g1h->rem_set();
225 }
226
227 G1ScannerTasksQueueSet* G1YoungCollector::task_queues() const {
228 return _g1h->task_queues();
229 }
230
231 G1SurvivorRegions* G1YoungCollector::survivor_regions() const {
232 return _g1h->survivor();
233 }
234
235 ReferenceProcessor* G1YoungCollector::ref_processor_stw() const {
236 return _g1h->ref_processor_stw();
237 }
238
239 WorkerThreads* G1YoungCollector::workers() const {
240 return _g1h->workers();
241 }
242
243 G1YoungGCAllocationFailureInjector* G1YoungCollector::allocation_failure_injector() const {
244 return _g1h->allocation_failure_injector();
245 }
246
247
248 void G1YoungCollector::wait_for_root_region_scanning() {
249 Ticks start = Ticks::now();
250 // We have to wait until the CM threads finish scanning the
251 // root regions as it's the only way to ensure that all the
252 // objects on them have been correctly scanned before we start
253 // moving them during the GC.
254 bool waited = concurrent_mark()->wait_until_root_region_scan_finished();
255 Tickspan wait_time;
256 if (waited) {
257 wait_time = (Ticks::now() - start);
258 }
259 phase_times()->record_root_region_scan_wait_time(wait_time.seconds() * MILLIUNITS);
260 }
261
262 class G1PrintCollectionSetClosure : public HeapRegionClosure {
263 public:
264 virtual bool do_heap_region(G1HeapRegion* r) {
265 G1HeapRegionPrinter::cset(r);
266 return false;
267 }
268 };
269
270 void G1YoungCollector::calculate_collection_set(G1EvacInfo* evacuation_info, double target_pause_time_ms) {
271 // Forget the current allocation region (we might even choose it to be part
272 // of the collection set!) before finalizing the collection set.
273 allocator()->release_mutator_alloc_regions();
274
275 collection_set()->finalize_initial_collection_set(target_pause_time_ms, survivor_regions());
276 evacuation_info->set_collection_set_regions(collection_set()->region_length() +
277 collection_set()->optional_region_length());
278
279 concurrent_mark()->verify_no_collection_set_oops();
280
281 if (G1HeapRegionPrinter::is_active()) {
282 G1PrintCollectionSetClosure cl;
283 collection_set()->iterate(&cl);
284 collection_set()->iterate_optional(&cl);
285 }
286 }
287
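// Worker task preparing regions for evacuation: prepares each region for
// remembered set scanning, samples card set memory sizes, and selects
// humongous regions that are candidates for eager reclaim.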
288 class G1PrepareEvacuationTask : public WorkerTask {
289 class G1PrepareRegionsClosure : public HeapRegionClosure {
290 G1CollectedHeap* _g1h;
291 G1PrepareEvacuationTask* _parent_task;
292 uint _worker_humongous_total;
293 uint _worker_humongous_candidates;
294
295 G1MonotonicArenaMemoryStats _card_set_stats;
296
297 void sample_card_set_size(G1HeapRegion* hr) {
298       // Sample card set sizes for young gen and humongous regions before GC: this
299       // ensures that the policy for giving back memory to the OS keeps the most
300       // recent amount of memory for these regions.
301 if (hr->is_young() || hr->is_starts_humongous()) {
302 _card_set_stats.add(hr->rem_set()->card_set_memory_stats());
303 }
304 }
305
306 bool humongous_region_is_candidate(G1HeapRegion* region) const {
307 assert(region->is_starts_humongous(), "Must start a humongous object");
308
309 oop obj = cast_to_oop(region->bottom());
310
311 // Dead objects cannot be eager reclaim candidates. Due to class
312 // unloading it is unsafe to query their classes so we return early.
313 if (_g1h->is_obj_dead(obj, region)) {
314 return false;
315 }
316
317       // If we do not have a complete remembered set for the region, then we
318       // cannot be sure that we have all references to it.
319 if (!region->rem_set()->is_complete()) {
320 return false;
321 }
322 // We also cannot collect the humongous object if it is pinned.
323 if (region->has_pinned_objects()) {
324 return false;
325 }
326 // Candidate selection must satisfy the following constraints
327 // while concurrent marking is in progress:
328 //
329 // * In order to maintain SATB invariants, an object must not be
330 // reclaimed if it was allocated before the start of marking and
331 // has not had its references scanned. Such an object must have
332 // its references (including type metadata) scanned to ensure no
333 // live objects are missed by the marking process. Objects
334 // allocated after the start of concurrent marking don't need to
335 // be scanned.
336 //
337 // * An object must not be reclaimed if it is on the concurrent
338 // mark stack. Objects allocated after the start of concurrent
339 // marking are never pushed on the mark stack.
340 //
341 // Nominating only objects allocated after the start of concurrent
342 // marking is sufficient to meet both constraints. This may miss
343 // some objects that satisfy the constraints, but the marking data
344 // structures don't support efficiently performing the needed
345 // additional tests or scrubbing of the mark stack.
346 //
347 // However, we presently only nominate is_typeArray() objects.
348 // A humongous object containing references induces remembered
349 // set entries on other regions. In order to reclaim such an
350 // object, those remembered sets would need to be cleaned up.
351 //
352 // We also treat is_typeArray() objects specially, allowing them
353 // to be reclaimed even if allocated before the start of
354 // concurrent mark. For this we rely on mark stack insertion to
355 // exclude is_typeArray() objects, preventing reclaiming an object
356 // that is in the mark stack. We also rely on the metadata for
357 // such objects to be built-in and so ensured to be kept live.
358 // Frequent allocation and drop of large binary blobs is an
359 // important use case for eager reclaim, and this special handling
360 // may reduce needed headroom.
361
362 return obj->is_typeArray() &&
363 _g1h->is_potential_eager_reclaim_candidate(region);
364 }
365
366 public:
367 G1PrepareRegionsClosure(G1CollectedHeap* g1h, G1PrepareEvacuationTask* parent_task) :
368 _g1h(g1h),
369 _parent_task(parent_task),
370 _worker_humongous_total(0),
371 _worker_humongous_candidates(0) { }
372
373 ~G1PrepareRegionsClosure() {
374 _parent_task->add_humongous_candidates(_worker_humongous_candidates);
375 _parent_task->add_humongous_total(_worker_humongous_total);
376 }
377
378 virtual bool do_heap_region(G1HeapRegion* hr) {
379 // First prepare the region for scanning
380 _g1h->rem_set()->prepare_region_for_scan(hr);
381
382 sample_card_set_size(hr);
383
384 // Now check if region is a humongous candidate
385 if (!hr->is_starts_humongous()) {
386 _g1h->register_region_with_region_attr(hr);
387 return false;
388 }
389
390 uint index = hr->hrm_index();
391 if (humongous_region_is_candidate(hr)) {
392 _g1h->register_humongous_candidate_region_with_region_attr(index);
393 _worker_humongous_candidates++;
394 // We will later handle the remembered sets of these regions.
395 } else {
396 _g1h->register_region_with_region_attr(hr);
397 }
398 log_debug(gc, humongous)("Humongous region %u (object size %zu @ " PTR_FORMAT ") remset %zu code roots %zu "
399 "marked %d pinned count %zu reclaim candidate %d type array %d",
400 index,
401 cast_to_oop(hr->bottom())->size() * HeapWordSize,
402 p2i(hr->bottom()),
403 hr->rem_set()->occupied(),
404 hr->rem_set()->code_roots_list_length(),
405 _g1h->concurrent_mark()->mark_bitmap()->is_marked(hr->bottom()),
406 hr->pinned_count(),
407 _g1h->is_humongous_reclaim_candidate(index),
408 cast_to_oop(hr->bottom())->is_typeArray()
409 );
410 _worker_humongous_total++;
411
412 return false;
413 }
414
415 G1MonotonicArenaMemoryStats card_set_stats() const {
416 return _card_set_stats;
417 }
418 };
419
420 G1CollectedHeap* _g1h;
421 HeapRegionClaimer _claimer;
422 volatile uint _humongous_total;
423 volatile uint _humongous_candidates;
424
425 G1MonotonicArenaMemoryStats _all_card_set_stats;
426
427 public:
428 G1PrepareEvacuationTask(G1CollectedHeap* g1h) :
429 WorkerTask("Prepare Evacuation"),
430 _g1h(g1h),
431 _claimer(_g1h->workers()->active_workers()),
432 _humongous_total(0),
433 _humongous_candidates(0) { }
434
435 void work(uint worker_id) {
436 G1PrepareRegionsClosure cl(_g1h, this);
437 _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_claimer, worker_id);
438
439 MutexLocker x(G1RareEvent_lock, Mutex::_no_safepoint_check_flag);
440 _all_card_set_stats.add(cl.card_set_stats());
441 }
442
443 void add_humongous_candidates(uint candidates) {
444 Atomic::add(&_humongous_candidates, candidates);
445 }
446
447 void add_humongous_total(uint total) {
448 Atomic::add(&_humongous_total, total);
449 }
450
451 uint humongous_candidates() {
452 return _humongous_candidates;
453 }
454
455 uint humongous_total() {
456 return _humongous_total;
457 }
458
459 const G1MonotonicArenaMemoryStats all_card_set_stats() const {
460 return _all_card_set_stats;
461 }
462 };
463
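// Runs the given task on the worker threads and returns the elapsed wall-clock
// time.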
464 Tickspan G1YoungCollector::run_task_timed(WorkerTask* task) {
465 Ticks start = Ticks::now();
466 workers()->run_task(task);
467 return Ticks::now() - start;
468 }
469
470 void G1YoungCollector::set_young_collection_default_active_worker_threads(){
471 uint active_workers = WorkerPolicy::calc_active_workers(workers()->max_workers(),
472 workers()->active_workers(),
473 Threads::number_of_non_daemon_threads());
474 active_workers = workers()->set_active_workers(active_workers);
475 log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->max_workers());
476 }
477
478 void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info) {
479   // Flush various data in thread-local buffers to be able to determine the
480   // collection set.
481 {
482 Ticks start = Ticks::now();
483 G1PreEvacuateCollectionSetBatchTask cl;
484 G1CollectedHeap::heap()->run_batch_task(&cl);
485 phase_times()->record_pre_evacuate_prepare_time_ms((Ticks::now() - start).seconds() * 1000.0);
486 }
487
488 // Needs log buffers flushed.
489 calculate_collection_set(evacuation_info, policy()->max_pause_time_ms());
490
491 if (collector_state()->in_concurrent_start_gc()) {
492 concurrent_mark()->pre_concurrent_start(_gc_cause);
493 }
494
495 // Please see comment in g1CollectedHeap.hpp and
496 // G1CollectedHeap::ref_processing_init() to see how
497 // reference processing currently works in G1.
498 ref_processor_stw()->start_discovery(false /* always_clear */);
499
500 _evac_failure_regions.pre_collection(_g1h->max_reserved_regions());
501
502 _g1h->gc_prologue(false);
503
504 // Initialize the GC alloc regions.
505 allocator()->init_gc_alloc_regions(evacuation_info);
506
507 {
508 Ticks start = Ticks::now();
509 rem_set()->prepare_for_scan_heap_roots();
510 phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
511 }
512
513 {
514 G1PrepareEvacuationTask g1_prep_task(_g1h);
515 Tickspan task_time = run_task_timed(&g1_prep_task);
516
517 _g1h->set_young_gen_card_set_stats(g1_prep_task.all_card_set_stats());
518 _g1h->set_humongous_stats(g1_prep_task.humongous_total(), g1_prep_task.humongous_candidates());
519
520 phase_times()->record_register_regions(task_time.seconds() * 1000.0);
521 }
522
523 assert(_g1h->verifier()->check_region_attr_table(), "Inconsistency in the region attributes table.");
524
525 #if COMPILER2_OR_JVMCI
526 DerivedPointerTable::clear();
527 #endif
528
529 allocation_failure_injector()->arm_if_needed();
530 }
531
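// Drains the worker's scanner task queue, stealing from other queues, until
// all workers agree to terminate; tracks termination time and attempts for the
// phase times.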
532 class G1ParEvacuateFollowersClosure : public VoidClosure {
533 double _start_term;
534 double _term_time;
535 size_t _term_attempts;
536
537 void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
538 void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
539
540 G1CollectedHeap* _g1h;
541 G1ParScanThreadState* _par_scan_state;
542 G1ScannerTasksQueueSet* _queues;
543 TaskTerminator* _terminator;
544 G1GCPhaseTimes::GCParPhases _phase;
545
546 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
547 G1ScannerTasksQueueSet* queues() { return _queues; }
548 TaskTerminator* terminator() { return _terminator; }
549
550 inline bool offer_termination() {
551 EventGCPhaseParallel event;
552 G1ParScanThreadState* const pss = par_scan_state();
553 start_term_time();
554 const bool res = (terminator() == nullptr) ? true : terminator()->offer_termination();
555 end_term_time();
556 event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::Termination));
557 return res;
558 }
559
560 public:
561 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
562 G1ParScanThreadState* par_scan_state,
563 G1ScannerTasksQueueSet* queues,
564 TaskTerminator* terminator,
565 G1GCPhaseTimes::GCParPhases phase)
566 : _start_term(0.0), _term_time(0.0), _term_attempts(0),
567 _g1h(g1h), _par_scan_state(par_scan_state),
568 _queues(queues), _terminator(terminator), _phase(phase) {}
569
570 void do_void() {
571 EventGCPhaseParallel event;
572 G1ParScanThreadState* const pss = par_scan_state();
573 pss->trim_queue();
574 event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
575 do {
576 EventGCPhaseParallel event;
577 pss->steal_and_trim_queue(queues());
578 event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
579 } while (!offer_termination());
580 }
581
582 double term_time() const { return _term_time; }
583 size_t term_attempts() const { return _term_attempts; }
584 };
585
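// Base class for the evacuation tasks: each worker scans its part of the roots
// and then evacuates live objects by draining and stealing from the scanner
// task queues until termination.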
586 class G1EvacuateRegionsBaseTask : public WorkerTask {
587 protected:
588 G1CollectedHeap* _g1h;
589 G1ParScanThreadStateSet* _per_thread_states;
590 G1ScannerTasksQueueSet* _task_queues;
591 TaskTerminator _terminator;
592 uint _num_workers;
593
594 void evacuate_live_objects(G1ParScanThreadState* pss,
595 uint worker_id,
596 G1GCPhaseTimes::GCParPhases objcopy_phase,
597 G1GCPhaseTimes::GCParPhases termination_phase) {
598 G1GCPhaseTimes* p = _g1h->phase_times();
599
600 Ticks start = Ticks::now();
601 G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, &_terminator, objcopy_phase);
602 cl.do_void();
603
604 assert(pss->queue_is_empty(), "should be empty");
605
606 Tickspan evac_time = (Ticks::now() - start);
607 p->record_or_add_time_secs(objcopy_phase, worker_id, evac_time.seconds() - cl.term_time());
608
609 if (termination_phase == G1GCPhaseTimes::Termination) {
610 p->record_time_secs(termination_phase, worker_id, cl.term_time());
611 p->record_thread_work_item(termination_phase, worker_id, cl.term_attempts());
612 } else {
613 p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time());
614 p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts());
615 }
616 assert(pss->trim_ticks().value() == 0,
617 "Unexpected partial trimming during evacuation value " JLONG_FORMAT,
618 pss->trim_ticks().value());
619 }
620
621 virtual void start_work(uint worker_id) { }
622
623 virtual void end_work(uint worker_id) { }
624
625 virtual void scan_roots(G1ParScanThreadState* pss, uint worker_id) = 0;
626
627 virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;
628
629 public:
630 G1EvacuateRegionsBaseTask(const char* name,
631 G1ParScanThreadStateSet* per_thread_states,
632 G1ScannerTasksQueueSet* task_queues,
633 uint num_workers) :
634 WorkerTask(name),
635 _g1h(G1CollectedHeap::heap()),
636 _per_thread_states(per_thread_states),
637 _task_queues(task_queues),
638 _terminator(num_workers, _task_queues),
639 _num_workers(num_workers)
640 { }
641
642 void work(uint worker_id) {
643 start_work(worker_id);
644
645 {
646 ResourceMark rm;
647
648 G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
649 pss->set_ref_discoverer(_g1h->ref_processor_stw());
650
651 scan_roots(pss, worker_id);
652 evacuate_live_objects(pss, worker_id);
653 }
654
655 end_work(worker_id);
656 }
657 };
658
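// Evacuation of the initial collection set: evacuates objects referenced by
// the roots, then scans heap roots and collection set regions before draining
// the task queues.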
659 class G1EvacuateRegionsTask : public G1EvacuateRegionsBaseTask {
660 G1RootProcessor* _root_processor;
661 bool _has_optional_evacuation_work;
662
663 void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
664 _root_processor->evacuate_roots(pss, worker_id);
665 _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ObjCopy, _has_optional_evacuation_work);
666 _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::CodeRoots, G1GCPhaseTimes::ObjCopy);
667 }
668
669 void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
670 G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination);
671 }
672
673 void start_work(uint worker_id) {
674 _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, Ticks::now().seconds());
675 }
676
677 void end_work(uint worker_id) {
678 _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, Ticks::now().seconds());
679 }
680
681 public:
682 G1EvacuateRegionsTask(G1CollectedHeap* g1h,
683 G1ParScanThreadStateSet* per_thread_states,
684 G1ScannerTasksQueueSet* task_queues,
685 G1RootProcessor* root_processor,
686 uint num_workers,
687 bool has_optional_evacuation_work) :
688 G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
689 _root_processor(root_processor),
690 _has_optional_evacuation_work(has_optional_evacuation_work)
691 { }
692 };
693
694 void G1YoungCollector::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states,
695 bool has_optional_evacuation_work) {
696 G1GCPhaseTimes* p = phase_times();
697
698 {
699 Ticks start = Ticks::now();
700 rem_set()->merge_heap_roots(true /* initial_evacuation */);
701 p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
702 }
703
704 Tickspan task_time;
705 const uint num_workers = workers()->active_workers();
706
707 Ticks start_processing = Ticks::now();
708 {
709 G1RootProcessor root_processor(_g1h, num_workers);
710 G1EvacuateRegionsTask g1_par_task(_g1h,
711 per_thread_states,
712 task_queues(),
713 &root_processor,
714 num_workers,
715 has_optional_evacuation_work);
716 task_time = run_task_timed(&g1_par_task);
717 // Closing the inner scope will execute the destructor for the
718 // G1RootProcessor object. By subtracting the WorkerThreads task from the total
719     // time of this scope, we get the "NMethod List Cleanup" time. This list is
720     // constructed during "STW two-phase nmethod root processing"; see
721     // nmethod.hpp for more details.
722 }
723 Tickspan total_processing = Ticks::now() - start_processing;
724
725 p->record_initial_evac_time(task_time.seconds() * 1000.0);
726 p->record_or_add_nmethod_list_cleanup_time((total_processing - task_time).seconds() * 1000.0);
727
728 rem_set()->complete_evac_phase(has_optional_evacuation_work);
729 }
730
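// Evacuation of optional collection set regions: the roots have already been
// evacuated during the initial evacuation, so only heap root and collection
// set region scanning is needed here.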
731 class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {
732
733 void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
734 _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy, true /* remember_already_scanned_cards */);
735 _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
736 }
737
738 void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
739 G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
740 }
741
742 public:
743 G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
744 G1ScannerTasksQueueSet* queues,
745 uint num_workers) :
746 G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
747 }
748 };
749
750 void G1YoungCollector::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
751 // To access the protected constructor/destructor
752 class G1MarkScope : public MarkScope { };
753
754 Tickspan task_time;
755
756 Ticks start_processing = Ticks::now();
757 {
758 G1MarkScope code_mark_scope;
759 G1EvacuateOptionalRegionsTask task(per_thread_states, task_queues(), workers()->active_workers());
760 task_time = run_task_timed(&task);
761 // See comment in evacuate_initial_collection_set() for the reason of the scope.
762 }
763 Tickspan total_processing = Ticks::now() - start_processing;
764
765 G1GCPhaseTimes* p = phase_times();
766 p->record_or_add_nmethod_list_cleanup_time((total_processing - task_time).seconds() * 1000.0);
767 }
768
769 void G1YoungCollector::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
770 const double collection_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;
771
772 while (!evacuation_alloc_failed() && collection_set()->optional_region_length() > 0) {
773
774 double time_used_ms = os::elapsedTime() * 1000.0 - collection_start_time_ms;
775 double time_left_ms = MaxGCPauseMillis - time_used_ms;
776
777 if (time_left_ms < 0 ||
778 !collection_set()->finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
779 log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
780 collection_set()->optional_region_length(), time_left_ms);
781 break;
782 }
783
784 {
785 Ticks start = Ticks::now();
786 rem_set()->merge_heap_roots(false /* initial_evacuation */);
787 phase_times()->record_or_add_optional_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
788 }
789
790 {
791 Ticks start = Ticks::now();
792 evacuate_next_optional_regions(per_thread_states);
793 phase_times()->record_or_add_optional_evac_time((Ticks::now() - start).seconds() * 1000.0);
794 }
795
796 rem_set()->complete_evac_phase(true /* has_more_than_one_evacuation_phase */);
797 }
798
799 collection_set()->abandon_optional_collection_set(per_thread_states);
800 }
801
802 // Non Copying Keep Alive closure
803 class G1KeepAliveClosure: public OopClosure {
804 G1CollectedHeap*_g1h;
805 public:
806 G1KeepAliveClosure(G1CollectedHeap* g1h) :_g1h(g1h) {}
807 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
808 void do_oop(oop* p) {
809 oop obj = *p;
810 assert(obj != nullptr, "the caller should have filtered out null values");
811
812 const G1HeapRegionAttr region_attr =_g1h->region_attr(obj);
813 if (!region_attr.is_in_cset_or_humongous_candidate()) {
814 return;
815 }
816 if (region_attr.is_in_cset()) {
817 assert(obj->is_forwarded(), "invariant" );
818 *p = obj->forwardee();
819 } else {
820 assert(!obj->is_forwarded(), "invariant" );
821 assert(region_attr.is_humongous_candidate(),
822 "Only allowed G1HeapRegionAttr state is IsHumongous, but is %d", region_attr.type());
823 _g1h->set_humongous_is_live(obj);
824 }
825 }
826 };
827
828 // Copying Keep Alive closure - can be called from both
829 // serial and parallel code as long as different worker
830 // threads utilize different G1ParScanThreadState instances
831 // and different queues.
832 class G1CopyingKeepAliveClosure: public OopClosure {
833 G1CollectedHeap* _g1h;
834 G1ParScanThreadState* _par_scan_state;
835
836 public:
837 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
838 G1ParScanThreadState* pss):
839 _g1h(g1h),
840 _par_scan_state(pss)
841 {}
842
843 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
844 virtual void do_oop( oop* p) { do_oop_work(p); }
845
846 template <class T> void do_oop_work(T* p) {
847 oop obj = RawAccess<>::oop_load(p);
848
849 if (_g1h->is_in_cset_or_humongous_candidate(obj)) {
850 // If the referent object has been forwarded (either copied
851 // to a new location or to itself in the event of an
852 // evacuation failure) then we need to update the reference
853 // field and, if both reference and referent are in the G1
854 // heap, update the RSet for the referent.
855 //
856 // If the referent has not been forwarded then we have to keep
857       // it alive by policy. Therefore we have to copy the referent.
858 //
859 // When the queue is drained (after each phase of reference processing)
860       // the object and its followers will be copied, the reference field set
861 // to point to the new location, and the RSet updated.
862 _par_scan_state->push_on_queue(ScannerTask(p));
863 }
864 }
865 };
866
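// Proxy task distributing STW reference processing across the workers, using
// the copying keep-alive closure to evacuate live referents and the
// evacuate-followers closure to drain the resulting copy work.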
867 class G1STWRefProcProxyTask : public RefProcProxyTask {
868 G1CollectedHeap& _g1h;
869 G1ParScanThreadStateSet& _pss;
870 TaskTerminator _terminator;
871 G1ScannerTasksQueueSet& _task_queues;
872
873 // Special closure for enqueuing discovered fields: during enqueue the card table
874 // may not be in shape to properly handle normal barrier calls (e.g. card marks
875 // in regions that failed evacuation, scribbling of various values by card table
876 // scan code). Additionally the regular barrier enqueues into the "global"
877 // DCQS, but during GC we need these to-be-refined entries in the GC local queue
878 // so that after clearing the card table, the redirty cards phase will properly
879 // mark all dirty cards to be picked up by refinement.
880 class G1EnqueueDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
881 G1CollectedHeap* _g1h;
882 G1ParScanThreadState* _pss;
883
884 public:
885 G1EnqueueDiscoveredFieldClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) : _g1h(g1h), _pss(pss) { }
886
887 void enqueue(HeapWord* discovered_field_addr, oop value) override {
888 assert(_g1h->is_in(discovered_field_addr), PTR_FORMAT " is not in heap ", p2i(discovered_field_addr));
889 // Store the value first, whatever it is.
890 RawAccess<>::oop_store(discovered_field_addr, value);
891 if (value == nullptr) {
892 return;
893 }
894 _pss->write_ref_field_post(discovered_field_addr, value);
895 }
896 };
897
898 public:
899 G1STWRefProcProxyTask(uint max_workers, G1CollectedHeap& g1h, G1ParScanThreadStateSet& pss, G1ScannerTasksQueueSet& task_queues)
900 : RefProcProxyTask("G1STWRefProcProxyTask", max_workers),
901 _g1h(g1h),
902 _pss(pss),
903 _terminator(max_workers, &task_queues),
904 _task_queues(task_queues) {}
905
906 void work(uint worker_id) override {
907 assert(worker_id < _max_workers, "sanity");
908 uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
909
910 G1ParScanThreadState* pss = _pss.state_for_worker(index);
911 pss->set_ref_discoverer(nullptr);
912
913 G1STWIsAliveClosure is_alive(&_g1h);
914 G1CopyingKeepAliveClosure keep_alive(&_g1h, pss);
915 G1EnqueueDiscoveredFieldClosure enqueue(&_g1h, pss);
916 G1ParEvacuateFollowersClosure complete_gc(&_g1h, pss, &_task_queues, _tm == RefProcThreadModel::Single ? nullptr : &_terminator, G1GCPhaseTimes::ObjCopy);
917 _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
918
919 // We have completed copying any necessary live referent objects.
920 assert(pss->queue_is_empty(), "both queue and overflow should be empty");
921 }
922
923 void prepare_run_task_hook() override {
924 _terminator.reset_for_reuse(_queue_count);
925 }
926 };
927
928 void G1YoungCollector::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
929 Ticks start = Ticks::now();
930
931 ReferenceProcessor* rp = ref_processor_stw();
932 assert(rp->discovery_enabled(), "should have been enabled");
933
934 uint no_of_gc_workers = workers()->active_workers();
935 rp->set_active_mt_degree(no_of_gc_workers);
936
937 G1STWRefProcProxyTask task(rp->max_num_queues(), *_g1h, *per_thread_states, *task_queues());
938 ReferenceProcessorPhaseTimes& pt = *phase_times()->ref_phase_times();
939 ReferenceProcessorStats stats = rp->process_discovered_references(task, pt);
940
941 gc_tracer_stw()->report_gc_reference_stats(stats);
942
943 _g1h->make_pending_list_reachable();
944
945 phase_times()->record_ref_proc_time((Ticks::now() - start).seconds() * MILLIUNITS);
946 }
947
948 void G1YoungCollector::post_evacuate_cleanup_1(G1ParScanThreadStateSet* per_thread_states) {
949 Ticks start = Ticks::now();
950 {
951 G1PostEvacuateCollectionSetCleanupTask1 cl(per_thread_states, &_evac_failure_regions);
952 _g1h->run_batch_task(&cl);
953 }
954 phase_times()->record_post_evacuate_cleanup_task_1_time((Ticks::now() - start).seconds() * 1000.0);
955 }
956
957 void G1YoungCollector::post_evacuate_cleanup_2(G1ParScanThreadStateSet* per_thread_states,
958 G1EvacInfo* evacuation_info) {
959 Ticks start = Ticks::now();
960 {
961 G1PostEvacuateCollectionSetCleanupTask2 cl(per_thread_states, evacuation_info, &_evac_failure_regions);
962 _g1h->run_batch_task(&cl);
963 }
964 phase_times()->record_post_evacuate_cleanup_task_2_time((Ticks::now() - start).seconds() * 1000.0);
965 }
966
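// Adds all collection set candidate regions as concurrent mark root regions;
// see the comment in post_evacuate_collection_set() for the rationale.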
967 void G1YoungCollector::enqueue_candidates_as_root_regions() {
968 assert(collector_state()->in_concurrent_start_gc(), "must be");
969
970 G1CollectionSetCandidates* candidates = collection_set()->candidates();
971 for (G1HeapRegion* r : *candidates) {
972 _g1h->concurrent_mark()->add_root_region(r);
973 }
974 }
975
976 void G1YoungCollector::post_evacuate_collection_set(G1EvacInfo* evacuation_info,
977 G1ParScanThreadStateSet* per_thread_states) {
978 G1GCPhaseTimes* p = phase_times();
979
980 // Process any discovered reference objects - we have
981 // to do this _before_ we retire the GC alloc regions
982 // as we may have to copy some 'reachable' referent
983 // objects (and their reachable sub-graphs) that were
984 // not copied during the pause.
985 process_discovered_references(per_thread_states);
986
987 G1STWIsAliveClosure is_alive(_g1h);
988 G1KeepAliveClosure keep_alive(_g1h);
989
990 WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive, p->weak_phase_times());
991
992 allocator()->release_gc_alloc_regions(evacuation_info);
993
994 post_evacuate_cleanup_1(per_thread_states);
995
996 post_evacuate_cleanup_2(per_thread_states, evacuation_info);
997
998   // Regions in the collection set candidates are roots for the marking (they are
999   // not marked through since they are very likely to be reclaimed soon).
1000   // Unlike survivor regions, they need to be enqueued explicitly.
1001 if (collector_state()->in_concurrent_start_gc()) {
1002 enqueue_candidates_as_root_regions();
1003 }
1004
1005 _evac_failure_regions.post_collection();
1006
1007 assert_used_and_recalculate_used_equal(_g1h);
1008
1009 _g1h->rebuild_free_region_list();
1010
1011 _g1h->record_obj_copy_mem_stats();
1012
1013 evacuation_info->set_bytes_used(_g1h->bytes_used_during_gc());
1014
1015 _g1h->prepare_for_mutator_after_young_collection();
1016
1017 _g1h->gc_epilogue(false);
1018
1019 _g1h->expand_heap_after_young_collection();
1020 }
1021
1022 bool G1YoungCollector::evacuation_failed() const {
1023 return _evac_failure_regions.has_regions_evac_failed();
1024 }
1025
1026 bool G1YoungCollector::evacuation_pinned() const {
1027 return _evac_failure_regions.has_regions_evac_pinned();
1028 }
1029
1030 bool G1YoungCollector::evacuation_alloc_failed() const {
1031 return _evac_failure_regions.has_regions_alloc_failed();
1032 }
1033
1034 G1YoungCollector::G1YoungCollector(GCCause::Cause gc_cause) :
1035 _g1h(G1CollectedHeap::heap()),
1036 _gc_cause(gc_cause),
1037 _concurrent_operation_is_full_mark(false),
1038 _evac_failure_regions()
1039 {
1040 }
1041
1042 void G1YoungCollector::collect() {
1043 // Do timing/tracing/statistics/pre- and post-logging/verification work not
1044   // directly related to the collection. This work should not be accounted for in
1045 // collection work timing.
1046
1047 // The G1YoungGCTraceTime message depends on collector state, so must come after
1048 // determining collector state.
1049 G1YoungGCTraceTime tm(this, _gc_cause);
1050
1051 // JFR
1052 G1YoungGCJFRTracerMark jtm(gc_timer_stw(), gc_tracer_stw(), _gc_cause);
1053 // JStat/MXBeans
1054 G1YoungGCMonitoringScope ms(monitoring_support(),
1055 !collection_set()->candidates()->is_empty() /* all_memory_pools_affected */);
1056 // Create the heap printer before internal pause timing to have
1057   // heap information printed as the last part of the detailed GC log.
1058 G1HeapPrinterMark hpm(_g1h);
1059 // Young GC internal pause timing
1060 G1YoungGCNotifyPauseMark npm(this);
1061
1062 // Verification may use the workers, so they must be set up before.
1063 // Individual parallel phases may override this.
1064 set_young_collection_default_active_worker_threads();
1065
1066   // Wait for the root region scan here to make sure that it is done before any
1067   // use of the STW workers, to maximize CPU use (i.e. all cores are available
1068   // just for that).
1069 wait_for_root_region_scanning();
1070
1071 G1YoungGCVerifierMark vm(this);
1072 {
1073 // Actual collection work starts and is executed (only) in this scope.
1074
1075 // Young GC internal collection timing. The elapsed time recorded in the
1076 // policy for the collection deliberately elides verification (and some
1077 // other trivial setup above).
1078 policy()->record_young_collection_start();
1079
1080 pre_evacuate_collection_set(jtm.evacuation_info());
1081
1082 G1ParScanThreadStateSet per_thread_states(_g1h,
1083 workers()->active_workers(),
1084 collection_set(),
1085 &_evac_failure_regions);
1086
1087 bool may_do_optional_evacuation = collection_set()->optional_region_length() != 0;
1088 // Actually do the work...
1089 evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);
1090
1091 if (may_do_optional_evacuation) {
1092 evacuate_optional_collection_set(&per_thread_states);
1093 }
1094 post_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);
1095
1096     // Refine the type of a concurrent mark operation now that we did the
1097     // evacuation, possibly aborting it.
1098 _concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP");
1099
1100 // Need to report the collection pause now since record_collection_pause_end()
1101 // modifies it to the next state.
1102 jtm.report_pause_type(collector_state()->young_gc_pause_type(_concurrent_operation_is_full_mark));
1103
1104 policy()->record_young_collection_end(_concurrent_operation_is_full_mark, evacuation_alloc_failed());
1105 }
1106 TASKQUEUE_STATS_ONLY(_g1h->task_queues()->print_and_reset_taskqueue_stats("Oop Queue");)
1107 }
--- EOF ---