/*
 * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/g1/g1YoungGCAllocationFailureInjector.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/partialArraySplitter.inline.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "gc/shared/partialArrayTaskStats.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// In fastdebug builds the code size can get out of hand, potentially
// tripping over compiler limits (which may be bugs, but nevertheless
// need to be taken into consideration). A side benefit of limiting
// inlining is that we get more call frames that might aid debugging.
// And the fastdebug compile time for this file is much reduced.
// Explicit NOINLINE to block ATTRIBUTE_FLATTENing.
#define MAYBE_INLINE_EVACUATION NOT_DEBUG(inline) DEBUG_ONLY(NOINLINE)

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           G1RedirtyCardsQueueSet* rdcqs,
                                           uint worker_id,
                                           uint num_workers,
                                           G1CollectionSet* collection_set,
                                           G1EvacFailureRegions* evac_failure_regions)
  : _g1h(g1h),
    _task_queue(g1h->task_queue(worker_id)),
    _rdc_local_qset(rdcqs),
    _ct(g1h->card_table()),
    _closures(nullptr),
    _plab_allocator(nullptr),
    _age_table(false),
    _tenuring_threshold(g1h->policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _worker_id(worker_id),
    _last_enqueued_card(SIZE_MAX),
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _surviving_young_words_base(nullptr),
    _surviving_young_words(nullptr),
    _surviving_words_length(collection_set->young_region_length() + 1),
    _old_gen_is_full(false),
    _partial_array_splitter(g1h->partial_array_state_manager(), num_workers, ParGCArrayScanChunk),
    _string_dedup_requests(),
    _max_num_optional_regions(collection_set->num_optional_regions()),
    _numa(g1h->numa()),
    _obj_alloc_stat(nullptr),
    ALLOCATION_FAILURE_INJECTOR_ONLY(_allocation_failure_inject_counter(0) COMMA)
    _evacuation_failed_info(),
    _evac_failure_regions(evac_failure_regions),
    _evac_failure_enqueued_cards(0)
{
  // We allocate one entry per young gen region in the collection set plus one
  // extra entry, since entry 0 keeps track of surviving bytes for non-young regions.
  // We also add a few padding elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  const size_t padding_elem_num = (DEFAULT_PADDING_SIZE / sizeof(size_t));
  size_t array_length = padding_elem_num + _surviving_words_length + padding_elem_num;

  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  _surviving_young_words = _surviving_young_words_base + padding_elem_num;
  memset(_surviving_young_words, 0, _surviving_words_length * sizeof(size_t));

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  _closures = G1EvacuationRootClosures::create_root_closures(_g1h,
                                                             this,
                                                             collection_set->only_contains_young_regions());

  _oops_into_optional_regions = new G1OopStarChunkedList[_max_num_optional_regions];

  initialize_numa_stats();
}

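// Flush this thread's per-pause statistics into the shared state: hand off the
// redirty-card buffers, flush NUMA and PLAB statistics and the age table,
// report any evacuation failure, and accumulate the per-region surviving
// young words into the caller-provided array. Returns the total number of
// words copied by this thread.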
size_t G1ParScanThreadState::flush_stats(size_t* surviving_young_words, uint num_workers, BufferNodeList* rdc_buffers) {
  *rdc_buffers = _rdc_local_qset.flush();
  flush_numa_stats();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats(num_workers);
  _g1h->policy()->record_age_table(&_age_table);

  if (_evacuation_failed_info.has_failed()) {
    _g1h->gc_tracer_stw()->report_evacuation_failed(_evacuation_failed_info);
  }

  size_t sum = 0;
  for (uint i = 0; i < _surviving_words_length; i++) {
    surviving_young_words[i] += _surviving_young_words[i];
    sum += _surviving_young_words[i];
  }
  return sum;
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
  FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
}

size_t G1ParScanThreadState::lab_waste_words() const {
  return _plab_allocator->waste();
}

size_t G1ParScanThreadState::lab_undo_waste_words() const {
  return _plab_allocator->undo_waste();
}

size_t G1ParScanThreadState::evac_failure_enqueued_cards() const {
  return _evac_failure_enqueued_cards;
}

#ifdef ASSERT
void G1ParScanThreadState::verify_task(narrowOop* task) const {
  assert(task != nullptr, "invariant");
  assert(UseCompressedOops, "sanity");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(oop* task) const {
  assert(task != nullptr, "invariant");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(PartialArrayState* task) const {
  assert(task != nullptr, "invariant");
  // Source isn't used for processing, so not recorded in task.
  assert(task->source() == nullptr, "invariant");
  oop p = task->destination();
  assert(_g1h->is_in_reserved(p),
         "task=" PTR_FORMAT " dest=" PTR_FORMAT, p2i(task), p2i(p));
}

void G1ParScanThreadState::verify_task(ScannerTask task) const {
  if (task.is_narrow_oop_ptr()) {
    verify_task(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    verify_task(task.to_oop_ptr());
  } else if (task.is_partial_array_state()) {
    verify_task(task.to_partial_array_state());
  } else {
    ShouldNotReachHere();
  }
}
#endif // ASSERT

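// Evacuate the object referenced by p if it is still in the collection set,
// then update the reference to point to the object's new location and enqueue
// the containing card for refinement if needed.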
template <class T>
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::do_oop_evac(T* p) {
  // Reference should not be null here as such are never pushed to the task queue.
  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

  // Although we never intentionally push references outside of the collection
  // set, due to (benign) races in the claim mechanism during RSet scanning more
  // than one thread might claim the same card. So the same card may be
  // processed multiple times, and so we might get references into old gen here.
  // So we need to redo this check.
  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
  // References pushed onto the work stack should never point to a humongous region
  // as such regions are not added to the collection set due to the above precondition.
  assert(!region_attr.is_humongous_candidate(),
         "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
         p2i(obj), _g1h->addr_to_region(obj), p2i(p));

  if (!region_attr.is_in_cset()) {
    // In this case somebody else already did all the work.
    return;
  }

  markWord m = obj->mark();
  if (m.is_forwarded()) {
    obj = obj->forwardee(m);
  } else {
    obj = do_copy_to_survivor_space(region_attr, obj, m);
  }
  RawAccess<IS_NOT_NULL>::oop_store(p, obj);

  write_ref_field_post(p, obj);
}

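// Process one chunk of a partially-scanned object array: claim the next chunk
// from the shared PartialArrayState (the splitter may push follow-up tasks) and
// scan the claimed index range of the already-copied destination array.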
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::do_partial_array(PartialArrayState* state, bool stolen) {
  // Access state before release by claim().
  objArrayOop to_array = objArrayOop(state->destination());
  PartialArraySplitter::Claim claim =
    _partial_array_splitter.claim(state, _task_queue, stolen);
  G1HeapRegionAttr dest_attr = _g1h->region_attr(to_array);
  G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_new_survivor());
  // Process claimed task.
  to_array->oop_iterate_range(&_scanner,
                              checked_cast<int>(claim._start),
                              checked_cast<int>(claim._end));
}

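// Start chunked processing of a large object array that has just been copied:
// let the splitter record the remaining work as partial-array tasks and scan
// the initial chunk of the destination array directly.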
MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::start_partial_objarray(oop from_obj,
                                                  oop to_obj) {
  assert(from_obj->is_forwarded(), "precondition");
  assert(from_obj->forwardee() == to_obj, "precondition");
  assert(to_obj->is_objArray(), "precondition");

  objArrayOop to_array = objArrayOop(to_obj);
  size_t array_length = to_array->length();
  size_t initial_chunk_size =
    // The source array is unused when processing states.
    _partial_array_splitter.start(_task_queue, nullptr, to_array, array_length);

  assert(_scanner.skip_card_enqueue_set(), "must be");
  // Process the initial chunk. No need to process the type in the
  // klass, as it will already be handled by processing the built-in
  // module.
  to_array->oop_iterate_range(&_scanner, 0, checked_cast<int>(initial_chunk_size));
}

MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::dispatch_task(ScannerTask task, bool stolen) {
  verify_task(task);
  if (task.is_narrow_oop_ptr()) {
    do_oop_evac(task.to_narrow_oop_ptr());
  } else if (task.is_oop_ptr()) {
    do_oop_evac(task.to_oop_ptr());
  } else {
    do_partial_array(task.to_partial_array_state(), stolen);
  }
}

// Process tasks until overflow queue is empty and local queue
// contains no more than threshold entries. NOINLINE to prevent
// inlining into steal_and_trim_queue.
ATTRIBUTE_FLATTEN NOINLINE
void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
  ScannerTask task;
  do {
    while (_task_queue->pop_overflow(task)) {
      if (!_task_queue->try_push_to_taskqueue(task)) {
        dispatch_task(task, false);
      }
    }
    while (_task_queue->pop_local(task, threshold)) {
      dispatch_task(task, false);
    }
  } while (!_task_queue->overflow_empty());
}

ATTRIBUTE_FLATTEN
void G1ParScanThreadState::steal_and_trim_queue(G1ScannerTasksQueueSet* task_queues) {
  ScannerTask stolen_task;
  while (task_queues->steal(_worker_id, stolen_task)) {
    dispatch_task(stolen_task, true);
    // Processing stolen task may have added tasks to our queue.
    trim_queue();
  }
}

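// Called when allocating into the PLAB of the desired destination failed.
// For a young (survivor) destination, retry the allocation in an old gen PLAB
// (promotion); for an old destination there is no further fallback and null
// is returned.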
HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed,
                                                      uint node_index) {

  assert(dest->is_in_cset_or_humongous_candidate(), "Unexpected dest: %s region attr", dest->get_type_str());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed,
                                                        node_index);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != nullptr) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained above
      // for making survivor gen unavailable for allocation applies for old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
    // no other space to try.
    return nullptr;
  }
}

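// Determine the destination region type for an object: objects from young
// regions stay young while their age is below the tenuring threshold,
// otherwise they are promoted to old; objects from old regions stay old.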
G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
  assert(region_attr.is_young() || region_attr.is_old(), "must be either Young or Old");

  if (region_attr.is_young()) {
    age = !m.has_displaced_mark_helper() ? m.age()
                                         : m.displaced_mark_helper().age();
    if (age < _tenuring_threshold) {
      return region_attr;
    }
  }
  // young-to-old (promotion) or old-to-old; destination is old in both cases.
  return G1HeapRegionAttr::Old;
}

void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
                                                  Klass* klass, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr, uint node_index) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(klass, word_sz * HeapWordSize, age,
                                                              dest_attr.type() == G1HeapRegionAttr::Old,
                                                              alloc_buf->word_sz() * HeapWordSize);
  } else {
    _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(klass, word_sz * HeapWordSize, age,
                                                               dest_attr.type() == G1HeapRegionAttr::Old);
  }
}

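// Slow-path allocation for an evacuated copy: try a direct allocation or a new
// PLAB in the desired destination, and if that fails fall back to the next
// generation via allocate_in_next_plab(). Returns null if no space is
// available, in which case the caller handles evacuation failure.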
NOINLINE
HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
                                                   Klass* klass,
                                                   size_t word_sz,
                                                   uint age,
                                                   uint node_index) {
  HeapWord* obj_ptr = nullptr;
  // Try slow-path allocation unless we're allocating old and old is already full.
  if (!(dest_attr->is_old() && _old_gen_is_full)) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(*dest_attr,
                                                           word_sz,
                                                           &plab_refill_failed,
                                                           node_index);
    if (obj_ptr == nullptr) {
      obj_ptr = allocate_in_next_plab(dest_attr,
                                      word_sz,
                                      plab_refill_failed,
                                      node_index);
    }
  }
  if (obj_ptr != nullptr) {
    update_numa_stats(node_index);
    if (_g1h->gc_tracer_stw()->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit
      report_promotion_event(*dest_attr, klass, word_sz, age, obj_ptr, node_index);
    }
  }
  return obj_ptr;
}

#if ALLOCATION_FAILURE_INJECTOR
bool G1ParScanThreadState::inject_allocation_failure(uint region_idx) {
  return _g1h->allocation_failure_injector()->allocation_should_fail(_allocation_failure_inject_counter, region_idx);
}
#endif

NOINLINE
void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
                                           HeapWord* obj_ptr,
                                           size_t word_sz,
                                           uint node_index) {
  _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
}

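// Update the block offset table of the destination (old) region for the block
// covering the just-copied object.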
void G1ParScanThreadState::update_bot_after_copying(oop obj, size_t word_sz) {
  HeapWord* obj_start = cast_from_oop<HeapWord*>(obj);
  G1HeapRegion* region = _g1h->heap_region_containing(obj_start);
  region->update_bot_for_block(obj_start, obj_start + word_sz);
}

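// Scan the contents of a just-copied object (or, on evacuation failure, the
// object in place): object arrays are scanned in chunks, type arrays need no
// scanning, stack chunk objects are transformed, and young Strings may be
// enqueued for deduplication before the regular field scan.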
ALWAYSINLINE
void G1ParScanThreadState::do_iterate_object(oop const obj,
                                             oop const old,
                                             Klass* const klass,
                                             G1HeapRegionAttr const region_attr,
                                             G1HeapRegionAttr const dest_attr,
                                             uint age) {
  // Most objects are not arrays, so do one array check rather than
  // checking for each array category for each object.
  if (klass->is_array_klass()) {
    assert(!klass->is_stack_chunk_instance_klass(), "must be");

    if (klass->is_objArray_klass()) {
      start_partial_objarray(old, obj);
    } else {
      // Nothing needs to be done for typeArrays. Body doesn't contain
      // any oops to scan, and the type in the klass will already be handled
      // by processing the built-in module.
      assert(klass->is_typeArray_klass(), "invariant");
    }
    return;
  }

  ContinuationGCSupport::transform_stack_chunk(obj);

  // Check for deduplicating young Strings.
  if (G1StringDedup::is_candidate_from_evacuation(klass,
                                                  region_attr,
                                                  dest_attr,
                                                  age)) {
    // Record old; request adds a new weak reference, which reference
    // processing expects to refer to a from-space object.
    _string_dedup_requests.add(old);
  }

  assert(_scanner.skip_card_enqueue_set(), "must be");
  obj->oop_iterate_backwards(&_scanner, klass);
}

// Private inline function, for direct internal use and providing the
// implementation of the public not-inline function.
MAYBE_INLINE_EVACUATION
oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                    oop const old,
                                                    markWord const old_mark) {
  assert(region_attr.is_in_cset(),
         "Unexpected region attr type: %s", region_attr.get_type_str());

  // NOTE: With compact headers, it is not safe to load the Klass* from old, because
  // that would access the mark-word, which might be changed at any time by
  // concurrent workers. Such a mark word would refer to a forwardee, which may not
  // yet have completed copying. Therefore we must load the Klass* from the mark-word
  // that we already loaded. This is safe, because we only get here if the object is
  // not yet forwarded.
  assert(!old_mark.is_forwarded(), "precondition");
  Klass* klass = UseCompactObjectHeaders
                 ? old_mark.klass()
                 : old->klass();

  const size_t old_size = old->size_given_mark_and_klass(old_mark, klass);
  const size_t word_sz = old->copy_size(old_size, old_mark);

  // JNI only allows pinning of typeArrays, so we only need to keep those in place.
  if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
    return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, true /* cause_pinned */);
  }

  uint age = 0;
  G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
  G1HeapRegion* const from_region = _g1h->heap_region_containing(old);
  uint node_index = from_region->node_index();

  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against null once and that's it.
  if (obj_ptr == nullptr) {
    obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
    if (obj_ptr == nullptr) {
      // This will either forward-to-self, or detect that someone else has
      // installed a forwarding pointer.
      return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
    }
  }

  assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

  // Should this evacuation fail?
  if (inject_allocation_failure(from_region->hrm_index())) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method too.
    undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
  }

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, old_size);

  const oop obj = cast_to_oop(obj_ptr);
  // Because the forwarding is done with memory_order_relaxed there is no
  // ordering with the above copy. Clients that get the forwardee must not
  // examine its contents without other synchronization, since the contents
  // may not be up to date for them.
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == nullptr) {

    {
      const uint young_index = from_region->young_index_in_cset();
      assert((from_region->is_young() && young_index > 0) ||
             (!from_region->is_young() && young_index == 0), "invariant");
      _surviving_young_words[young_index] += word_sz;
    }

    obj->initialize_hash_if_necessary(old);

    if (dest_attr.is_young()) {
      if (age < markWord::max_age) {
        age++;
        obj->incr_age();
      }
      _age_table.add(age, word_sz);
    } else {
      update_bot_after_copying(obj, word_sz);
    }

    {
      // Skip the card enqueue iff the object (obj) is in a survivor region.
      // However, G1HeapRegion::is_survivor() is too expensive here.
      // Instead, we use dest_attr.is_young() because the two values are always
      // equal: successfully allocated young regions must be survivor regions.
      assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
      G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
      do_iterate_object(obj, old, klass, region_attr, dest_attr, age);
    }

    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return forward_ptr;
  }
}

// Public not-inline entry point.
ATTRIBUTE_FLATTEN
oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr region_attr,
                                                 oop old,
                                                 markWord old_mark) {
  return do_copy_to_survivor_space(region_attr, old, old_mark);
}

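// Lazily create the per-worker scan state the first time a worker asks for it.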
G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _num_workers, "out of bounds access");
  if (_states[worker_id] == nullptr) {
    _states[worker_id] =
      new G1ParScanThreadState(_g1h, rdcqs(),
                               worker_id,
                               _num_workers,
                               _collection_set,
                               _evac_failure_regions);
  }
  return _states[worker_id];
}

const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

void G1ParScanThreadStateSet::flush_stats() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");
  for (uint worker_id = 0; worker_id < _num_workers; ++worker_id) {
    G1ParScanThreadState* pss = _states[worker_id];
    assert(pss != nullptr, "must be initialized");

    G1GCPhaseTimes* p = _g1h->phase_times();

    // Need to get the following two before the call to G1ParScanThreadState::flush_stats()
    // because it resets the PLAB allocator where we get this info from.
    size_t lab_waste_bytes = pss->lab_waste_words() * HeapWordSize;
    size_t lab_undo_waste_bytes = pss->lab_undo_waste_words() * HeapWordSize;
    size_t copied_bytes = pss->flush_stats(_surviving_young_words_total, _num_workers, &_rdc_buffers[worker_id]) * HeapWordSize;
    size_t evac_fail_enqueued_cards = pss->evac_failure_enqueued_cards();

    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, copied_bytes, G1GCPhaseTimes::MergePSSCopiedBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_waste_bytes, G1GCPhaseTimes::MergePSSLABWasteBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_undo_waste_bytes, G1GCPhaseTimes::MergePSSLABUndoWasteBytes);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, evac_fail_enqueued_cards, G1GCPhaseTimes::MergePSSEvacFailExtra);

    delete pss;
    _states[worker_id] = nullptr;
  }

  G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
  dcq.merge_bufferlists(rdcqs());
  rdcqs()->verify_empty();

  _flushed = true;
}

void G1ParScanThreadStateSet::record_unused_optional_region(G1HeapRegion* hr) {
  for (uint worker_index = 0; worker_index < _num_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];
    assert(pss != nullptr, "must be initialized");

    size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
    _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
  }
}

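// Record that evacuation failed for the given region; only the first recording
// for a region triggers printing via G1HeapRegionPrinter.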
void G1ParScanThreadState::record_evacuation_failed_region(G1HeapRegion* r, uint worker_id, bool cause_pinned) {
  if (_evac_failure_regions->record(worker_id, r->hrm_index(), cause_pinned)) {
    G1HeapRegionPrinter::evac_failure(r);
  }
}

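// Handle an object in the collection set that could not be evacuated, either
// because allocation of a copy failed or because its region is pinned: forward
// the object to itself, record the failing region, mark the object for later
// recovery and scan its contents in place. If another thread already installed
// a forwarding pointer, return that forwardee instead.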
NOINLINE
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, Klass* klass, G1HeapRegionAttr attr, size_t word_sz, bool cause_pinned) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

  oop forward_ptr = old->forward_to_self_atomic(m, memory_order_relaxed);
  if (forward_ptr == nullptr) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    G1HeapRegion* r = _g1h->heap_region_containing(old);

    record_evacuation_failed_region(r, _worker_id, cause_pinned);

    // Mark the failing object in the marking bitmap and later use the bitmap to handle
    // evacuation failure recovery.
    _g1h->mark_evac_failure_object(_worker_id, old, word_sz);

    _evacuation_failed_info.register_copy_failure(word_sz);

    {
      // To iterate objects that failed evacuation we can currently reuse the
      // existing closure used for scanning evacuated objects; since we are
      // iterating over a collection set region (i.e. never a Survivor region),
      // we always need to gather cards for this case.
      G1SkipCardEnqueueSetter x(&_scanner, false /* skip_card_enqueue */);
      do_iterate_object(old, old, klass, attr, attr, m.age());
    }

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
           "should not be in the CSet",
           p2i(old), p2i(forward_ptr));
    return forward_ptr;
  }
}

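// Allocate and zero the per-node object allocation counters, but only when
// NUMA support is enabled and the NUMA heap logging target is active.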
void G1ParScanThreadState::initialize_numa_stats() {
  if (_numa->is_enabled()) {
    LogTarget(Info, gc, heap, numa) lt;

    if (lt.is_enabled()) {
      uint num_nodes = _numa->num_active_nodes();
      // Record only if there are multiple active nodes.
      _obj_alloc_stat = NEW_C_HEAP_ARRAY(size_t, num_nodes, mtGC);
      memset(_obj_alloc_stat, 0, sizeof(size_t) * num_nodes);
    }
  }
}

void G1ParScanThreadState::flush_numa_stats() {
  if (_obj_alloc_stat != nullptr) {
    uint node_index = _numa->index_of_current_thread();
    _numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat);
  }
}

void G1ParScanThreadState::update_numa_stats(uint node_index) {
  if (_obj_alloc_stat != nullptr) {
    _obj_alloc_stat[node_index]++;
  }
}

#if TASKQUEUE_STATS

PartialArrayTaskStats* G1ParScanThreadState::partial_array_task_stats() {
  return _partial_array_splitter.stats();
}

#endif // TASKQUEUE_STATS

G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                 uint num_workers,
                                                 G1CollectionSet* collection_set,
                                                 G1EvacFailureRegions* evac_failure_regions) :
  _g1h(g1h),
  _collection_set(collection_set),
  _rdcqs(G1BarrierSet::dirty_card_queue_set().allocator()),
  _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, num_workers, mtGC)),
  _rdc_buffers(NEW_C_HEAP_ARRAY(BufferNodeList, num_workers, mtGC)),
  _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, collection_set->young_region_length() + 1, mtGC)),
  _num_workers(num_workers),
  _flushed(false),
  _evac_failure_regions(evac_failure_regions)
{
  for (uint i = 0; i < num_workers; ++i) {
    _states[i] = nullptr;
    _rdc_buffers[i] = BufferNodeList();
  }
  memset(_surviving_young_words_total, 0, (collection_set->young_region_length() + 1) * sizeof(size_t));
}

G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
  FREE_C_HEAP_ARRAY(BufferNodeList, _rdc_buffers);
}

#if TASKQUEUE_STATS

void G1ParScanThreadStateSet::print_partial_array_task_stats() {
  auto get_stats = [&](uint i) {
    return state_for_worker(i)->partial_array_task_stats();
  };
  PartialArrayTaskStats::log_set(_num_workers, get_stats,
                                 "Partial Array Task Stats");
}

#endif // TASKQUEUE_STATS