195
196 // Although we never intentionally push references outside of the collection
197 // set, (benign) races in the claim mechanism during RSet scanning mean that
198 // more than one thread might claim the same card. The same card may therefore
199 // be processed multiple times, and we might get references into old gen here,
200 // so we need to redo this check.
201 const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
202 // References pushed onto the work stack should never point to a humongous region,
203 // as such regions are not added to the collection set due to the precondition above.
204 assert(!region_attr.is_humongous_candidate(),
205 "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
206 p2i(obj), _g1h->addr_to_region(obj), p2i(p));
207
208 if (!region_attr.is_in_cset()) {
209 // In this case somebody else already did all the work.
210 return;
211 }
212
213 markWord m = obj->mark();
214 if (m.is_marked()) {
215 obj = cast_to_oop(m.decode_pointer());
216 } else {
217 obj = do_copy_to_survivor_space(region_attr, obj, m);
218 }
219 RawAccess<IS_NOT_NULL>::oop_store(p, obj);
220
221 write_ref_field_post(p, obj);
222 }
223
224 MAYBE_INLINE_EVACUATION
225 void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
226 oop from_obj = task.to_source_array();
227
228 assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
229 assert(from_obj->is_objArray(), "must be obj array");
230 assert(from_obj->is_forwarded(), "must be forwarded");
231
232 oop to_obj = from_obj->forwardee();
233 assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
234 assert(to_obj->is_objArray(), "must be obj array");
235 objArrayOop to_array = objArrayOop(to_obj);
236
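  // Claim the next chunk of the array; the stepper also reports in step._ncreate
  // how many additional partial-scan tasks to push so other workers can steal them.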
237 PartialArrayTaskStepper::Step step
238 = _partial_array_stepper.next(objArrayOop(from_obj),
239 to_array,
240 _partial_objarray_chunk_size);
241 for (uint i = 0; i < step._ncreate; ++i) {
242 push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
243 }
244
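  // Skip the card enqueue iff the destination (to_array) is in a newly-allocated
  // survivor region (see start_partial_objarray()); G1 never needs remembered-set
  // entries for references originating in young regions.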
245 G1HeapRegionAttr dest_attr = _g1h->region_attr(to_array);
246 G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_new_survivor());
247 // Process claimed task. The length of to_array is not correct, but
248 // fortunately the iteration ignores the length field and just relies
249 // on start/end.
250 to_array->oop_iterate_range(&_scanner,
251 step._index,
252 step._index + _partial_objarray_chunk_size);
253 }
254
255 MAYBE_INLINE_EVACUATION
256 void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
257 oop from_obj,
258 oop to_obj) {
259 assert(from_obj->is_objArray(), "precondition");
260 assert(from_obj->is_forwarded(), "precondition");
261 assert(from_obj->forwardee() == to_obj, "precondition");
262 assert(from_obj != to_obj, "should not be scanning self-forwarded objects");
263 assert(to_obj->is_objArray(), "precondition");
264
265 objArrayOop to_array = objArrayOop(to_obj);
266
267 PartialArrayTaskStepper::Step step
268 = _partial_array_stepper.start(objArrayOop(from_obj),
269 to_array,
270 _partial_objarray_chunk_size);
271
272 // Push any needed partial scan tasks. Pushed before processing the
273 // initial chunk to allow other workers to steal while we're processing.
274 for (uint i = 0; i < step._ncreate; ++i) {
275 push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
276 }
277
278 // Skip the card enqueue iff the object (to_array) is in a survivor region.
279 // However, HeapRegion::is_survivor() is too expensive here.
366 // no other space to try.
367 return nullptr;
368 }
369 }
370
371 G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
372 assert(region_attr.is_young() || region_attr.is_old(), "must be either Young or Old");
373
374 if (region_attr.is_young()) {
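    // The object's age is stored in its mark word; if the mark is displaced
    // (e.g. the object is locked), read the age from the displaced mark instead.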
375 age = !m.has_displaced_mark_helper() ? m.age()
376 : m.displaced_mark_helper().age();
377 if (age < _tenuring_threshold) {
378 return region_attr;
379 }
380 }
381 // young-to-old (promotion) or old-to-old; destination is old in both cases.
382 return G1HeapRegionAttr::Old;
383 }
384
385 void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
386 oop const old, size_t word_sz, uint age,
387 HeapWord * const obj_ptr, uint node_index) const {
388 PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
389 if (alloc_buf->contains(obj_ptr)) {
390 _g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
391 dest_attr.type() == G1HeapRegionAttr::Old,
392 alloc_buf->word_sz() * HeapWordSize);
393 } else {
394 _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
395 dest_attr.type() == G1HeapRegionAttr::Old);
396 }
397 }
398
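// Out-of-line slow path for copy allocation: try a direct allocation or a new PLAB
// in the preferred destination, and if that fails fall back to the next destination
// via allocate_in_next_plab(), which may update *dest_attr. Returns null if no
// space could be found.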
399 NOINLINE
400 HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
401 oop old,
402 size_t word_sz,
403 uint age,
404 uint node_index) {
405 HeapWord* obj_ptr = nullptr;
406 // Try slow-path allocation unless we're allocating old and old is already full.
407 if (!(dest_attr->is_old() && _old_gen_is_full)) {
408 bool plab_refill_failed = false;
409 obj_ptr = _plab_allocator->allocate_direct_or_new_plab(*dest_attr,
410 word_sz,
411 &plab_refill_failed,
412 node_index);
413 if (obj_ptr == nullptr) {
414 obj_ptr = allocate_in_next_plab(dest_attr,
415 word_sz,
416 plab_refill_failed,
417 node_index);
418 }
419 }
420 if (obj_ptr != nullptr) {
421 update_numa_stats(node_index);
422 if (_g1h->gc_tracer_stw()->should_report_promotion_events()) {
423 // The events are checked individually as part of the actual commit
424 report_promotion_event(*dest_attr, old, word_sz, age, obj_ptr, node_index);
425 }
426 }
427 return obj_ptr;
428 }
429
430 #if EVAC_FAILURE_INJECTOR
431 bool G1ParScanThreadState::inject_evacuation_failure(uint region_idx) {
432 return _g1h->evac_failure_injector()->evacuation_should_fail(_evac_failure_inject_counter, region_idx);
433 }
434 #endif
435
436 NOINLINE
437 void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
438 HeapWord* obj_ptr,
439 size_t word_sz,
440 uint node_index) {
441 _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
442 }
443
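// Update the destination region's block offset table (BOT) so that the newly
// copied object can be located from addresses within it.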
444 void G1ParScanThreadState::update_bot_after_copying(oop obj, size_t word_sz) {
445 HeapWord* obj_start = cast_from_oop<HeapWord*>(obj);
446 HeapRegion* region = _g1h->heap_region_containing(obj_start);
447 region->update_bot_for_obj(obj_start, word_sz);
448 }
449
450 // Private inline function, for direct internal use and providing the
451 // implementation of the public non-inline function.
452 MAYBE_INLINE_EVACUATION
453 oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
454 oop const old,
455 markWord const old_mark) {
456 assert(region_attr.is_in_cset(),
457 "Unexpected region attr type: %s", region_attr.get_type_str());
458
459 // Get the klass once. We'll need it again later, and this avoids
460 // re-decoding when it's compressed.
461 Klass* klass = old->klass();
462 const size_t word_sz = old->size_given_klass(klass);
463
464 uint age = 0;
465 G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
466 HeapRegion* const from_region = _g1h->heap_region_containing(old);
467 uint node_index = from_region->node_index();
468
469 HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
470
471 // PLAB allocations should succeed most of the time, so we'll
472 // normally check against null once and that's it.
473 if (obj_ptr == nullptr) {
474 obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index);
475 if (obj_ptr == nullptr) {
476 // This will either forward-to-self, or detect that someone else has
477 // installed a forwarding pointer.
478 return handle_evacuation_failure_par(old, old_mark, word_sz);
479 }
480 }
481
482 assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
483 assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
484
485 // Should this evacuation fail?
486 if (inject_evacuation_failure(from_region->hrm_index())) {
487 // Doing this after all the allocation attempts also tests the
488 // undo_allocation() method.
489 undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
490 return handle_evacuation_failure_par(old, old_mark, word_sz);
491 }
492
493 // We're going to allocate linearly, so might as well prefetch ahead.
494 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
610 delete pss;
611 _states[worker_id] = nullptr;
612 }
613 _flushed = true;
614 }
615
616 void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
617 for (uint worker_index = 0; worker_index < _num_workers; ++worker_index) {
618 G1ParScanThreadState* pss = _states[worker_index];
619 assert(pss != nullptr, "must be initialized");
620
621 size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
622 _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
623 }
624 }
625
626 NOINLINE
627 oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, size_t word_sz) {
628 assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
629
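  // Attempt to atomically install a self-forwarding pointer. A null result means
  // the CAS succeeded; otherwise another thread already forwarded the object and
  // forward_ptr is that forwardee.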
630 oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
631 if (forward_ptr == nullptr) {
632 // Forward-to-self succeeded. We are the "owner" of the object.
633 HeapRegion* r = _g1h->heap_region_containing(old);
634
635 if (_evac_failure_regions->record(r->hrm_index())) {
636 _g1h->hr_printer()->evac_failure(r);
637 }
638
639 // Mark the failing object in the marking bitmap and later use the bitmap to handle
640 // evacuation failure recovery.
641 _g1h->mark_evac_failure_object(_worker_id, old, word_sz);
642
643 _preserved_marks->push_if_necessary(old, m);
644
645 ContinuationGCSupport::transform_stack_chunk(old);
646
647 _evacuation_failed_info.register_copy_failure(word_sz);
648
649 // For iterating objects that failed evacuation currently we can reuse the
650 // existing closure to scan evacuated objects; since we are iterating from a
|
195
196 // Although we never intentionally push references outside of the collection
197 // set, (benign) races in the claim mechanism during RSet scanning mean that
198 // more than one thread might claim the same card. The same card may therefore
199 // be processed multiple times, and we might get references into old gen here,
200 // so we need to redo this check.
201 const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
202 // References pushed onto the work stack should never point to a humongous region,
203 // as such regions are not added to the collection set due to the precondition above.
204 assert(!region_attr.is_humongous_candidate(),
205 "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
206 p2i(obj), _g1h->addr_to_region(obj), p2i(p));
207
208 if (!region_attr.is_in_cset()) {
209 // In this case somebody else already did all the work.
210 return;
211 }
212
213 markWord m = obj->mark();
214 if (m.is_marked()) {
215 obj = obj->forwardee(m);
216 } else {
217 obj = do_copy_to_survivor_space(region_attr, obj, m);
218 }
219 RawAccess<IS_NOT_NULL>::oop_store(p, obj);
220
221 write_ref_field_post(p, obj);
222 }
223
224 MAYBE_INLINE_EVACUATION
225 void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
226 oop from_obj = task.to_source_array();
227
228 assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
229 assert(from_obj->forward_safe_klass()->is_objArray_klass(), "must be obj array");
230 assert(from_obj->is_forwarded(), "must be forwarded");
231
232 oop to_obj = from_obj->forwardee();
233 assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
234 assert(to_obj->is_objArray(), "must be obj array");
235 objArrayOop to_array = objArrayOop(to_obj);
236
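  // Claim the next chunk of the array; the stepper also reports in step._ncreate
  // how many additional partial-scan tasks to push so other workers can steal them.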
237 PartialArrayTaskStepper::Step step
238 = _partial_array_stepper.next(objArrayOop(from_obj),
239 to_array,
240 _partial_objarray_chunk_size);
241 for (uint i = 0; i < step._ncreate; ++i) {
242 push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
243 }
244
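  // Skip the card enqueue iff the destination (to_array) is in a newly-allocated
  // survivor region (see start_partial_objarray()); G1 never needs remembered-set
  // entries for references originating in young regions.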
245 G1HeapRegionAttr dest_attr = _g1h->region_attr(to_array);
246 G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_new_survivor());
247 // Process claimed task. The length of to_array is not correct, but
248 // fortunately the iteration ignores the length field and just relies
249 // on start/end.
250 to_array->oop_iterate_range(&_scanner,
251 step._index,
252 step._index + _partial_objarray_chunk_size);
253 }
254
255 MAYBE_INLINE_EVACUATION
256 void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
257 oop from_obj,
258 oop to_obj) {
259 assert(from_obj->forward_safe_klass()->is_objArray_klass(), "precondition");
260 assert(from_obj->is_forwarded(), "precondition");
261 assert(from_obj->forwardee() == to_obj, "precondition");
262 assert(from_obj != to_obj, "should not be scanning self-forwarded objects");
263 assert(to_obj->is_objArray(), "precondition");
264
265 objArrayOop to_array = objArrayOop(to_obj);
266
267 PartialArrayTaskStepper::Step step
268 = _partial_array_stepper.start(objArrayOop(from_obj),
269 to_array,
270 _partial_objarray_chunk_size);
271
272 // Push any needed partial scan tasks. Pushed before processing the
273 // initial chunk to allow other workers to steal while we're processing.
274 for (uint i = 0; i < step._ncreate; ++i) {
275 push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
276 }
277
278 // Skip the card enqueue iff the object (to_array) is in a survivor region.
279 // However, HeapRegion::is_survivor() is too expensive here.
366 // no other space to try.
367 return nullptr;
368 }
369 }
370
371 G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
372 assert(region_attr.is_young() || region_attr.is_old(), "must be either Young or Old");
373
374 if (region_attr.is_young()) {
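    // The object's age is stored in its mark word; if the mark is displaced
    // (e.g. the object is locked), read the age from the displaced mark instead.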
375 age = !m.has_displaced_mark_helper() ? m.age()
376 : m.displaced_mark_helper().age();
377 if (age < _tenuring_threshold) {
378 return region_attr;
379 }
380 }
381 // young-to-old (promotion) or old-to-old; destination is old in both cases.
382 return G1HeapRegionAttr::Old;
383 }
384
385 void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
386 Klass* klass, size_t word_sz, uint age,
387 HeapWord * const obj_ptr, uint node_index) const {
388 PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
389 if (alloc_buf->contains(obj_ptr)) {
390 _g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(klass, word_sz * HeapWordSize, age,
391 dest_attr.type() == G1HeapRegionAttr::Old,
392 alloc_buf->word_sz() * HeapWordSize);
393 } else {
394 _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(klass, word_sz * HeapWordSize, age,
395 dest_attr.type() == G1HeapRegionAttr::Old);
396 }
397 }
398
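// Out-of-line slow path for copy allocation: try a direct allocation or a new PLAB
// in the preferred destination, and if that fails fall back to the next destination
// via allocate_in_next_plab(), which may update *dest_attr. Returns null if no
// space could be found.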
399 NOINLINE
400 HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
401 Klass* klass,
402 size_t word_sz,
403 uint age,
404 uint node_index) {
405 HeapWord* obj_ptr = nullptr;
406 // Try slow-path allocation unless we're allocating old and old is already full.
407 if (!(dest_attr->is_old() && _old_gen_is_full)) {
408 bool plab_refill_failed = false;
409 obj_ptr = _plab_allocator->allocate_direct_or_new_plab(*dest_attr,
410 word_sz,
411 &plab_refill_failed,
412 node_index);
413 if (obj_ptr == nullptr) {
414 obj_ptr = allocate_in_next_plab(dest_attr,
415 word_sz,
416 plab_refill_failed,
417 node_index);
418 }
419 }
420 if (obj_ptr != nullptr) {
421 update_numa_stats(node_index);
422 if (_g1h->gc_tracer_stw()->should_report_promotion_events()) {
423 // The events are checked individually as part of the actual commit
424 report_promotion_event(*dest_attr, klass, word_sz, age, obj_ptr, node_index);
425 }
426 }
427 return obj_ptr;
428 }
429
430 #if EVAC_FAILURE_INJECTOR
431 bool G1ParScanThreadState::inject_evacuation_failure(uint region_idx) {
432 return _g1h->evac_failure_injector()->evacuation_should_fail(_evac_failure_inject_counter, region_idx);
433 }
434 #endif
435
436 NOINLINE
437 void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
438 HeapWord* obj_ptr,
439 size_t word_sz,
440 uint node_index) {
441 _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
442 }
443
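// Update the destination region's block offset table (BOT) so that the newly
// copied object can be located from addresses within it.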
444 void G1ParScanThreadState::update_bot_after_copying(oop obj, size_t word_sz) {
445 HeapWord* obj_start = cast_from_oop<HeapWord*>(obj);
446 HeapRegion* region = _g1h->heap_region_containing(obj_start);
447 region->update_bot_for_obj(obj_start, word_sz);
448 }
449
450 // Private inline function, for direct internal use and providing the
451 // implementation of the public non-inline function.
452 MAYBE_INLINE_EVACUATION
453 oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
454 oop const old,
455 markWord const old_mark) {
456 assert(region_attr.is_in_cset(),
457 "Unexpected region attr type: %s", region_attr.get_type_str());
458
459 // Get the klass once. We'll need it again later, and this avoids
460 // re-decoding when it's compressed.
461 // NOTE: With compact headers, it is not safe to load the Klass* from old, because
462 // that would access the mark-word, and the mark-word might be changed at any time
463 // by concurrent promotion. The promoted mark-word would point to the forwardee,
464 // which may not yet have completed copying. Therefore we must load the Klass* from
465 // the mark-word that we have already loaded. This is safe, because we have checked
466 // in the caller that the object is not yet forwarded.
467 Klass* klass = old->forward_safe_klass(old_mark);
468 const size_t word_sz = old->size_given_klass(klass);
469
470 uint age = 0;
471 G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
472 HeapRegion* const from_region = _g1h->heap_region_containing(old);
473 uint node_index = from_region->node_index();
474
475 HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
476
477 // PLAB allocations should succeed most of the time, so we'll
478 // normally check against null once and that's it.
479 if (obj_ptr == nullptr) {
480 obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
481 if (obj_ptr == nullptr) {
482 // This will either forward-to-self, or detect that someone else has
483 // installed a forwarding pointer.
484 return handle_evacuation_failure_par(old, old_mark, word_sz);
485 }
486 }
487
488 assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
489 assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
490
491 // Should this evacuation fail?
492 if (inject_evacuation_failure(from_region->hrm_index())) {
493 // Doing this after all the allocation attempts also tests the
494 // undo_allocation() method.
495 undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
496 return handle_evacuation_failure_par(old, old_mark, word_sz);
497 }
498
499 // We're going to allocate linearly, so might as well prefetch ahead.
500 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
616 delete pss;
617 _states[worker_id] = nullptr;
618 }
619 _flushed = true;
620 }
621
622 void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
623 for (uint worker_index = 0; worker_index < _num_workers; ++worker_index) {
624 G1ParScanThreadState* pss = _states[worker_index];
625 assert(pss != nullptr, "must be initialized");
626
627 size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
628 _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
629 }
630 }
631
632 NOINLINE
633 oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, size_t word_sz) {
634 assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
635
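  // Attempt to atomically install a self-forwarding pointer. A null result means
  // the CAS succeeded; otherwise another thread already forwarded the object and
  // forward_ptr is that forwardee.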
636 oop forward_ptr = old->forward_to_self_atomic(m, memory_order_relaxed);
637 if (forward_ptr == nullptr) {
638 // Forward-to-self succeeded. We are the "owner" of the object.
639 HeapRegion* r = _g1h->heap_region_containing(old);
640
641 if (_evac_failure_regions->record(r->hrm_index())) {
642 _g1h->hr_printer()->evac_failure(r);
643 }
644
645 // Mark the failing object in the marking bitmap and later use the bitmap to handle
646 // evacuation failure recovery.
647 _g1h->mark_evac_failure_object(_worker_id, old, word_sz);
648
649 _preserved_marks->push_if_necessary(old, m);
650
651 ContinuationGCSupport::transform_stack_chunk(old);
652
653 _evacuation_failed_info.register_copy_failure(word_sz);
654
655 // For iterating objects that failed evacuation currently we can reuse the
656 // existing closure to scan evacuated objects; since we are iterating from a
|