// Private inline function, for direct internal use and providing the
// implementation of the public non-inline function.
MAYBE_INLINE_EVACUATION
oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                    oop const old,
                                                    markWord const old_mark) {
  assert(region_attr.is_in_cset(),
         "Unexpected region attr type: %s", region_attr.get_type_str());

  // NOTE: With compact headers, it is not safe to load the Klass* from old, because
  // that would access the mark word, which concurrent workers might change at any
  // time. Such a mark word would refer to a forwardee, which may not yet have
  // completed copying. Therefore we must load the Klass* from the mark word that we
  // already loaded. This is safe, because we only enter here if the object is not
  // yet forwarded.
  assert(!old_mark.is_forwarded(), "precondition");
  Klass* klass = UseCompactObjectHeaders
      ? old_mark.klass()
      : old->klass();

  const size_t word_sz = old->size_given_klass(klass);

  // JNI only allows pinning of typeArrays, so we only need to keep those in place.
  if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
    return handle_evacuation_failure_par(old, old_mark, word_sz, true /* cause_pinned */);
  }

  uint age = 0;
  G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
  G1HeapRegion* const from_region = _g1h->heap_region_containing(old);
  uint node_index = from_region->node_index();

  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);

  // PLAB allocations should succeed most of the time, so we normally
  // check against null only once.
  if (obj_ptr == nullptr) {
    obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
    if (obj_ptr == nullptr) {
      // This will either forward-to-self, or detect that someone else has
      // installed a forwarding pointer.
      return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
    }
  }

  assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

  // Should this evacuation fail?
  if (inject_allocation_failure(from_region->hrm_index())) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method.
    undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
  }

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);

  const oop obj = cast_to_oop(obj_ptr);
  // Because the forwarding is done with memory_order_relaxed there is no
  // ordering with the above copy. Clients that get the forwardee must not
  // examine its contents without other synchronization, since the contents
  // may not be up to date for them.
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == nullptr) {
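    // A null forward_ptr means our CAS installed the forwarding pointer, so this
    // thread owns the new copy; a non-null value means another worker forwarded
    // the object first and our speculative copy must be undone.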

    {
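      // Account the copied words to the source region's slot so the heuristics
      // can later estimate how much of each young region survived this pause.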
      const uint young_index = from_region->young_index_in_cset();
      assert((from_region->is_young() && young_index > 0) ||
             (!from_region->is_young() && young_index == 0), "invariant");
      _surviving_young_words[young_index] += word_sz;
    }

    if (dest_attr.is_young()) {
      if (age < markWord::max_age) {
        age++;
        obj->incr_age();
      }
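      // The per-age volume recorded here feeds the computation of the next
      // tenuring threshold at the end of the pause.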
      _age_table.add(age, word_sz);
    } else {
      update_bot_after_copying(obj, word_sz);
    }

    // Most objects are not arrays, so do one array check rather than
    // checking each array category for every object.
    if (klass->is_array_klass()) {
      if (klass->is_objArray_klass()) {
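        // Object arrays are not scanned here in full; they are queued for
        // chunked processing so large arrays can be scanned incrementally.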
        start_partial_objarray(dest_attr, old, obj);
      } else {
        // Nothing needs to be done for typeArrays. The body doesn't contain
        // any oops to scan, and the type in the klass will already be handled
        // by processing the built-in module.
        assert(klass->is_typeArray_klass(), "invariant");

// The same function in the updated version, where the object's original size
// (old_size) and the size of its copy (word_sz) are computed separately; with
// compact object headers the copy may need extra room to preserve the identity hash.

// Private inline function, for direct internal use and providing the
// implementation of the public non-inline function.
MAYBE_INLINE_EVACUATION
oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                    oop const old,
                                                    markWord const old_mark) {
  assert(region_attr.is_in_cset(),
         "Unexpected region attr type: %s", region_attr.get_type_str());

  // NOTE: With compact headers, it is not safe to load the Klass* from old, because
  // that would access the mark word, which concurrent workers might change at any
  // time. Such a mark word would refer to a forwardee, which may not yet have
  // completed copying. Therefore we must load the Klass* from the mark word that we
  // already loaded. This is safe, because we only enter here if the object is not
  // yet forwarded.
  assert(!old_mark.is_forwarded(), "precondition");
  Klass* klass = UseCompactObjectHeaders
      ? old_mark.klass()
      : old->klass();

  const size_t old_size = old->size_given_mark_and_klass(old_mark, klass);
  const size_t word_sz = old->copy_size(old_size, old_mark);
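  // old_size is the number of words the object currently occupies, while word_sz
  // is the size of the copy about to be made; with compact object headers the copy
  // may be larger, leaving room for storing the identity hash.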

  // JNI only allows pinning of typeArrays, so we only need to keep those in place.
  if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
    return handle_evacuation_failure_par(old, old_mark, word_sz, true /* cause_pinned */);
  }

  uint age = 0;
  G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
  G1HeapRegion* const from_region = _g1h->heap_region_containing(old);
  uint node_index = from_region->node_index();

  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);

  // PLAB allocations should succeed most of the time, so we normally
  // check against null only once.
  if (obj_ptr == nullptr) {
    obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
    if (obj_ptr == nullptr) {
      // This will either forward-to-self, or detect that someone else has
      // installed a forwarding pointer.
      return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
    }
  }

  assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

  // Should this evacuation fail?
  if (inject_allocation_failure(from_region->hrm_index())) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method.
    undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
  }

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, old_size);
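  // Only old_size words are copied from the source; if word_sz is larger, the
  // remaining space is filled in below when the identity hash is installed.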

  const oop obj = cast_to_oop(obj_ptr);
  // Because the forwarding is done with memory_order_relaxed there is no
  // ordering with the above copy. Clients that get the forwardee must not
  // examine its contents without other synchronization, since the contents
  // may not be up to date for them.
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == nullptr) {

    {
      const uint young_index = from_region->young_index_in_cset();
      assert((from_region->is_young() && young_index > 0) ||
             (!from_region->is_young() && young_index == 0), "invariant");
      _surviving_young_words[young_index] += word_sz;
    }

    obj->initialize_hash_if_necessary(old);
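    // If the old object had already been hashed, the copy must preserve that
    // identity hash explicitly; the extra space reserved via copy_size() above
    // is initialized here.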

    if (dest_attr.is_young()) {
      if (age < markWord::max_age) {
        age++;
        obj->incr_age();
      }
      _age_table.add(age, word_sz);
    } else {
      update_bot_after_copying(obj, word_sz);
    }

    // Most objects are not arrays, so do one array check rather than
    // checking each array category for every object.
    if (klass->is_array_klass()) {
      if (klass->is_objArray_klass()) {
        start_partial_objarray(dest_attr, old, obj);
      } else {
        // Nothing needs to be done for typeArrays. The body doesn't contain
        // any oops to scan, and the type in the klass will already be handled
        // by processing the built-in module.
        assert(klass->is_typeArray_klass(), "invariant");