src/hotspot/share/gc/g1/g1ParScanThreadState.cpp

464 // Private inline function, for direct internal use and providing the
465 // implementation of the public not-inline function.
466 MAYBE_INLINE_EVACUATION
467 oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
468                                                     oop const old,
469                                                     markWord const old_mark) {
470   assert(region_attr.is_in_cset(),
471          "Unexpected region attr type: %s", region_attr.get_type_str());
472 
473   // NOTE: With compact headers, it is not safe to load the Klass* from old, because
474   // that would access the mark word, which concurrent workers might change at any
475   // time.
476   // Such a mark word would refer to a forwardee that may not yet have completed
477   // copying. Therefore we must load the Klass* from the mark word that we already
478   // loaded. This is safe, because we only get here if the object is not yet forwarded.
479   assert(!old_mark.is_forwarded(), "precondition");
480   Klass* klass = UseCompactObjectHeaders
481       ? old_mark.klass()
482       : old->klass();
483 
484   const size_t word_sz = old->size_given_klass(klass);

485 
486   // JNI only allows pinning of typeArrays, so we only need to keep those in place.
487   if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
488     return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, true /* cause_pinned */);
489   }
490 
491   uint age = 0;
492   G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
493   G1HeapRegion* const from_region = _g1h->heap_region_containing(old);
494   uint node_index = from_region->node_index();
495 
496   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
497 
498   // PLAB allocations should succeed most of the time, so we'll
499   // normally check against null once and that's it.
500   if (obj_ptr == nullptr) {
501     obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
502     if (obj_ptr == nullptr) {
503       // This will either forward-to-self, or detect that someone else has
504       // installed a forwarding pointer.
505       return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
506     }
507   }
508 
509   assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
510   assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
511 
512   // Should this evacuation fail?
513   if (inject_allocation_failure(from_region->hrm_index())) {
514     // Doing this after all the allocation attempts also tests the
515     // undo_allocation() method.
516     undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
517     return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
518   }
519 
520   // We're going to allocate linearly, so might as well prefetch ahead.
521   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
522   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);
523 
524   const oop obj = cast_to_oop(obj_ptr);
525   // Because the forwarding is done with memory_order_relaxed, there is no
526   // ordering with the above copy.  Clients that get the forwardee must not
527   // examine its contents without other synchronization, since the contents
528   // may not be up to date for them.
529   const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
530   if (forward_ptr == nullptr) {
531 
532     {
533       const uint young_index = from_region->young_index_in_cset();
534       assert((from_region->is_young() && young_index >  0) ||
535              (!from_region->is_young() && young_index == 0), "invariant" );
536       _surviving_young_words[young_index] += word_sz;
537     }
538 


539     if (dest_attr.is_young()) {
540       if (age < markWord::max_age) {
541         age++;
542         obj->incr_age();
543       }
544       _age_table.add(age, word_sz);
545     } else {
546       update_bot_after_copying(obj, word_sz);
547     }
548 
549     {
550       // Skip the card enqueue iff the object (obj) is in a survivor region.
551       // However, G1HeapRegion::is_survivor() is too expensive here.
552       // Instead, we use dest_attr.is_young() because the two values are always
553       // equal: successfully allocated young regions must be survivor regions.
554       assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
555       G1SkipCardMarkSetter x(&_scanner, dest_attr.is_young());
556       do_iterate_object(obj, old, klass, region_attr, dest_attr, age);
557     }
558 
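
The memory_order_relaxed comment above is the subtle part of this path: the CAS that
installs the forwarding pointer deliberately does not publish the copied payload.
Below is a minimal standalone sketch of that publication rule, using toy types rather
than HotSpot's oop and markWord; ToyObject and try_evacuate are hypothetical names,
not JDK APIs.

#include <atomic>
#include <cstdint>
#include <vector>

struct ToyObject {                              // hypothetical stand-in for an oop
  std::atomic<ToyObject*> forwardee{nullptr};   // models the forwarding pointer
  std::vector<uint8_t> payload;                 // models the object body
};

// Copy the payload first, then try to install the forwarding pointer with a
// relaxed CAS, analogous to the forward_to_atomic call in the code above.
ToyObject* try_evacuate(ToyObject* old_obj, ToyObject* copy) {
  copy->payload = old_obj->payload;             // plain copy; no ordering by itself
  ToyObject* expected = nullptr;
  if (old_obj->forwardee.compare_exchange_strong(expected, copy,
                                                 std::memory_order_relaxed)) {
    return copy;                                // we won; our copy is complete here
  }
  // We lost: 'expected' is the winner's copy.  Because the exchange is relaxed,
  // nothing guarantees the winner's payload assignment is visible yet, so the
  // caller must not read expected->payload without other synchronization.
  return expected;
}

int main() {
  ToyObject old_obj, copy;
  old_obj.payload = {1, 2, 3};
  ToyObject* winner = try_evacuate(&old_obj, &copy);
  return winner == &copy ? 0 : 1;               // single-threaded demo: we always win
}

The updated version of the same function follows.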

464 // Private inline function, for direct internal use and providing the
465 // implementation of the public not-inline function.
466 MAYBE_INLINE_EVACUATION
467 oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
468                                                     oop const old,
469                                                     markWord const old_mark) {
470   assert(region_attr.is_in_cset(),
471          "Unexpected region attr type: %s", region_attr.get_type_str());
472 
473   // NOTE: With compact headers, it is not safe to load the Klass* from old, because
474   // that would access the mark word, which concurrent workers might change at any
475   // time.
476   // Such a mark word would refer to a forwardee that may not yet have completed
477   // copying. Therefore we must load the Klass* from the mark word that we already
478   // loaded. This is safe, because we only get here if the object is not yet forwarded.
479   assert(!old_mark.is_forwarded(), "precondition");
480   Klass* klass = UseCompactObjectHeaders
481       ? old_mark.klass()
482       : old->klass();
483 
484   const size_t old_size = old->size_given_mark_and_klass(old_mark, klass);
485   const size_t word_sz = old->copy_size(old_size, old_mark);
486 
487   // JNI only allows pinning of typeArrays, so we only need to keep those in place.
488   if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
489     return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, true /* cause_pinned */);
490   }
491 
492   uint age = 0;
493   G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
494   G1HeapRegion* const from_region = _g1h->heap_region_containing(old);
495   uint node_index = from_region->node_index();
496 
497   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
498 
499   // PLAB allocations should succeed most of the time, so we'll
500   // normally check against null once and that's it.
501   if (obj_ptr == nullptr) {
502     obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
503     if (obj_ptr == nullptr) {
504       // This will either forward-to-self, or detect that someone else has
505       // installed a forwarding pointer.
506       return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
507     }
508   }
509 
510   assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
511   assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
512 
513   // Should this evacuation fail?
514   if (inject_allocation_failure(from_region->hrm_index())) {
515     // Doing this after all the allocation attempts also tests the
516     // undo_allocation() method.
517     undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
518     return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
519   }
520 
521   // We're going to allocate linearly, so might as well prefetch ahead.
522   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
523   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, old_size);
524 
525   const oop obj = cast_to_oop(obj_ptr);
526   // Because the forwarding is done with memory_order_relaxed, there is no
527   // ordering with the above copy.  Clients that get the forwardee must not
528   // examine its contents without other synchronization, since the contents
529   // may not be up to date for them.
530   const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
531   if (forward_ptr == nullptr) {
532 
533     {
534       const uint young_index = from_region->young_index_in_cset();
535       assert((from_region->is_young() && young_index >  0) ||
536              (!from_region->is_young() && young_index == 0), "invariant" );
537       _surviving_young_words[young_index] += word_sz;
538     }
539 
540     obj->initialize_hash_if_necessary(old);
541 
542     if (dest_attr.is_young()) {
543       if (age < markWord::max_age) {
544         age++;
545         obj->incr_age();
546       }
547       _age_table.add(age, word_sz);
548     } else {
549       update_bot_after_copying(obj, word_sz);
550     }
551 
552     {
553       // Skip the card enqueue iff the object (obj) is in a survivor region.
554       // However, G1HeapRegion::is_survivor() is too expensive here.
555       // Instead, we use dest_attr.is_young() because the two values are always
556       // equal: successfully allocated young regions must be survivor regions.
557       assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
558       G1SkipCardMarkSetter x(&_scanner, dest_attr.is_young());
559       do_iterate_object(obj, old, klass, region_attr, dest_attr, age);
560     }
561 
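
The functional change in this updated version is the split between old_size, the number
of words actually copied from the old object, and word_sz, the size of the copy as
returned by copy_size(), together with the initialize_hash_if_necessary() call once the
forwarding CAS has been won. The sketch below illustrates that split, assuming, purely
for illustration, a scheme in which an already-hashed object grows by one word on its
first move so the identity hash can be kept in an appended slot; ToyMark,
old_size_in_words, copy_size_in_words and evacuate_words are hypothetical names, and the
real policy is whatever copy_size() and initialize_hash_if_necessary() implement.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Toy mark word: the lowest bit models "this object already has an identity
// hash" (purely illustrative, not the real HotSpot encoding).
struct ToyMark {
  uint64_t bits;
  bool has_hash() const { return (bits & 0x1) != 0; }
  uint32_t hash_value() const { return uint32_t(bits >> 32); }
};

// Words occupied by the original object.
size_t old_size_in_words(size_t payload_words) { return 1 + payload_words; }

// Words the copy needs: one extra to carry the identity hash, if any
// (the role played by copy_size() in the code above).
size_t copy_size_in_words(size_t old_size, ToyMark m) {
  return m.has_hash() ? old_size + 1 : old_size;
}

// Copy only the original words, then fill in the appended hash slot
// (the role played by Copy::aligned_disjoint_words(..., old_size) followed
// by initialize_hash_if_necessary(old) after the forwarding CAS succeeds).
void evacuate_words(const uint64_t* from, uint64_t* to,
                    size_t old_size, ToyMark m) {
  std::memcpy(to, from, old_size * sizeof(uint64_t));
  if (m.has_hash()) {
    to[old_size] = m.hash_value();
  }
}

int main() {
  ToyMark m{(uint64_t(0xCAFE) << 32) | 0x1};    // hashed object, hash value 0xCAFE
  uint64_t from[3] = {0, 42, 43};               // header word + two payload words
  uint64_t to[4] = {};
  size_t old_size = old_size_in_words(2);
  evacuate_words(from, to, old_size, m);
  return to[copy_size_in_words(old_size, m) - 1] == 0xCAFE ? 0 : 1;
}

The point of the split is visible in evacuate_words(): only the original old_size words
come from the source object, while anything beyond that in the copy is initialized by
the winner of the forwarding race rather than copied.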