
src/hotspot/share/gc/g1/g1ParScanThreadState.cpp

Old version:

458 // Private inline function, for direct internal use and providing the
459 // implementation of the public non-inline function.
460 MAYBE_INLINE_EVACUATION
461 oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
462                                                     oop const old,
463                                                     markWord const old_mark) {
464   assert(region_attr.is_in_cset(),
465          "Unexpected region attr type: %s", region_attr.get_type_str());
466 
467   // NOTE: With compact headers, it is not safe to load the Klass* from old, because
468   // that would access the mark word, which might change at any time due to
469   // concurrent workers.
470   // Such a mark word would refer to a forwardee, which may not yet have completed
471   // copying. Therefore we must load the Klass* from the mark word that we already
472   // loaded. This is safe, because we only get here if the object is not yet forwarded.
473   assert(!old_mark.is_forwarded(), "precondition");
474   Klass* klass = UseCompactObjectHeaders
475       ? old_mark.klass()
476       : old->klass();
477 
478   const size_t word_sz = old->size_given_klass(klass);
479 
480   // JNI only allows pinning of typeArrays, so we only need to keep those in place.
481   if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
482     return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, true /* cause_pinned */);
483   }
484 
485   uint age = 0;
486   G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
487   G1HeapRegion* const from_region = _g1h->heap_region_containing(old);
488   uint node_index = from_region->node_index();
489 
490   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
491 
492   // PLAB allocations should succeed most of the time, so we'll
493   // normally check against null once and that's it.
494   if (obj_ptr == nullptr) {
495     obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
496     if (obj_ptr == nullptr) {
497       // This will either forward-to-self, or detect that someone else has
498       // installed a forwarding pointer.
499       return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
500     }
501   }
502 
503   assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
504   assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
505 
506   // Should this evacuation fail?
507   if (inject_allocation_failure(from_region->hrm_index())) {
508     // Doing this after all the allocation attempts also tests the
509     // undo_allocation() method.
510     undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
511     return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
512   }
513 
514   // We're going to allocate linearly, so might as well prefetch ahead.
515   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
516   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);
517 
518   const oop obj = cast_to_oop(obj_ptr);
519   // Because the forwarding is done with memory_order_relaxed, there is no
520   // ordering with the above copy.  Clients that get the forwardee must not
521   // examine its contents without other synchronization, since the contents
522   // may not be up to date for them.
523   const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
524   if (forward_ptr == nullptr) {
525 
526     {
527       const uint young_index = from_region->young_index_in_cset();
528       assert((from_region->is_young() && young_index >  0) ||
529              (!from_region->is_young() && young_index == 0), "invariant" );
530       _surviving_young_words[young_index] += word_sz;
531     }
532 
533     if (dest_attr.is_young()) {
534       if (age < markWord::max_age) {
535         age++;
536         obj->incr_age();
537       }
538       _age_table.add(age, word_sz);
539     } else {
540       update_bot_after_copying(obj, word_sz);
541     }
542 
543     {
544       // Skip the card enqueue iff the object (obj) is in a survivor region.
545       // However, G1HeapRegion::is_survivor() is too expensive here.
546       // Instead, we use dest_attr.is_young() because the two values are always
547       // equal: successfully allocated young regions must be survivor regions.
548       assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
549       G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
550       do_iterate_object(obj, old, klass, region_attr, dest_attr, age);
551     }
552 

New version:

458 // Private inline function, for direct internal use and providing the
459 // implementation of the public non-inline function.
460 MAYBE_INLINE_EVACUATION
461 oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
462                                                     oop const old,
463                                                     markWord const old_mark) {
464   assert(region_attr.is_in_cset(),
465          "Unexpected region attr type: %s", region_attr.get_type_str());
466 
467   // NOTE: With compact headers, it is not safe to load the Klass* from old, because
468   // that would access the mark word, which might change at any time due to
469   // concurrent workers.
470   // Such a mark word would refer to a forwardee, which may not yet have completed
471   // copying. Therefore we must load the Klass* from the mark word that we already
472   // loaded. This is safe, because we only get here if the object is not yet forwarded.
473   assert(!old_mark.is_forwarded(), "precondition");
474   Klass* klass = UseCompactObjectHeaders
475       ? old_mark.klass()
476       : old->klass();
477 
478   const size_t old_size = old->size_given_mark_and_klass(old_mark, klass);
479   const size_t word_sz = old->copy_size(old_size, old_mark);
480 
481   // JNI only allows pinning of typeArrays, so we only need to keep those in place.
482   if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
483     return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, true /* cause_pinned */);
484   }
485 
486   uint age = 0;
487   G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
488   G1HeapRegion* const from_region = _g1h->heap_region_containing(old);
489   uint node_index = from_region->node_index();
490 
491   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
492 
493   // PLAB allocations should succeed most of the time, so we'll
494   // normally check against null once and that's it.
495   if (obj_ptr == nullptr) {
496     obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
497     if (obj_ptr == nullptr) {
498       // This will either forward-to-self, or detect that someone else has
499       // installed a forwarding pointer.
500       return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
501     }
502   }
503 
504   assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
505   assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
506 
507   // Should this evacuation fail?
508   if (inject_allocation_failure(from_region->hrm_index())) {
509     // Doing this after all the allocation attempts also tests the
510     // undo_allocation() method.
511     undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
512     return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
513   }
514 
515   // We're going to allocate linearly, so might as well prefetch ahead.
516   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
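      // Only old_size words are copied from the original object; any additional
      // space reserved in word_sz is initialized separately (see
      // initialize_hash_if_necessary() below).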
517   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, old_size);
518 
519   const oop obj = cast_to_oop(obj_ptr);
520   // Because the forwarding is done with memory_order_relaxed, there is no
521   // ordering with the above copy.  Clients that get the forwardee must not
522   // examine its contents without other synchronization, since the contents
523   // may not be up to date for them.
524   const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
525   if (forward_ptr == nullptr) {
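        // A null return value means this thread installed the forwarding pointer
        // and thus won the race; it is now responsible for the bookkeeping below.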
526 
527     {
528       const uint young_index = from_region->young_index_in_cset();
529       assert((from_region->is_young() && young_index >  0) ||
530              (!from_region->is_young() && young_index == 0), "invariant" );
531       _surviving_young_words[young_index] += word_sz;
532     }
533 
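        // If the old object already had its identity hash computed, install that
        // hash in the copy (which was sized to hold it, see copy_size() above);
        // otherwise this is expected to be a no-op.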
534     obj->initialize_hash_if_necessary(old);
535 
536     if (dest_attr.is_young()) {
537       if (age < markWord::max_age) {
538         age++;
539         obj->incr_age();
540       }
541       _age_table.add(age, word_sz);
542     } else {
543       update_bot_after_copying(obj, word_sz);
544     }
545 
546     {
547       // Skip the card enqueue iff the object (obj) is in a survivor region.
548       // However, G1HeapRegion::is_survivor() is too expensive here.
549       // Instead, we use dest_attr.is_young() because the two values are always
550       // equal: successfully allocated young regions must be survivor regions.
551       assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
552       G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
553       do_iterate_object(obj, old, klass, region_attr, dest_attr, age);
554     }
555 