src/hotspot/share/gc/g1/g1ParScanThreadState.cpp

425 // Private inline function, for direct internal use and providing the
426 // implementation of the public not-inline function.
427 MAYBE_INLINE_EVACUATION
428 oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
429                                                     oop const old,
430                                                     markWord const old_mark) {
431   assert(region_attr.is_in_cset(),
432          "Unexpected region attr type: %s", region_attr.get_type_str());
433 
434   // NOTE: With compact headers, it is not safe to load the Klass* from old,
435   // because that would access the mark word, which concurrent workers might
436   // change at any time.
437   // Such a mark word would refer to a forwardee, which may not yet have completed
438   // copying. Therefore we must load the Klass* from the mark word that we already
439   // loaded. This is safe, because we only enter here if not yet forwarded.
440   assert(!old_mark.is_forwarded(), "precondition");
441   Klass* klass = UseCompactObjectHeaders
442       ? old_mark.klass()
443       : old->klass();
444 
445   const size_t word_sz = old->size_given_klass(klass);

446 
447   // JNI only allows pinning of typeArrays, so we only need to keep those in place.
448   if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
449     return handle_evacuation_failure_par(old, old_mark, word_sz, true /* cause_pinned */);
450   }
451 
452   uint age = 0;
453   G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
454   G1HeapRegion* const from_region = _g1h->heap_region_containing(old);
455   uint node_index = from_region->node_index();
456 
457   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
458 
459   // PLAB allocations should succeed most of the time, so we'll
460   // normally check against null once and that's it.
461   if (obj_ptr == nullptr) {
462     obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
463     if (obj_ptr == nullptr) {
464       // This will either forward-to-self, or detect that someone else has
465       // installed a forwarding pointer.
466       return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
467     }
468   }
469 
470   assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
471   assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
472 
473   // Should this evacuation fail?
474   if (inject_allocation_failure(from_region->hrm_index())) {
475     // Doing this after all the allocation attempts also exercises the
476     // undo_allocation() method.
477     undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
478     return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
479   }
480 
481   // We're going to allocate linearly, so might as well prefetch ahead.
482   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
483   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);
484 
485   const oop obj = cast_to_oop(obj_ptr);
486   // Because the forwarding is done with memory_order_relaxed there is no
487   // ordering with the above copy.  Clients that get the forwardee must not
488   // examine its contents without other synchronization, since the contents
489   // may not be up to date for them.
490   const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
491   if (forward_ptr == nullptr) {
492 
493     {
494       const uint young_index = from_region->young_index_in_cset();
495       assert((from_region->is_young() && young_index >  0) ||
496              (!from_region->is_young() && young_index == 0), "invariant" );
497       _surviving_young_words[young_index] += word_sz;
498     }
499 


500     if (dest_attr.is_young()) {
501       if (age < markWord::max_age) {
502         age++;
503         obj->incr_age();
504       }
505       _age_table.add(age, word_sz);
506     } else {
507       update_bot_after_copying(obj, word_sz);
508     }
509 
510     // Most objects are not arrays, so do one array check rather than
511     // checking for each array category for each object.
512     if (klass->is_array_klass()) {
513       if (klass->is_objArray_klass()) {
514         start_partial_objarray(dest_attr, old, obj);
515       } else {
516         // Nothing needs to be done for typeArrays.  Body doesn't contain
517         // any oops to scan, and the type in the klass will already be handled
518         // by processing the built-in module.
519         assert(klass->is_typeArray_klass(), "invariant");
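The listing above is the pre-patch version of do_copy_to_survivor_space(); the listing that follows is the patched one. The comment at source lines 434-439 states the constraint that drives the change: with compact object headers the Klass* must be taken from the mark word that was loaded earlier, because a racing worker may overwrite the object's header with a forwarding pointer at any time, and the forwarding itself is published with a relaxed CAS (source lines 486-491). The standalone C++ sketch below models that snapshot-then-CAS pattern with made-up types and names (HeaderWord, Object, evacuate()); it is an illustration of the idea, not HotSpot code.

#include <atomic>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for a compact object header: the low bit marks a
// forwarded object, otherwise the word encodes a class id (the real header
// also carries age and hash bits). None of these names are HotSpot API.
struct HeaderWord {
  uintptr_t bits;
  bool is_forwarded() const { return (bits & 1u) != 0; }
  uint32_t klass_id() const { return static_cast<uint32_t>(bits >> 1); }
};

struct Object {
  std::atomic<uintptr_t> header;
};

uintptr_t evacuate(Object* old_obj, uintptr_t forwardee_bits) {
  // Snapshot the header exactly once.
  HeaderWord old_mark{old_obj->header.load(std::memory_order_relaxed)};
  if (old_mark.is_forwarded()) {
    return old_mark.bits;              // another worker already claimed it
  }
  // Derive the class from the snapshot, never by re-reading old_obj->header:
  // a racing worker may replace the header with a forwarding pointer.
  uint32_t klass_id = old_mark.klass_id();
  (void)klass_id;                      // ... size and copy the object here ...

  // Publish the forwardee with a relaxed CAS. Because it is relaxed, a reader
  // of the forwarding pointer gets no ordering with the copy made above.
  uintptr_t expected = old_mark.bits;
  if (old_obj->header.compare_exchange_strong(expected, forwardee_bits | 1u,
                                              std::memory_order_relaxed)) {
    return forwardee_bits | 1u;        // this thread won the race
  }
  return expected;                     // someone else forwarded it first
}

int main() {
  Object o;
  o.header.store(uintptr_t{7} << 1, std::memory_order_relaxed);  // class id 7, not forwarded
  std::printf("header after evacuate: %#zx\n", (size_t)evacuate(&o, 0x1000));
  return 0;
}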

425 // Private inline function, for direct internal use and providing the
426 // implementation of the public not-inline function.
427 MAYBE_INLINE_EVACUATION
428 oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
429                                                     oop const old,
430                                                     markWord const old_mark) {
431   assert(region_attr.is_in_cset(),
432          "Unexpected region attr type: %s", region_attr.get_type_str());
433 
434   // NOTE: With compact headers, it is not safe to load the Klass* from old,
435   // because that would access the mark word, which concurrent workers might
436   // change at any time.
437   // Such a mark word would refer to a forwardee, which may not yet have completed
438   // copying. Therefore we must load the Klass* from the mark word that we already
439   // loaded. This is safe, because we only enter here if not yet forwarded.
440   assert(!old_mark.is_forwarded(), "precondition");
441   Klass* klass = UseCompactObjectHeaders
442       ? old_mark.klass()
443       : old->klass();
444 
445   const size_t old_size = old->size_given_mark_and_klass(old_mark, klass);
446   const size_t word_sz = old->copy_size(old_size, old_mark);
447 
448   // JNI only allows pinning of typeArrays, so we only need to keep those in place.
449   if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
450     return handle_evacuation_failure_par(old, old_mark, word_sz, true /* cause_pinned */);
451   }
452 
453   uint age = 0;
454   G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
455   G1HeapRegion* const from_region = _g1h->heap_region_containing(old);
456   uint node_index = from_region->node_index();
457 
458   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
459 
460   // PLAB allocations should succeed most of the time, so we'll
461   // normally check against null once and that's it.
462   if (obj_ptr == nullptr) {
463     obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
464     if (obj_ptr == nullptr) {
465       // This will either forward-to-self, or detect that someone else has
466       // installed a forwarding pointer.
467       return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
468     }
469   }
470 
471   assert(obj_ptr != nullptr, "when we get here, allocation should have succeeded");
472   assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
473 
474   // Should this evacuation fail?
475   if (inject_allocation_failure(from_region->hrm_index())) {
476     // Doing this after all the allocation attempts also exercises the
477     // undo_allocation() method.
478     undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
479     return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
480   }
481 
482   // We're going to allocate linearly, so might as well prefetch ahead.
483   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
484   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, old_size);
485 
486   const oop obj = cast_to_oop(obj_ptr);
487   // Because the forwarding is done with memory_order_relaxed there is no
488   // ordering with the above copy.  Clients that get the forwardee must not
489   // examine its contents without other synchronization, since the contents
490   // may not be up to date for them.
491   const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
492   if (forward_ptr == nullptr) {
493 
494     {
495       const uint young_index = from_region->young_index_in_cset();
496       assert((from_region->is_young() && young_index >  0) ||
497              (!from_region->is_young() && young_index == 0), "invariant" );
498       _surviving_young_words[young_index] += word_sz;
499     }
500 
501     obj->initialize_hash_if_necessary(old);
502 
503     if (dest_attr.is_young()) {
504       if (age < markWord::max_age) {
505         age++;
506         obj->incr_age();
507       }
508       _age_table.add(age, word_sz);
509     } else {
510       update_bot_after_copying(obj, word_sz);
511     }
512 
513     // Most objects are not arrays, so do one array check rather than
514     // checking for each array category for each object.
515     if (klass->is_array_klass()) {
516       if (klass->is_objArray_klass()) {
517         start_partial_objarray(dest_attr, old, obj);
518       } else {
519         // Nothing needs to be done for typeArrays.  Body doesn't contain
520         // any oops to scan, and the type in the klass will already be handled
521         // by processing the built-in module.
522         assert(klass->is_typeArray_klass(), "invariant");
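Relative to the first listing, the patched version splits the size computation (source lines 445-446): old_size is the number of words the object currently occupies, while word_sz, obtained via copy_size(), is the number of words the forwardee needs; the copy transfers only old_size words (source line 484), and initialize_hash_if_necessary() (source line 501) completes the forwardee afterwards. The standalone sketch below models that size split under the assumption, taken from the compact-object-headers design rather than from this file, that a copy may need one extra trailing word to materialize the identity hash; MarkModel, object_size(), and the local copy_size() are hypothetical stand-ins, not the HotSpot implementation.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Hypothetical model of the mark-word state: the identity hash has been
// requested but no slot for it exists in the object yet.
struct MarkModel {
  bool hashed_not_expanded;
  uint32_t identity_hash;
};

// Analogue of old_size: the size of the object as it exists right now.
size_t object_size(size_t payload_words) { return payload_words; }

// Analogue of copy_size(): the copy needs one extra word when the hash still
// has to be materialized in the new location.
size_t copy_size(size_t old_size, const MarkModel& mark) {
  return old_size + (mark.hashed_not_expanded ? 1 : 0);
}

int main() {
  MarkModel mark{true, 0x1234abcd};
  std::vector<uintptr_t> old_obj{42, 43, 44};            // a 3-word "object"
  size_t old_size = object_size(old_obj.size());
  size_t word_sz  = copy_size(old_size, mark);           // 4 words here

  std::vector<uintptr_t> copy(word_sz);
  // Copy only the words that exist in the old object (old_size, not word_sz) ...
  std::memcpy(copy.data(), old_obj.data(), old_size * sizeof(uintptr_t));
  // ... then fill in the extra slot, the analogue of initialize_hash_if_necessary().
  if (mark.hashed_not_expanded) {
    copy[old_size] = mark.identity_hash;
  }
  std::printf("old_size=%zu word_sz=%zu hash slot=%#zx\n",
              old_size, word_sz, (size_t)copy[old_size]);
  return 0;
}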