
src/hotspot/share/gc/g1/g1ParScanThreadState.cpp

*** 33,11 ***
  #include "gc/g1/g1StringDedup.hpp"
  #include "gc/g1/g1Trace.hpp"
  #include "gc/g1/g1YoungGCAllocationFailureInjector.inline.hpp"
  #include "gc/shared/continuationGCSupport.inline.hpp"
  #include "gc/shared/partialArrayTaskStepper.inline.hpp"
- #include "gc/shared/preservedMarks.inline.hpp"
  #include "gc/shared/stringdedup/stringDedup.hpp"
  #include "gc/shared/taskqueue.inline.hpp"
  #include "memory/allocation.inline.hpp"
  #include "oops/access.inline.hpp"
  #include "oops/oop.inline.hpp"
--- 33,10 ---

*** 54,11 ***
  // Explicit NOINLINE to block ATTRIBUTE_FLATTENing.
  #define MAYBE_INLINE_EVACUATION NOT_DEBUG(inline) DEBUG_ONLY(NOINLINE)
  
  G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                             G1RedirtyCardsQueueSet* rdcqs,
-                                            PreservedMarks* preserved_marks,
                                             uint worker_id,
                                             uint num_workers,
                                             G1CollectionSet* collection_set,
                                             G1EvacFailureRegions* evac_failure_regions)
    : _g1h(g1h),
--- 53,10 ---

*** 84,11 ***
      _string_dedup_requests(),
      _max_num_optional_regions(collection_set->optional_region_length()),
      _numa(g1h->numa()),
      _obj_alloc_stat(nullptr),
      ALLOCATION_FAILURE_INJECTOR_ONLY(_allocation_failure_inject_counter(0) COMMA)
-     _preserved_marks(preserved_marks),
      _evacuation_failed_info(),
      _evac_failure_regions(evac_failure_regions),
      _evac_failure_enqueued_cards(0)
  {
    // We allocate number of young gen regions in the collection set plus one
--- 82,10 ---

*** 209,12 ***
      // In this case somebody else already did all the work.
      return;
    }
  
    markWord m = obj->mark();
!   if (m.is_marked()) {
!     obj = cast_to_oop(m.decode_pointer());
    } else {
      obj = do_copy_to_survivor_space(region_attr, obj, m);
    }
    RawAccess<IS_NOT_NULL>::oop_store(p, obj);
  
--- 206,12 ---
      // In this case somebody else already did all the work.
      return;
    }
  
    markWord m = obj->mark();
!   if (m.is_forwarded()) {
!     obj = obj->forwardee(m);
    } else {
      obj = do_copy_to_survivor_space(region_attr, obj, m);
    }
    RawAccess<IS_NOT_NULL>::oop_store(p, obj);
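  [Note on the hunk above: the rewritten resolution step loads the header once and feeds
  that same markWord snapshot to both the forwarding test and the forwardee decode. A
  self-contained toy model of that single-snapshot pattern -- not HotSpot code, and the
  bit layout is an assumption for illustration only:

    #include <atomic>
    #include <cstdint>

    struct TinyObj {
      std::atomic<uintptr_t> header;        // models the markWord
      static const uintptr_t FWD_BIT = 1;   // illustrative forwarding tag

      static bool is_forwarded(uintptr_t m) { return (m & FWD_BIT) != 0; }
      static TinyObj* forwardee(uintptr_t m) {
        return reinterpret_cast<TinyObj*>(m & ~FWD_BIT);
      }
    };

    // Load the header exactly once; the test and the decode use the same snapshot,
    // so a concurrent promotion cannot be observed half-way between the two steps.
    TinyObj* resolve(TinyObj* obj) {
      uintptr_t m = obj->header.load(std::memory_order_relaxed);
      return TinyObj::is_forwarded(m) ? TinyObj::forwardee(m) : obj;
    }

  The not-forwarded branch passes the same snapshot to do_copy_to_survivor_space for
  the same reason.]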
  

*** 224,11 ***
  MAYBE_INLINE_EVACUATION
  void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
    oop from_obj = task.to_source_array();
  
    assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
!   assert(from_obj->is_objArray(), "must be obj array");
    assert(from_obj->is_forwarded(), "must be forwarded");
  
    oop to_obj = from_obj->forwardee();
    assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
    assert(to_obj->is_objArray(), "must be obj array");
--- 221,11 ---
  MAYBE_INLINE_EVACUATION
  void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
    oop from_obj = task.to_source_array();
  
    assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
!   assert(from_obj->forward_safe_klass()->is_objArray_klass(), "must be obj array");
    assert(from_obj->is_forwarded(), "must be forwarded");
  
    oop to_obj = from_obj->forwardee();
    assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
    assert(to_obj->is_objArray(), "must be obj array");

*** 254,11 ***
  
  MAYBE_INLINE_EVACUATION
  void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
                                                    oop from_obj,
                                                    oop to_obj) {
!   assert(from_obj->is_objArray(), "precondition");
    assert(from_obj->is_forwarded(), "precondition");
    assert(from_obj->forwardee() == to_obj, "precondition");
    assert(from_obj != to_obj, "should not be scanning self-forwarded objects");
    assert(to_obj->is_objArray(), "precondition");
  
--- 251,11 ---
  
  MAYBE_INLINE_EVACUATION
  void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
                                                    oop from_obj,
                                                    oop to_obj) {
!   assert(from_obj->forward_safe_klass()->is_objArray_klass(), "precondition");
    assert(from_obj->is_forwarded(), "precondition");
    assert(from_obj->forwardee() == to_obj, "precondition");
    assert(from_obj != to_obj, "should not be scanning self-forwarded objects");
    assert(to_obj->is_objArray(), "precondition");
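  [Note on the two hunks above: both patched asserts route the array check for from_obj
  through forward_safe_klass() rather than is_objArray(), because the plain form re-reads
  the header of an object that is already forwarded at this point. A toy model of a
  forwarding-aware klass read -- an assumption about the helper's behavior, not the real
  implementation:

    #include <atomic>
    #include <cstdint>

    struct ToyKlass { bool is_objArray; };

    struct ToyOop {
      std::atomic<uintptr_t> header;        // klass bits or forwardee pointer
      static const uintptr_t FWD_BIT = 1;

      // Forwarding-aware read: if the header holds a forwardee, decode the klass
      // from the copy's header (fully written here, since chunking only starts
      // after the array itself has been copied).
      ToyKlass* forward_safe_klass() const {
        uintptr_t m = header.load(std::memory_order_relaxed);
        const ToyOop* o = (m & FWD_BIT)
            ? reinterpret_cast<const ToyOop*>(m & ~FWD_BIT) : this;
        return reinterpret_cast<ToyKlass*>(o->header.load(std::memory_order_relaxed));
      }
    };]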
  

*** 381,26 ***
    // young-to-old (promotion) or old-to-old; destination is old in both cases.
    return G1HeapRegionAttr::Old;
  }
  
  void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
!                                                   oop const old, size_t word_sz, uint age,
                                                    HeapWord * const obj_ptr, uint node_index) const {
    PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
    if (alloc_buf->contains(obj_ptr)) {
!     _g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                                dest_attr.type() == G1HeapRegionAttr::Old,
                                                                alloc_buf->word_sz() * HeapWordSize);
    } else {
!     _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                                 dest_attr.type() == G1HeapRegionAttr::Old);
    }
  }
  
  NOINLINE
  HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
!                                                    oop old,
                                                     size_t word_sz,
                                                     uint age,
                                                     uint node_index) {
    HeapWord* obj_ptr = nullptr;
    // Try slow-path allocation unless we're allocating old and old is already full.
--- 378,26 ---
    // young-to-old (promotion) or old-to-old; destination is old in both cases.
    return G1HeapRegionAttr::Old;
  }
  
  void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
!                                                   Klass* klass, size_t word_sz, uint age,
                                                    HeapWord * const obj_ptr, uint node_index) const {
    PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
    if (alloc_buf->contains(obj_ptr)) {
!     _g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(klass, word_sz * HeapWordSize, age,
                                                                dest_attr.type() == G1HeapRegionAttr::Old,
                                                                alloc_buf->word_sz() * HeapWordSize);
    } else {
!     _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(klass, word_sz * HeapWordSize, age,
                                                                 dest_attr.type() == G1HeapRegionAttr::Old);
    }
  }
  
  NOINLINE
  HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
!                                                    Klass* klass,
                                                     size_t word_sz,
                                                     uint age,
                                                     uint node_index) {
    HeapWord* obj_ptr = nullptr;
    // Try slow-path allocation unless we're allocating old and old is already full.
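  [Note on the hunk above: report_promotion_event() and allocate_copy_slow() now receive
  the Klass* the caller already decoded safely, rather than the old oop, so neither needs
  to call old->klass() after a competing worker may have turned old's header into a
  forwarding pointer. The reported event flavor then depends only on where the copy
  landed. A toy model of that decision, with simplified types; the strings name the
  standard JFR promotion events:

    #include <cstddef>

    struct ToyPlab {
      char*  base;
      size_t byte_size;
      bool contains(const void* p) const {
        const char* c = static_cast<const char*>(p);
        return c >= base && c < base + byte_size;
      }
    };

    // In-new-PLAB vs outside-PLAB is decided purely from the allocation address
    // and the caller-supplied klass/size -- nothing is re-read from the old object.
    const char* promotion_event_kind(const ToyPlab& plab, const void* copy_addr) {
      return plab.contains(copy_addr) ? "jdk.PromoteObjectInNewPLAB"
                                      : "jdk.PromoteObjectOutsidePLAB";
    }]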

*** 419,11 ***
    }
    if (obj_ptr != nullptr) {
      update_numa_stats(node_index);
      if (_g1h->gc_tracer_stw()->should_report_promotion_events()) {
        // The events are checked individually as part of the actual commit
!       report_promotion_event(*dest_attr, old, word_sz, age, obj_ptr, node_index);
      }
    }
    return obj_ptr;
  }
  
--- 416,11 ---
    }
    if (obj_ptr != nullptr) {
      update_numa_stats(node_index);
      if (_g1h->gc_tracer_stw()->should_report_promotion_events()) {
        // The events are checked individually as part of the actual commit
!       report_promotion_event(*dest_attr, klass, word_sz, age, obj_ptr, node_index);
      }
    }
    return obj_ptr;
  }
  

*** 456,11 ***
    assert(region_attr.is_in_cset(),
           "Unexpected region attr type: %s", region_attr.get_type_str());
  
    // Get the klass once.  We'll need it again later, and this avoids
    // re-decoding when it's compressed.
!   Klass* klass = old->klass();
    const size_t word_sz = old->size_given_klass(klass);
  
    // JNI only allows pinning of typeArrays, so we only need to keep those in place.
    if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
      return handle_evacuation_failure_par(old, old_mark, word_sz, true /* cause_pinned */);
--- 453,17 ---
    assert(region_attr.is_in_cset(),
           "Unexpected region attr type: %s", region_attr.get_type_str());
  
    // Get the klass once.  We'll need it again later, and this avoids
    // re-decoding when it's compressed.
!   // NOTE: With compact headers, it is not safe to load the Klass* from old, because
+   // that would access the mark-word, and the mark-word might be changed at any time
+   // by concurrent promotion. The promoted mark-word would point to the forwardee,
+   // which may not yet have completed copying. Therefore we must load the Klass* from
+   // the mark-word that we have already loaded. This is safe, because the caller has
+   // already checked that the object is not yet forwarded.
+   Klass* klass = old->forward_safe_klass(old_mark);
    const size_t word_sz = old->size_given_klass(klass);
  
    // JNI only allows pinning of typeArrays, so we only need to keep those in place.
    if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
      return handle_evacuation_failure_par(old, old_mark, word_sz, true /* cause_pinned */);
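  [Note on the hunk above: the NOTE is the heart of the change -- the Klass* is decoded
  from old_mark, the mark-word snapshot taken before the forwarded check, never from a
  fresh header read. A minimal sketch of the idea, assuming a compact-header layout in
  which the upper mark bits carry the klass; the exact encoding here is illustrative,
  not the real one:

    #include <cstdint>

    struct Klass;

    struct MarkSnapshot {
      uintptr_t bits;
      static const uintptr_t FWD_BIT     = 1;   // illustrative forwarding tag
      static const int       KLASS_SHIFT = 32;  // illustrative klass-bits position

      bool   is_forwarded() const { return (bits & FWD_BIT) != 0; }
      Klass* decode_klass() const {
        return reinterpret_cast<Klass*>(bits >> KLASS_SHIFT);
      }
    };

    // Safe by construction: the caller verified the snapshot is not forwarded, so
    // its klass bits are still valid even if the object's live header has since
    // been overwritten with a forwarding pointer.
    inline Klass* forward_safe_klass_model(MarkSnapshot old_mark) {
      return old_mark.decode_klass();
    }]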

*** 474,11 ***
    HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
  
    // PLAB allocations should succeed most of the time, so we'll
    // normally check against null once and that's it.
    if (obj_ptr == nullptr) {
!     obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index);
      if (obj_ptr == nullptr) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
      }
--- 477,11 ---
    HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
  
    // PLAB allocations should succeed most of the time, so we'll
    // normally check against null once and that's it.
    if (obj_ptr == nullptr) {
!     obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
      if (obj_ptr == nullptr) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
      }

*** 575,11 ***
  G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
    assert(worker_id < _num_workers, "out of bounds access");
    if (_states[worker_id] == nullptr) {
      _states[worker_id] =
        new G1ParScanThreadState(_g1h, rdcqs(),
-                                _preserved_marks_set.get(worker_id),
                                 worker_id,
                                 _num_workers,
                                 _collection_set,
                                 _evac_failure_regions);
    }
--- 578,10 ---

*** 634,11 ***
  
  NOINLINE
  oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, size_t word_sz, bool cause_pinned) {
    assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
  
!   oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
    if (forward_ptr == nullptr) {
      // Forward-to-self succeeded. We are the "owner" of the object.
      HeapRegion* r = _g1h->heap_region_containing(old);
  
      if (_evac_failure_regions->record(_worker_id, r->hrm_index(), cause_pinned)) {
--- 636,11 ---
  
  NOINLINE
  oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, size_t word_sz, bool cause_pinned) {
    assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
  
!   oop forward_ptr = old->forward_to_self_atomic(m, memory_order_relaxed);
    if (forward_ptr == nullptr) {
      // Forward-to-self succeeded. We are the "owner" of the object.
      HeapRegion* r = _g1h->heap_region_containing(old);
  
      if (_evac_failure_regions->record(_worker_id, r->hrm_index(), cause_pinned)) {
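  [Note on the hunk above: forward_to_self_atomic() replaces forward_to_atomic(old, ...):
  instead of CAS-ing a pointer to the object itself into the header, it installs a
  dedicated self-forwarded state. A toy model of the race and the return convention
  (nullptr on success, the winner's forwardee on failure); the encoding that keeps the
  original mark bits intact is an assumption, but it would explain why the PreservedMarks
  push disappears in the next hunk:

    #include <atomic>
    #include <cstdint>

    struct FailObj {
      std::atomic<uintptr_t> header;
      static const uintptr_t FWD_BIT      = 1;   // "forwarded to another copy"
      static const uintptr_t SELF_FWD_BIT = 2;   // illustrative self-forwarded tag

      // Returns nullptr if this thread won and now owns evacuation-failure handling
      // for the object; otherwise returns the forwardee a competing thread installed.
      FailObj* forward_to_self_atomic_model(uintptr_t expected_mark) {
        uintptr_t self_fwd = expected_mark | SELF_FWD_BIT;  // original mark bits kept
        uintptr_t observed = expected_mark;
        if (header.compare_exchange_strong(observed, self_fwd,
                                           std::memory_order_relaxed)) {
          return nullptr;
        }
        return reinterpret_cast<FailObj*>(observed & ~(FWD_BIT | SELF_FWD_BIT));
      }
    };]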

*** 647,12 ***
  
      // Mark the failing object in the marking bitmap and later use the bitmap to handle
      // evacuation failure recovery.
      _g1h->mark_evac_failure_object(_worker_id, old, word_sz);
  
-     _preserved_marks->push_if_necessary(old, m);
- 
      ContinuationGCSupport::transform_stack_chunk(old);
  
      _evacuation_failed_info.register_copy_failure(word_sz);
  
      // For iterating objects that failed evacuation currently we can reuse the
--- 649,10 ---

*** 706,18 ***
                                                   G1CollectionSet* collection_set,
                                                   G1EvacFailureRegions* evac_failure_regions) :
      _g1h(g1h),
      _collection_set(collection_set),
      _rdcqs(G1BarrierSet::dirty_card_queue_set().allocator()),
-     _preserved_marks_set(true /* in_c_heap */),
      _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, num_workers, mtGC)),
      _rdc_buffers(NEW_C_HEAP_ARRAY(BufferNodeList, num_workers, mtGC)),
      _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, collection_set->young_region_length() + 1, mtGC)),
      _num_workers(num_workers),
      _flushed(false),
      _evac_failure_regions(evac_failure_regions) {
-   _preserved_marks_set.init(num_workers);
    for (uint i = 0; i < num_workers; ++i) {
      _states[i] = nullptr;
      _rdc_buffers[i] = BufferNodeList();
    }
    memset(_surviving_young_words_total, 0, (collection_set->young_region_length() + 1) * sizeof(size_t));
--- 706,16 ---

*** 726,7 ***
  G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
    assert(_flushed, "thread local state from the per thread states should have been flushed");
    FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
    FREE_C_HEAP_ARRAY(BufferNodeList, _rdc_buffers);
-   _preserved_marks_set.reclaim();
  }
--- 724,6 ---