
src/hotspot/share/gc/g1/g1ParScanThreadState.cpp

*** 195,11 ***
      return;
    }
  
    markWord m = obj->mark();
    if (m.is_marked()) {
!     obj = cast_to_oop(m.decode_pointer());
    } else {
      obj = do_copy_to_survivor_space(region_attr, obj, m);
    }
    RawAccess<IS_NOT_NULL>::oop_store(p, obj);
  
--- 195,11 ---
      return;
    }
  
    markWord m = obj->mark();
    if (m.is_marked()) {
!     obj = obj->forwardee(m);
    } else {
      obj = do_copy_to_survivor_space(region_attr, obj, m);
    }
    RawAccess<IS_NOT_NULL>::oop_store(p, obj);
  

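For readers skimming this hunk: the old code decoded the forwardee directly via cast_to_oop(m.decode_pointer()), while the new code asks the object itself via obj->forwardee(m), passing the mark word that was already loaded. The self-contained toy sketch below is not HotSpot code; ToyObject, ToyMark and the bit layout are made up for illustration. It only shows the idea of decoding a forwardee from a previously loaded mark word instead of re-reading the header.

    // Toy model, not HotSpot code: a mark word whose low two bits flag
    // "forwarded"; when they are set, the remaining bits hold the forwardee
    // address. The real forwardee(markWord) accessor hides whatever encoding
    // the (legacy or compact) header scheme actually uses.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    struct ToyObject;

    struct ToyMark {
      uintptr_t bits;
      static constexpr uintptr_t marked_value = 0x3;
      bool is_marked() const { return (bits & 0x3) == marked_value; }
      ToyObject* decode_forwardee() const {
        return reinterpret_cast<ToyObject*>(bits & ~uintptr_t(0x3));
      }
    };

    struct ToyObject {
      ToyMark mark;
      // Decode the forwardee from a mark word the caller has already loaded,
      // mirroring the obj->forwardee(m) call in the hunk above.
      ToyObject* forwardee(ToyMark m) const {
        assert(m.is_marked() && "only forwarded objects have a forwardee");
        return m.decode_forwardee();
      }
    };

    int main() {
      ToyObject to   = { { 0 } };
      ToyObject from = { { reinterpret_cast<uintptr_t>(&to) | ToyMark::marked_value } };
      ToyMark m = from.mark;                                  // load the mark once
      ToyObject* obj = m.is_marked() ? from.forwardee(m) : &from;
      printf("forwardee resolved correctly: %d\n", obj == &to);
      return 0;
    }
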
*** 209,11 ***
  MAYBE_INLINE_EVACUATION
  void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
    oop from_obj = task.to_source_array();
  
    assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
!   assert(from_obj->is_objArray(), "must be obj array");
    assert(from_obj->is_forwarded(), "must be forwarded");
  
    oop to_obj = from_obj->forwardee();
    assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
    assert(to_obj->is_objArray(), "must be obj array");
--- 209,11 ---
  MAYBE_INLINE_EVACUATION
  void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
    oop from_obj = task.to_source_array();
  
    assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
!   assert(UseCompactObjectHeaders || from_obj->is_objArray(), "must be obj array");
    assert(from_obj->is_forwarded(), "must be forwarded");
  
    oop to_obj = from_obj->forwardee();
    assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
    assert(to_obj->is_objArray(), "must be obj array");

*** 239,11 ***
  
  MAYBE_INLINE_EVACUATION
  void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
                                                    oop from_obj,
                                                    oop to_obj) {
!   assert(from_obj->is_objArray(), "precondition");
    assert(from_obj->is_forwarded(), "precondition");
    assert(from_obj->forwardee() == to_obj, "precondition");
    assert(from_obj != to_obj, "should not be scanning self-forwarded objects");
    assert(to_obj->is_objArray(), "precondition");
  
--- 239,11 ---
  
  MAYBE_INLINE_EVACUATION
  void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
                                                    oop from_obj,
                                                    oop to_obj) {
!   assert(UseCompactObjectHeaders || from_obj->is_objArray(), "precondition");
    assert(from_obj->is_forwarded(), "precondition");
    assert(from_obj->forwardee() == to_obj, "precondition");
    assert(from_obj != to_obj, "should not be scanning self-forwarded objects");
    assert(to_obj->is_objArray(), "precondition");
  

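Both hunks above relax a from_obj->is_objArray() assert behind UseCompactObjectHeaders. As a hedged simplification of the reasoning: with compact headers the klass information shares the header word that forwarding overwrites, so a forwarded from_obj can no longer answer klass queries reliably, while the forwardee still can (the assert on to_obj stays unconditional). The toy model below uses illustrative names and an invented bit layout, not HotSpot code, to demonstrate the hazard.

    // Toy model, not HotSpot code: the klass id shares the single header word
    // that forwarding overwrites, so a forwarded object can no longer answer
    // klass queries from its own header; only the forwardee can.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    struct ToyObject {
      uintptr_t header;  // klass id, or forwardee pointer with low bits 0x3
      bool is_forwarded() const { return (header & 0x3) == 0x3; }
      bool is_obj_array() const {
        assert(!is_forwarded() && "klass id is gone once forwarded");
        return header == 1;  // pretend klass id 1 means "object array"
      }
    };

    int main() {
      ToyObject to   = { 1 };
      ToyObject from = { 1 };
      printf("before forwarding: %d\n", from.is_obj_array());      // OK
      from.header = reinterpret_cast<uintptr_t>(&to) | 0x3;        // forward it
      // from.is_obj_array() would assert now; ask the forwardee instead.
      printf("after forwarding (via forwardee): %d\n", to.is_obj_array());
      return 0;
    }
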
*** 358,26 ***
    }
    return dest(region_attr);
  }
  
  void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
!                                                   oop const old, size_t word_sz, uint age,
                                                    HeapWord * const obj_ptr, uint node_index) const {
    PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
    if (alloc_buf->contains(obj_ptr)) {
!     _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                               dest_attr.type() == G1HeapRegionAttr::Old,
                                                               alloc_buf->word_sz() * HeapWordSize);
    } else {
!     _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                                dest_attr.type() == G1HeapRegionAttr::Old);
    }
  }
  
  NOINLINE
  HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
!                                                    oop old,
                                                     size_t word_sz,
                                                     uint age,
                                                     uint node_index) {
    HeapWord* obj_ptr = NULL;
    // Try slow-path allocation unless we're allocating old and old is already full.
--- 358,26 ---
    }
    return dest(region_attr);
  }
  
  void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
!                                                   Klass* klass, size_t word_sz, uint age,
                                                    HeapWord * const obj_ptr, uint node_index) const {
    PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
    if (alloc_buf->contains(obj_ptr)) {
!     _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(klass, word_sz * HeapWordSize, age,
                                                               dest_attr.type() == G1HeapRegionAttr::Old,
                                                               alloc_buf->word_sz() * HeapWordSize);
    } else {
!     _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(klass, word_sz * HeapWordSize, age,
                                                                dest_attr.type() == G1HeapRegionAttr::Old);
    }
  }
  
  NOINLINE
  HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
!                                                    Klass* klass,
                                                     size_t word_sz,
                                                     uint age,
                                                     uint node_index) {
    HeapWord* obj_ptr = NULL;
    // Try slow-path allocation unless we're allocating old and old is already full.

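The signature changes above replace the oop parameter with a Klass*: the caller loads the klass once, while it is still safe to do so, and threads it through report_promotion_event() and allocate_copy_slow() instead of having the helpers call old->klass() again. A minimal sketch of that pattern follows; the names are invented and it is not HotSpot code.

    // Toy sketch, not HotSpot code: load the class descriptor once, while it
    // is still safe, and pass it down to the slow-path helpers instead of
    // letting them re-read it from a header that may be forwarded concurrently.
    #include <cstddef>
    #include <cstdio>

    struct ToyKlass { const char* name; };

    struct ToyObject {
      ToyKlass* klass_field;  // clobbered by forwarding in the toy model
    };

    // After the change: the helper receives the klass and never touches the header.
    static void report_promotion(ToyKlass* klass, size_t word_sz) {
      printf("promoted %s (%zu words)\n", klass->name, word_sz);
    }

    int main() {
      ToyKlass int_array = { "[I" };
      ToyObject obj = { &int_array };
      ToyKlass* klass = obj.klass_field;  // single, early load
      report_promotion(klass, 16);        // threaded through, no re-read of obj
      return 0;
    }
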
*** 396,11 ***
    }
    if (obj_ptr != NULL) {
      update_numa_stats(node_index);
      if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
        // The events are checked individually as part of the actual commit
!       report_promotion_event(*dest_attr, old, word_sz, age, obj_ptr, node_index);
      }
    }
    return obj_ptr;
  }
  
--- 396,11 ---
    }
    if (obj_ptr != NULL) {
      update_numa_stats(node_index);
      if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
        // The events are checked individually as part of the actual commit
!       report_promotion_event(*dest_attr, klass, word_sz, age, obj_ptr, node_index);
      }
    }
    return obj_ptr;
  }
  

*** 421,11 ***
    assert(region_attr.is_in_cset(),
           "Unexpected region attr type: %s", region_attr.get_type_str());
  
    // Get the klass once.  We'll need it again later, and this avoids
    // re-decoding when it's compressed.
!   Klass* klass = old->klass();
    const size_t word_sz = old->size_given_klass(klass);
  
    uint age = 0;
    G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
    HeapRegion* const from_region = _g1h->heap_region_containing(old);
--- 421,17 ---
    assert(region_attr.is_in_cset(),
           "Unexpected region attr type: %s", region_attr.get_type_str());
  
    // Get the klass once.  We'll need it again later, and this avoids
    // re-decoding when it's compressed.
!   // NOTE: With compact headers, it is not safe to load the Klass* from old, because
+   // that would access the mark-word, and the mark-word may be changed at any time by
+   // concurrent promotion. A promoted mark-word would point to the forwardee, which
+   // may not yet have completed copying. Therefore we must load the Klass* from
+   // the mark-word that we have already loaded. This is safe, because we have checked
+   // in the caller that this object is not yet forwarded.
+   Klass* klass = old->forward_safe_klass(old_mark);
    const size_t word_sz = old->size_given_klass(klass);
  
    uint age = 0;
    G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
    HeapRegion* const from_region = _g1h->heap_region_containing(old);

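The added comment above is the heart of the change: with compact headers the Klass* must be decoded from the mark word snapshot (old_mark) taken before the forwarding check, because the object's live header may be overwritten by a concurrent promotion at any moment. Below is a self-contained toy model of that "use the snapshot, not the header" rule, with illustrative types and an invented encoding, not HotSpot code.

    // Toy model, not HotSpot code: the klass id lives in the same header word
    // that forwarding overwrites, so it must be decoded from the mark word
    // snapshot taken before the is_forwarded() check, never re-read from the
    // object, which another thread may forward at any moment.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    struct ToyMark {
      uintptr_t word;
      bool is_forwarded() const { return (word & 0x3) == 0x3; }
      uintptr_t klass_id() const { assert(!is_forwarded()); return word >> 2; }
    };

    struct ToyObject {
      ToyMark mark;
      // Decode the klass from the caller's snapshot of the mark word,
      // mirroring forward_safe_klass(old_mark) above.
      uintptr_t forward_safe_klass_id(ToyMark snapshot) const {
        return snapshot.klass_id();
      }
    };

    int main() {
      ToyObject obj = { { 42u << 2 } };   // klass id 42, not yet forwarded
      ToyMark m = obj.mark;               // snapshot, like old_mark in the caller
      obj.mark.word = 0x3;                // simulate a concurrent promotion
      printf("klass id from snapshot: %lu\n",
             (unsigned long)obj.forward_safe_klass_id(m));   // still 42
      return 0;
    }
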
*** 434,11 ***
    HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
  
    // PLAB allocations should succeed most of the time, so we'll
    // normally check against NULL once and that's it.
    if (obj_ptr == NULL) {
!     obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
--- 440,11 ---
    HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
  
    // PLAB allocations should succeed most of the time, so we'll
    // normally check against NULL once and that's it.
    if (obj_ptr == NULL) {
!     obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }

*** 591,11 ***
  
  NOINLINE
  oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
    assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
  
!   oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
    if (forward_ptr == NULL) {
      // Forward-to-self succeeded. We are the "owner" of the object.
      HeapRegion* r = _g1h->heap_region_containing(old);
  
      if (_g1h->notify_region_failed_evacuation(r->hrm_index())) {
--- 597,11 ---
  
  NOINLINE
  oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
    assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
  
!   oop forward_ptr = old->forward_to_self_atomic(m, memory_order_relaxed);
    if (forward_ptr == NULL) {
      // Forward-to-self succeeded. We are the "owner" of the object.
      HeapRegion* r = _g1h->heap_region_containing(old);
  
      if (_g1h->notify_region_failed_evacuation(r->hrm_index())) {
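
Finally, forward_to_atomic(old, m, ...) becomes forward_to_self_atomic(m, ...): on evacuation failure the object forwards to itself, so the target no longer needs to be passed explicitly. Below is a toy CAS-based model of the race and its two outcomes; it is illustrative only, not HotSpot code, and the real mark encoding differs.

    // Toy model, not HotSpot code: try to install a "forwarded to myself" mark
    // with a CAS; if the CAS fails, another thread already forwarded the object
    // and its forwardee wins.
    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    struct ToyObject {
      std::atomic<uintptr_t> mark;

      // Returns nullptr if we won the race (object now forwards to itself),
      // otherwise the forwardee installed by the winning thread.
      ToyObject* forward_to_self_atomic(uintptr_t expected_mark) {
        uintptr_t self_fwd = reinterpret_cast<uintptr_t>(this) | 0x3;
        uintptr_t observed = expected_mark;
        if (mark.compare_exchange_strong(observed, self_fwd,
                                         std::memory_order_relaxed)) {
          return nullptr;
        }
        return reinterpret_cast<ToyObject*>(observed & ~uintptr_t(0x3));
      }
    };

    int main() {
      ToyObject obj{ {0x1} };                       // unforwarded mark value 0x1
      ToyObject* prev = obj.forward_to_self_atomic(0x1);
      printf("forward-to-self succeeded: %d\n", prev == nullptr);
      return 0;
    }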