src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
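// Prefetch the mark word for write: the scavenger will soon test it and
// try to CAS a forwarding pointer into it.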
Prefetch::write(obj->mark_addr(), 0);
push_depth(ScannerTask(p));
}
- inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
+ inline void PSPromotionManager::promotion_trace_event(oop new_obj, Klass* klass,
size_t obj_size,
uint age, bool tenured,
const PSPromotionLAB* lab) {
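+ // The Klass* passed in here is the one the caller loaded before forwarding
+ // (see copy_unmarked_to_survivor_space below); lab is the PLAB the object
+ // was copied into, or nullptr if it was allocated directly in the target space.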
// Skip if memory allocation failed
if (new_obj != nullptr) {
if (lab != nullptr) {
// Promotion of object through newly allocated PLAB
if (gc_tracer->should_report_promotion_in_new_plab_event()) {
size_t obj_bytes = obj_size * HeapWordSize;
size_t lab_size = lab->capacity();
- gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
+ gc_tracer->report_promotion_in_new_plab_event(klass, obj_bytes,
age, tenured, lab_size);
}
} else {
// Promotion of object directly to heap
if (gc_tracer->should_report_promotion_outside_plab_event()) {
size_t obj_bytes = obj_size * HeapWordSize;
- gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
+ gc_tracer->report_promotion_outside_plab_event(klass, obj_bytes,
age, tenured);
}
}
}
}
// Ensure any loads from the forwardee follow all changes that precede
// the release-cmpxchg that performed the forwarding, possibly in some
// other thread.
OrderAccess::acquire();
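// (This acquire pairs with the release ordering of the forwarding CAS,
// see forward_to_atomic(..., memory_order_release) below.)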
// Return the already installed forwardee.
- return cast_to_oop(m.decode_pointer());
+ return o->forwardee(m);
}
}
//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we must be careful for the fast
// path.
//
inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
markWord test_mark) {
assert(should_scavenge(&o), "Sanity");
oop new_obj = nullptr;
bool new_obj_is_tenured = false;
- size_t new_obj_size = o->size();
+ // NOTE: With compact headers, it is not safe to load the Klass* from o, because
+ // that would access the mark-word, and the mark-word might be changed by
+ // concurrent promotion at any time. A promoted mark-word would point to the
+ // forwardee, which may not yet have completed copying. Therefore we must load
+ // the Klass* from the mark-word that we have already loaded. This is safe,
+ // because the caller has already checked that this object is not yet forwarded.
+ Klass* klass = o->forward_safe_klass(test_mark);
+ size_t new_obj_size = o->size_given_klass(klass);
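+ // (A rough sketch of what forward_safe_klass() is assumed to do, not the
+ // exact implementation: with compact headers it decodes the narrow Klass*
+ // bits carried in the mark word passed to it; otherwise it simply returns
+ // o->klass() from the separate klass word.)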
// Find the objects age, MT safe.
uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
test_mark.displaced_mark_helper().age() : test_mark.age();
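// If the object is locked, its original header (including the age bits) has
// been displaced, so the age must be read from the displaced mark word.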
if (new_obj == nullptr && !_young_gen_is_full) {
// Do we allocate directly, or flush and refill?
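// Objects larger than half a PLAB are allocated directly: refilling the
// PLAB for such an object would waste most of the fresh lab.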
if (new_obj_size > (YoungPLABSize / 2)) {
// Allocate this object directly
new_obj = cast_to_oop(young_space()->cas_allocate(new_obj_size));
- promotion_trace_event(new_obj, o, new_obj_size, age, false, nullptr);
+ promotion_trace_event(new_obj, klass, new_obj_size, age, false, nullptr);
} else {
// Flush and fill
_young_lab.flush();
HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
if (lab_base != nullptr) {
_young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
// Try the young lab allocation again.
new_obj = cast_to_oop(_young_lab.allocate(new_obj_size));
- promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
+ promotion_trace_event(new_obj, klass, new_obj_size, age, false, &_young_lab);
} else {
_young_gen_is_full = true;
}
}
}
if (!_old_gen_is_full) {
// Do we allocate directly, or flush and refill?
if (new_obj_size > (OldPLABSize / 2)) {
// Allocate this object directly
new_obj = cast_to_oop(old_gen()->allocate(new_obj_size));
- promotion_trace_event(new_obj, o, new_obj_size, age, true, nullptr);
+ promotion_trace_event(new_obj, klass, new_obj_size, age, true, nullptr);
} else {
// Flush and fill
_old_lab.flush();
HeapWord* lab_base = old_gen()->allocate(OldPLABSize);
if (lab_base != nullptr) {
_old_lab.initialize(MemRegion(lab_base, OldPLABSize));
// Try the old lab allocation again.
new_obj = cast_to_oop(_old_lab.allocate(new_obj_size));
- promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
+ promotion_trace_event(new_obj, klass, new_obj_size, age, true, &_old_lab);
}
}
}
// This is the promotion-failed test, and the code handling that failure.
// Copy obj
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size);
// Parallel GC claims with a release, so other threads may access this object
// after it has been claimed, and they must see the "completed" object.
- ContinuationGCSupport::transform_stack_chunk(new_obj);
+ if (UseCompactObjectHeaders) {
+ // The copy above is not atomic. Make sure we have seen the proper mark
+ // and re-install it into the copy, so that Klass* is guaranteed to be correct.
+ markWord mark = o->mark();
+ if (!mark.is_marked()) {
+ new_obj->set_mark(mark);
+ ContinuationGCSupport::transform_stack_chunk(new_obj);
+ } else {
+ // If we copied a mark-word that indicates 'forwarded' state, the object
+ // installation would not succeed. We cannot access Klass* anymore either.
+ // Skip the transformation.
+ }
+ } else {
+ ContinuationGCSupport::transform_stack_chunk(new_obj);
+ }
// Now we have to CAS in the header.
// Make copy visible to threads reading the forwardee.
oop forwardee = o->forward_to_atomic(new_obj, test_mark, memory_order_release);
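// (On failure, forward_to_atomic() returns the forwardee installed by the
// winning thread, and this copy must be discarded; that handling is not
// shown here.)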
if (forwardee == nullptr) { // forwardee is null when forwarding is successful