src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp

--- old/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp

 213     obj->oop_iterate(&cl);
 214   }
 215 }
 216 
 217 oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
 218   if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
 219     return load_reference_barrier_impl(obj);
 220   } else {
 221     return obj;
 222   }
 223 }
 224 
 225 oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
 226   if (obj != NULL) {
 227     return load_reference_barrier_not_null(obj);
 228   } else {
 229     return obj;
 230   }
 231 }
 232 
 233 
 234 oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj) {
 235   assert(ShenandoahLoadRefBarrier, "should be enabled");
 236   assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL), "evac should be in progress");
 237   shenandoah_assert_in_cset(NULL, obj);
 238 
 239   oop fwd = resolve_forwarded_not_null(obj);
 240   if (oopDesc::equals_raw(obj, fwd)) {
 241     ShenandoahEvacOOMScope oom_evac_scope;
 242 
 243     Thread* thread = Thread::current();
 244     oop res_oop = _heap->evacuate_object(obj, thread);
 245 
 246     // Since we are already here and have paid the price of getting through runtime call adapters
 247     // and acquiring the oom-scope, it makes sense to try to evacuate more adjacent objects,
 248     // thus amortizing the overhead. For sparsely live heaps, scan costs easily dominate
 249     // total assist costs and can introduce a lot of evacuation latency. This is why we
 250     // only scan the _nearest_ N objects, regardless of whether they are eligible for evac.
 251     // The scan itself should also avoid touching the non-marked objects below TAMS, because
 252     // their metadata (notably, klasses) may be incorrect already.
 253 
 254     size_t max = ShenandoahEvacAssist;
 255     if (max > 0) {
 256       // Traversal is special: it uses incomplete marking context, because it coalesces evac with mark.
 257       // Other code uses complete marking context, because evac happens after the mark.
 258       ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
 259                                       _heap->marking_context() : _heap->complete_marking_context();
 260 
 261       ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
 262       assert(r->is_cset(), "sanity");
 263 
 264       HeapWord* cur = (HeapWord*)obj + obj->size();
 265 
 266       size_t count = 0;
 267       while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
 268         oop cur_oop = oop(cur);
 269         if (oopDesc::equals_raw(cur_oop, resolve_forwarded_not_null(cur_oop))) {
 270           _heap->evacuate_object(cur_oop, thread);
 271         }
 272         cur = cur + cur_oop->size();
 273       }
 274     }
 275 
 276     return res_oop;
 277   }
 278   return fwd;
 279 }
 280 
 281 oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
 282   assert(ShenandoahLoadRefBarrier, "should be enabled");
 283   if (!CompressedOops::is_null(obj)) {
 284     bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
 285     oop fwd = resolve_forwarded_not_null(obj);
 286     if (evac_in_progress &&
 287         _heap->in_collection_set(obj) &&
 288         oopDesc::equals_raw(obj, fwd)) {
 289       Thread *t = Thread::current();
 290       if (t->is_GC_task_thread()) {
 291         return _heap->evacuate_object(obj, t);
 292       } else {
 293         ShenandoahEvacOOMScope oom_evac_scope;
 294         return _heap->evacuate_object(obj, t);
 295       }
 296     } else {
 297       return fwd;
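The essential change between the old version above and the new version below is the mutator barrier's signature: it now receives the address the reference was loaded from and writes the forwardee back into it ("self-healing"), so later loads through the same slot no longer take the slow path. A compilable sketch of that pattern, using simplified stand-in types rather than HotSpot's (Obj, resolve_forwarded, evacuate are all hypothetical):

#include <atomic>
#include <cstdio>

// Hypothetical stand-ins for HotSpot types and helpers; illustration only.
struct Obj { Obj* fwd; };                          // forwarding pointer; points to self if not copied
static Obj* resolve_forwarded(Obj* o) { return o->fwd; }
static Obj* evacuate(Obj* o) {                     // pretend-copy to a new location
  static Obj copy = { &copy };
  o->fwd = &copy;
  return &copy;
}

// Self-healing load-reference barrier, the shape of the patched mutator path:
// resolve or evacuate, then CAS the forwardee back into the load slot so
// subsequent loads through the same slot skip the slow path entirely.
static Obj* load_ref_barrier(Obj* obj, std::atomic<Obj*>* load_addr) {
  Obj* fwd = resolve_forwarded(obj);
  if (fwd == obj) {
    fwd = evacuate(obj);                           // in cset, not yet copied
  }
  if (load_addr != nullptr && fwd != obj) {
    Obj* expected = obj;
    // CAS rather than a plain store: another thread may have healed the slot
    // already, or stored a different reference; only replace the stale value.
    load_addr->compare_exchange_strong(expected, fwd);
  }
  return fwd;
}

int main() {
  Obj o = { &o };
  std::atomic<Obj*> slot(&o);
  Obj* r = load_ref_barrier(slot.load(), &slot);
  std::printf("slot healed to forwardee: %s\n", slot.load() == r ? "yes" : "no");
  return 0;
}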
+++ new/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp

 213     obj->oop_iterate(&cl);
 214   }
 215 }
 216 
 217 oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
 218   if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
 219     return load_reference_barrier_impl(obj);
 220   } else {
 221     return obj;
 222   }
 223 }
 224 
 225 oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
 226   if (obj != NULL) {
 227     return load_reference_barrier_not_null(obj);
 228   } else {
 229     return obj;
 230   }
 231 }
 232 
 233 oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, oop* load_addr) {
 234   return load_reference_barrier_mutator_work(obj, load_addr);
 235 }
 236 
 237 oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, narrowOop* load_addr) {
 238   return load_reference_barrier_mutator_work(obj, load_addr);
 239 }
 240 
 241 template <class T>
 242 oop ShenandoahBarrierSet::load_reference_barrier_mutator_work(oop obj, T* load_addr) {
 243   assert(ShenandoahLoadRefBarrier, "should be enabled");
 244   shenandoah_assert_in_cset(load_addr, obj);
 245 
 246   oop fwd = resolve_forwarded_not_null(obj);
 247   if (oopDesc::equals_raw(obj, fwd)) {
 248     assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL),
 249            "evac should be in progress");
 250 
 251     ShenandoahEvacOOMScope oom_evac_scope;
 252 
 253     Thread* thread = Thread::current();
 254     oop res_oop = _heap->evacuate_object(obj, thread);
 255 
 256     // Since we are already here and have paid the price of getting through runtime call adapters
 257     // and acquiring the oom-scope, it makes sense to try to evacuate more adjacent objects,
 258     // thus amortizing the overhead. For sparsely live heaps, scan costs easily dominate
 259     // total assist costs and can introduce a lot of evacuation latency. This is why we
 260     // only scan the _nearest_ N objects, regardless of whether they are eligible for evac.
 261     // The scan itself should also avoid touching the non-marked objects below TAMS, because
 262     // their metadata (notably, klasses) may be incorrect already.
 263 
 264     size_t max = ShenandoahEvacAssist;
 265     if (max > 0) {
 266       // Traversal is special: it uses incomplete marking context, because it coalesces evac with mark.
 267       // Other code uses complete marking context, because evac happens after the mark.
 268       ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
 269                                       _heap->marking_context() : _heap->complete_marking_context();
 270 
 271       ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
 272       assert(r->is_cset(), "sanity");
 273 
 274       HeapWord* cur = (HeapWord*)obj + obj->size();
 275 
 276       size_t count = 0;
 277       while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
 278         oop cur_oop = oop(cur);
 279         if (oopDesc::equals_raw(cur_oop, resolve_forwarded_not_null(cur_oop))) {
 280           _heap->evacuate_object(cur_oop, thread);
 281         }
 282         cur = cur + cur_oop->size();
 283       }
 284     }
 285 
 286     fwd = res_oop;
 287   }
 288 
 289   if (load_addr != NULL && fwd != obj) {
 290     // Since we are here and we know the load address, update the reference.
 291     ShenandoahHeap::cas_oop(fwd, load_addr, obj);
 292   }
 293 
 294   return fwd;
 295 }
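A note on the shape of the new entry points: the two public load_reference_barrier_mutator overloads exist because compiled code may hold either an uncompressed oop* slot or a compressed narrowOop* slot, and both forward to the single template body above. A minimal sketch of the same dispatch pattern (stand-in names, not the HotSpot declarations):

#include <cstdint>

struct Obj;
typedef uint32_t NarrowRef;   // stand-in for narrowOop

// One shared body, parameterized on the slot type...
template <class T>
Obj* barrier_work(Obj* obj, T* load_addr) {
  (void)load_addr;            // the real body resolves/evacuates and heals *load_addr
  return obj;
}

// ...fronted by two thin entry points, mirroring the pair of
// load_reference_barrier_mutator overloads in this patch.
Obj* barrier(Obj* obj, Obj** slot)      { return barrier_work(obj, slot); }
Obj* barrier(Obj* obj, NarrowRef* slot) { return barrier_work(obj, slot); }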
 296 
 297 oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
 298   assert(ShenandoahLoadRefBarrier, "should be enabled");
 299   if (!CompressedOops::is_null(obj)) {
 300     bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
 301     oop fwd = resolve_forwarded_not_null(obj);
 302     if (evac_in_progress &&
 303         _heap->in_collection_set(obj) &&
 304         oopDesc::equals_raw(obj, fwd)) {
 305       Thread *t = Thread::current();
 306       if (t->is_GC_task_thread()) {
 307         return _heap->evacuate_object(obj, t);
 308       } else {
 309         ShenandoahEvacOOMScope oom_evac_scope;
 310         return _heap->evacuate_object(obj, t);
 311       }
 312     } else {
 313       return fwd;
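On the evac-assist comment above (new lines 256..262): the bounded neighbor scan works because a region is parsable up to top(), so the next object starts exactly size() words after the current one. A self-contained sketch of that walk under simplified assumptions (a flat array stands in for the region; none of the names below are HotSpot's):

#include <cstddef>
#include <cstdio>

// Simplified stand-ins; none of these are the real HotSpot types.
struct FakeObj { size_t size_words; bool marked; bool evacuated; };

// Evac-assist sketch: starting right after the just-evacuated object, visit
// at most max consecutive marked neighbors and evacuate any not yet copied.
// Mirrors the bounded while-loop in load_reference_barrier_mutator_work.
static void evac_assist(FakeObj* region, size_t top, size_t cur, size_t max) {
  size_t count = 0;
  while (cur < top && region[cur].marked && count++ < max) {
    FakeObj& o = region[cur];
    if (!o.evacuated) {
      o.evacuated = true;     // stand-in for _heap->evacuate_object(cur_oop, thread)
    }
    cur += o.size_words;      // parsable heap: next object begins size words later
  }
}

int main() {
  FakeObj region[8] = {};
  for (size_t i = 0; i < 8; i += 2) {
    region[i].size_words = 2;
    region[i].marked = true;
  }
  evac_assist(region, 8, 2, 10);  // 10 stands in for ShenandoahEvacAssist
  std::printf("neighbor at 2 evacuated: %d\n", (int)region[2].evacuated);
  return 0;
}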