src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

Old version:

 24 
 25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 27 
 28 #include "gc/shenandoah/shenandoahHeap.hpp"
 29 
 30 #include "classfile/javaClasses.inline.hpp"
 31 #include "gc/shared/markBitMap.inline.hpp"
 32 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 33 #include "gc/shared/suspendibleThreadSet.hpp"
 34 #include "gc/shared/tlab_globals.hpp"
 35 #include "gc/shenandoah/shenandoahAsserts.hpp"
 36 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 37 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
 38 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
 39 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 40 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 41 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 42 #include "gc/shenandoah/shenandoahControlThread.hpp"
 43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 44 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 45 #include "oops/compressedOops.inline.hpp"
 46 #include "oops/oop.inline.hpp"
 47 #include "runtime/atomic.hpp"
 48 #include "runtime/prefetch.inline.hpp"
 49 #include "runtime/thread.hpp"
 50 #include "utilities/copy.hpp"
 51 #include "utilities/globalDefinitions.hpp"
 52 
 53 inline ShenandoahHeap* ShenandoahHeap::heap() {
 54   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
 55 }
 56 
 57 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
 58   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
 59   // get_region() provides the bounds-check and returns NULL on OOB.
 60   return _heap->get_region(new_index - 1);
 61 }
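For readers outside HotSpot, the pattern above is a lock-free work-claiming iterator: every caller atomically reserves the next index, so any number of GC workers can share one iterator and each region is handed out exactly once. A minimal stand-alone sketch of the same shape, using std::atomic in place of HotSpot's Atomic wrapper (all names here are illustrative):

#include <atomic>
#include <cstddef>

struct Region;  // opaque region type for the sketch

class ClaimingIterator {
  std::atomic<size_t> _index{0};
  Region** _regions;
  size_t   _num_regions;
public:
  ClaimingIterator(Region** regions, size_t n)
    : _regions(regions), _num_regions(n) {}

  // Each call claims a distinct index; once the array is exhausted it
  // returns nullptr, mirroring get_region()'s NULL-on-out-of-bounds contract.
  Region* next() {
    size_t claimed = _index.fetch_add(1, std::memory_order_relaxed);
    return (claimed < _num_regions) ? _regions[claimed] : nullptr;
  }
};

Relaxed ordering suffices because the only thing being synchronized is index ownership; the region contents are published by other means.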
 62 
 63 inline bool ShenandoahHeap::has_forwarded_objects() const {
 64   return _gc_state.is_set(HAS_FORWARDED);

247     return cancelled_gc();
248   }
249 
250   jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
251   if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
252     if (SuspendibleThreadSet::should_yield()) {
253       SuspendibleThreadSet::yield();
254     }
255 
256     // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
257     // to restore to CANCELLABLE.
258     if (prev == CANCELLABLE) {
259       _cancelled_gc.set(CANCELLABLE);
260     }
261     return false;
262   } else {
263     return true;
264   }
265 }
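The branch structure above is a small three-state protocol: a thread briefly parks the flag at NOT_CANCELLED so a racing cancellation cannot land while the thread is yielding to a safepoint, and only the thread whose CAS actually performed the transition restores CANCELLABLE. A simplified sketch of the same protocol with std::atomic (should_yield/do_yield are hypothetical stand-ins for the SuspendibleThreadSet calls, not the HotSpot API):

#include <atomic>

enum GCState : signed char { CANCELLABLE = 0, CANCELLED = 1, NOT_CANCELLED = -1 };

std::atomic<signed char> g_cancelled{CANCELLABLE};

bool should_yield();   // hypothetical stand-in
void do_yield();       // hypothetical stand-in

bool check_cancelled_or_yield() {
  signed char prev = CANCELLABLE;
  // Try CANCELLABLE -> NOT_CANCELLED; on failure, prev holds the observed state.
  g_cancelled.compare_exchange_strong(prev, NOT_CANCELLED);
  if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
    if (should_yield()) {
      do_yield();          // a cancel request cannot be lost while parked here
    }
    // Only the thread that actually moved the flag restores it.
    if (prev == CANCELLABLE) {
      g_cancelled.store(CANCELLABLE);
    }
    return false;          // GC was not cancelled
  }
  return true;             // observed CANCELLED: GC is being aborted
}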
266 
267 inline void ShenandoahHeap::clear_cancelled_gc() {
268   _cancelled_gc.set(CANCELLABLE);
269   _oom_evac_handler.clear();
270 }
271 
272 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
273   assert(UseTLAB, "TLABs should be enabled");
274 
275   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
276   if (gclab == NULL) {
277     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
278            "Performance: thread should have GCLAB: %s", thread->name());
279     // No GCLABs in this thread, fallback to shared allocation
280     return NULL;
281   }
282   HeapWord* obj = gclab->allocate(size);
283   if (obj != NULL) {
284     return obj;
285   }
286   // GCLAB is exhausted; take the slow path, which refills the GCLAB or falls back to shared allocation.
287   return allocate_from_gclab_slow(thread, size);
288 }
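allocate_from_gclab() is the classic two-level LAB scheme: try a thread-local bump-pointer buffer first, and only fall into a slow path (refill or shared allocation) when it is exhausted. A self-contained sketch of that shape, with a toy Lab standing in for HotSpot's PLAB (names hypothetical):

#include <cstddef>

// Toy bump-pointer buffer standing in for PLAB.
struct Lab {
  char* _top;
  char* _end;

  void* allocate(size_t bytes) {
    if (_top + bytes <= _end) {
      void* obj = _top;
      _top += bytes;        // plain bump: the buffer is thread-local, no atomics
      return obj;
    }
    return nullptr;         // exhausted
  }

  // Roll back the most recent allocation (used on a lost evacuation race).
  void undo_allocation(void* obj, size_t bytes) {
    if (static_cast<char*>(obj) + bytes == _top) {
      _top = static_cast<char*>(obj);
    }
  }
};

void* allocate_shared_slow(size_t bytes);   // hypothetical shared/refill path

void* allocate_gc_copy(Lab* lab, size_t bytes) {
  if (lab == nullptr) {
    return nullptr;                    // no LAB: caller uses the shared path
  }
  if (void* obj = lab->allocate(bytes)) {
    return obj;                        // fast path
  }
  return allocate_shared_slow(bytes);  // slow path
}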
289 
290 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
291   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
292     // This thread went through the OOM during evac protocol and it is safe to return
293     // the forward pointer. It must not attempt to evacuate any more.
294     return ShenandoahBarrierSet::resolve_forwarded(p);
295   }
296 
297   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
298 
299   size_t size = p->size();
300 
301   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
302 
303   bool alloc_from_gclab = true;
304   HeapWord* copy = NULL;
305 
306 #ifdef ASSERT
307   if (ShenandoahOOMDuringEvacALot &&
308       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
309     copy = NULL;
310   } else {
311 #endif
312     if (UseTLAB) {
313       copy = allocate_from_gclab(thread, size);
314     }
315     if (copy == NULL) {
316       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
317       copy = allocate_memory(req);
318       alloc_from_gclab = false;
319     }
320 #ifdef ASSERT
321   }
322 #endif
323 
324   if (copy == NULL) {
325     control_thread()->handle_alloc_failure_evac(size);
326 
327     _oom_evac_handler.handle_out_of_memory_during_evacuation();
328 
329     return ShenandoahBarrierSet::resolve_forwarded(p);
330   }
331 
332   // Copy the object:
333   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
334 
335   // Try to install the new forwarding pointer.
336   oop copy_val = cast_to_oop(copy);
337   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
338   if (result == copy_val) {
339     // Successfully evacuated. Our copy is now the public one!
340     shenandoah_assert_correct(NULL, copy_val);
341     return copy_val;
342   } else {
343     // Failed to evacuate. We need to deal with the object that is left behind. Since this
344     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
345     // But if it happens to contain references to evacuated regions, those references would
346     // not get updated for this stale copy during this cycle, and we will crash while scanning
347     // it the next cycle.
348     //
349     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
350     // object will overwrite this stale copy, or the filler object on LAB retirement will
351     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
352     // have to explicitly overwrite the copy with the filler object. With that overwrite,
353     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
354     if (alloc_from_gclab) {
355       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
356     } else {
357       fill_with_object(copy, size);
358       shenandoah_assert_correct(NULL, copy_val);
359     }
360     shenandoah_assert_correct(NULL, result);
361     return result;
362   }
363 }
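The whole function is an instance of speculative copying: allocate privately, copy, then publish with a single CAS on the forwarding slot; the loser of the race retracts its copy (LAB rollback or filler object) and adopts the winner's. A compact sketch of just that protocol, using std::atomic and illustrative types (in the real code the forwarding state lives in the mark word, handled by ShenandoahForwarding, and std::memcpy stands in for Copy::aligned_disjoint_words):

#include <atomic>
#include <cstddef>
#include <cstring>

struct Obj {
  std::atomic<Obj*> fwd;   // forwarding slot; points to self when not forwarded
  size_t size_bytes;
  // ... payload follows ...
};

void* gc_alloc(size_t bytes);            // hypothetical allocation path
void  gc_undo(void* p, size_t bytes);    // hypothetical rollback / filler write

Obj* evacuate(Obj* from) {
  void* raw = gc_alloc(from->size_bytes);
  if (raw == nullptr) {
    return from->fwd.load();             // OOM: surrender, return current winner
  }
  // Speculative private copy; no other thread can see it before the CAS below.
  std::memcpy(raw, from, from->size_bytes);
  Obj* copy = static_cast<Obj*>(raw);
  copy->fwd.store(copy, std::memory_order_relaxed);

  Obj* expected = from;                  // self-pointer means "not yet forwarded"
  if (from->fwd.compare_exchange_strong(expected, copy)) {
    return copy;                         // published: our copy is the object now
  }
  gc_undo(raw, from->size_bytes);        // lost the race: retract the stale copy
  return expected;                       // the failed CAS left the winner's copy here
}

The stale-copy comment above is the important part: the retraction (undo or filler) is what keeps the next marking cycle from scanning a never-updated duplicate.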
364 
365 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
366   oop obj = cast_to_oop(entry);
367   return !_marking_context->is_marked_strong(obj);
368 }
369 
370 inline bool ShenandoahHeap::in_collection_set(oop p) const {
371   assert(collection_set() != NULL, "Sanity");
372   return collection_set()->is_in(p);
373 }
374 
375 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
376   assert(collection_set() != NULL, "Sanity");
377   return collection_set()->is_in_loc(p);
378 }
379 
380 inline bool ShenandoahHeap::is_stable() const {
381   return _gc_state.is_clear();
382 }
383 
384 inline bool ShenandoahHeap::is_idle() const {
385   return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
386 }
387 
388 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
389   return _gc_state.is_set(MARKING);
390 }
391 
392 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
393   return _gc_state.is_set(EVACUATION);
394 }
395 
396 inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
397   return _gc_state.is_set(mask);
398 }
399 
400 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
401   return _degenerated_gc_in_progress.is_set();
402 }
403 
404 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
405   return _full_gc_in_progress.is_set();
406 }
407 
408 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
409   return _full_gc_move_in_progress.is_set();
410 }
411 
412 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
413   return _gc_state.is_set(UPDATEREFS);
414 }
415 
416 inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
417   return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
418 }
419 
420 inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
421   return _concurrent_strong_root_in_progress.is_set();
422 }
423 
424 inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
425   return _gc_state.is_set(WEAK_ROOTS);
426 }
427 
428 template<class T>
429 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
430   marked_object_iterate(region, cl, region->top());
431 }
432 
433 template<class T>
434 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
435   assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
436 
437   ShenandoahMarkingContext* const ctx = complete_marking_context();
438   assert(ctx->is_complete(), "sanity");
439 
440   HeapWord* tams = ctx->top_at_mark_start(region);
441 
442   size_t skip_bitmap_delta = 1;
443   HeapWord* start = region->bottom();
444   HeapWord* end = MIN2(tams, region->end());
445 
446   // Step 1. Scan below the TAMS based on bitmap data.
447   HeapWord* limit_bitmap = MIN2(limit, tams);
448 
449   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
450   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
451   HeapWord* cb = ctx->get_next_marked_addr(start, end);
452 
453   intx dist = ShenandoahMarkScanPrefetch;
454   if (dist > 0) {
455     // Batched scan that prefetches the oop data, anticipating the access to
456     // either header, oop field, or forwarding pointer. Note that we cannot
457     // touch anything in the oop while it is still being prefetched, so that the
458     // prefetch has enough time to work. This is why we try to scan the bitmap linearly,

549     HeapWord* bottom = region->bottom();
550     if (top > bottom) {
551       region = region->humongous_start_region();
552       ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
553       marked_object_iterate(region, &objs);
554     }
555   } else {
556     ShenandoahObjectToOopClosure<T> objs(cl);
557     marked_object_iterate(region, &objs, top);
558   }
559 }
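The batching that the comment at lines 455-458 describes can be shown in isolation: collect a window of marked addresses from the bitmap first, issuing a prefetch for each, and only then visit them, so every prefetch has a batch's worth of work in flight before its cache line is needed. A sketch under those assumptions (uses the GCC/Clang __builtin_prefetch; names are illustrative):

#include <cstddef>

// Visit n marked addresses in prefetched batches of up to dist entries.
template <typename Visitor>
void scan_batched(char** marked, size_t n, size_t dist, Visitor visit) {
  const size_t kMaxBatch = 64;
  char* batch[kMaxBatch];
  size_t i = 0;
  while (i < n) {
    size_t filled = 0;
    // Phase 1: walk the addresses linearly, prefetching but not touching objects.
    while (filled < dist && filled < kMaxBatch && i + filled < n) {
      batch[filled] = marked[i + filled];
      __builtin_prefetch(batch[filled]);   // compiler-specific prefetch hint
      filled++;
    }
    // Phase 2: by now the earliest prefetches have had time to complete.
    for (size_t j = 0; j < filled; j++) {
      visit(batch[j]);
    }
    i += filled;
  }
}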
560 
561 inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
562   if (region_idx < _num_regions) {
563     return _regions[region_idx];
564   } else {
565     return NULL;
566   }
567 }
568 
569 inline void ShenandoahHeap::mark_complete_marking_context() {
570   _marking_context->mark_complete();
571 }
572 
573 inline void ShenandoahHeap::mark_incomplete_marking_context() {
574   _marking_context->mark_incomplete();
575 }
576 
577 inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
578   assert(_marking_context->is_complete(), "sanity");
579   return _marking_context;
580 }
581 
582 inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
583   return _marking_context;
584 }
585 
586 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
New version:

 24 
 25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 27 
 28 #include "gc/shenandoah/shenandoahHeap.hpp"
 29 
 30 #include "classfile/javaClasses.inline.hpp"
 31 #include "gc/shared/markBitMap.inline.hpp"
 32 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 33 #include "gc/shared/suspendibleThreadSet.hpp"
 34 #include "gc/shared/tlab_globals.hpp"
 35 #include "gc/shenandoah/shenandoahAsserts.hpp"
 36 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 37 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
 38 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
 39 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 40 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 41 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 42 #include "gc/shenandoah/shenandoahControlThread.hpp"
 43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 44 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 45 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 47 #include "gc/shenandoah/mode/shenandoahMode.hpp"
 48 #include "oops/compressedOops.inline.hpp"
 49 #include "oops/oop.inline.hpp"
 50 #include "runtime/atomic.hpp"
 51 #include "runtime/prefetch.inline.hpp"
 52 #include "runtime/thread.hpp"
 53 #include "utilities/copy.hpp"
 54 #include "utilities/globalDefinitions.hpp"
 55 
 56 inline ShenandoahHeap* ShenandoahHeap::heap() {
 57   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
 58 }
 59 
 60 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
 61   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
 62   // get_region() provides the bounds-check and returns NULL on OOB.
 63   return _heap->get_region(new_index - 1);
 64 }
 65 
 66 inline bool ShenandoahHeap::has_forwarded_objects() const {
 67   return _gc_state.is_set(HAS_FORWARDED);

250     return cancelled_gc();
251   }
252 
253   jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
254   if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
255     if (SuspendibleThreadSet::should_yield()) {
256       SuspendibleThreadSet::yield();
257     }
258 
259     // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
260     // to restore to CANCELLABLE.
261     if (prev == CANCELLABLE) {
262       _cancelled_gc.set(CANCELLABLE);
263     }
264     return false;
265   } else {
266     return true;
267   }
268 }
269 
270 inline void ShenandoahHeap::clear_cancelled_gc(bool clear_oom_handler) {
271   _cancelled_gc.set(CANCELLABLE);
272   if (_cancel_requested_time > 0) {
273     double cancel_time = os::elapsedTime() - _cancel_requested_time;
274     log_info(gc)("GC cancellation took %.3fs", cancel_time);
275     _cancel_requested_time = 0;
276   }
277 
278   if (clear_oom_handler) {
279     _oom_evac_handler.clear();
280   }
281 }
282 
283 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
284   assert(UseTLAB, "TLABs should be enabled");
285 
286   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
287   if (gclab == NULL) {
288     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
289            "Performance: thread should have GCLAB: %s", thread->name());
290     // No GCLABs in this thread, fallback to shared allocation
291     return NULL;
292   }
293   HeapWord* obj = gclab->allocate(size);
294   if (obj != NULL) {
295     return obj;
296   }
297   return allocate_from_gclab_slow(thread, size);
298 }
299 
300 inline HeapWord* ShenandoahHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
301   assert(UseTLAB, "TLABs should be enabled");
302 
303   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
304   if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
305     return NULL;
306   } else if (plab == NULL) {
307     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
308            "Performance: thread should have PLAB: %s", thread->name());
309     // No PLABs in this thread, fallback to shared allocation
310     return NULL;
311   }
312   HeapWord* obj = plab->allocate(size);
313   if (obj == NULL) {
314     obj = allocate_from_plab_slow(thread, size, is_promotion);
315   }
316   if (is_promotion) {
317     ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
318   } else {
319     ShenandoahThreadLocalData::add_to_plab_evacuated(thread, size * HeapWordSize);
320   }
321   return obj;
322 }
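Beyond the allocation itself, the function keeps per-thread byte counters so the heuristics can later tell how much of each PLAB went to promotion versus same-generation evacuation; try_evacuate_object (below) subtracts the same amounts when a copy is retracted. A sketch of that symmetric bookkeeping (PlabStats and the function names are illustrative, not the ShenandoahThreadLocalData API):

#include <cstddef>

// Illustrative per-thread counters, mirroring the plab_promoted /
// plab_evacuated totals kept in ShenandoahThreadLocalData.
struct PlabStats {
  size_t promoted_bytes  = 0;
  size_t evacuated_bytes = 0;
};

void plab_account(PlabStats& s, size_t bytes, bool is_promotion) {
  if (is_promotion) {
    s.promoted_bytes += bytes;
  } else {
    s.evacuated_bytes += bytes;
  }
}

// Exact inverse, applied when a lost evacuation race rolls the copy back,
// so the counters only ever reflect copies that were actually published.
void plab_unaccount(PlabStats& s, size_t bytes, bool is_promotion) {
  if (is_promotion) {
    s.promoted_bytes -= bytes;
  } else {
    s.evacuated_bytes -= bytes;
  }
}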
323 
324 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
325   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
326   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
327     // This thread went through the OOM during evac protocol and it is safe to return
328     // the forward pointer. It must not attempt to evacuate any more.
329     return ShenandoahBarrierSet::resolve_forwarded(p);
330   }
331 
332   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
333 
334   ShenandoahHeapRegion* r = heap_region_containing(p);
335   assert(!r->is_humongous(), "never evacuate humongous objects");
336 
337   ShenandoahRegionAffiliation target_gen = r->affiliation();
338   if (mode()->is_generational() && ShenandoahHeap::heap()->is_gc_generation_young() &&
339       target_gen == YOUNG_GENERATION && ShenandoahPromoteTenuredObjects) {
340     markWord mark = p->mark();
341     if (mark.is_marked()) {
342       // Already forwarded.
343       return ShenandoahBarrierSet::resolve_forwarded(p);
344     }
345     if (mark.has_displaced_mark_helper()) {
346       // The mark word is displaced (e.g. the object is locked); reading the age bits
347       // safely would require extra synchronization. Skip the promotion attempt for this one.
348     } else if (r->age() + mark.age() >= InitialTenuringThreshold) {
349       oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
350       if (result != NULL) {
351         return result;
352       }
353       // If we failed to promote this aged object, we'll fall through to the code below and evacuate it to young-gen.
354     }
355   }
356   return try_evacuate_object(p, thread, r, target_gen);
357 }
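The promotion test above combines two age sources, the region's age and the per-object age bits in the mark word, and skips objects whose mark word is displaced. Restated as a small predicate (kTenuringThreshold stands in for InitialTenuringThreshold; the types are illustrative):

#include <cstdint>

struct RegionView { uint32_t age; };       // region-level age
const uint32_t kTenuringThreshold = 7;     // stands in for InitialTenuringThreshold

// mark_age: the age bits read from the object's mark word.
bool should_promote(const RegionView& r, uint32_t mark_age, bool mark_displaced,
                    bool already_forwarded) {
  if (already_forwarded) return false;  // someone else evacuated it; just resolve
  if (mark_displaced)    return false;  // reading the age safely would need extra sync
  return r.age + mark_age >= kTenuringThreshold;
}

On a failed promotion the object is still evacuated, just to young gen, which is why try_evacuate_object returns NULL only for the promotion-failure case and the caller falls through.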
358 
359 // try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
360 // to OLD_GENERATION.
361 inline oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
362                                                ShenandoahRegionAffiliation target_gen) {
363   bool alloc_from_lab = true;
364   HeapWord* copy = NULL;
365   size_t size = p->size();
366   bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();
367 
368 #ifdef ASSERT
369   if (ShenandoahOOMDuringEvacALot &&
370       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
371     copy = NULL;
372   } else {
373 #endif
374     if (UseTLAB) {
375       switch (target_gen) {
376         case YOUNG_GENERATION: {
377            copy = allocate_from_gclab(thread, size);
378            if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
379              // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve.  Try resetting
380              // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
381              ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
382              copy = allocate_from_gclab(thread, size);
383              // If we still get nullptr, we'll try a shared allocation below.
384            }
385            break;
386         }
387         case OLD_GENERATION: {
388            if (ShenandoahUsePLAB) {
389              copy = allocate_from_plab(thread, size, is_promotion);
390              if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread))) {
391                // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve.  Try resetting
392                // the desired PLAB size and retry PLAB allocation to avoid cascading of shared memory allocations.
393                ShenandoahThreadLocalData::set_plab_size(thread, PLAB::min_size());
394                copy = allocate_from_plab(thread, size, is_promotion);
395                // If we still get nullptr, we'll try a shared allocation below.
396              }
397            }
398            break;
399         }
400         default: {
401           ShouldNotReachHere();
402           break;
403         }
404       }
405     }
406 
407     if (copy == NULL) {
408       // If we failed to allocate in a LAB, we'll try a shared allocation.
409       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
410       copy = allocate_memory(req, is_promotion);
411       alloc_from_lab = false;
412     }
413 #ifdef ASSERT
414   }
415 #endif
416 
417   if (copy == NULL) {
418     if (target_gen == OLD_GENERATION) {
419       assert(mode()->is_generational(), "Should only be here in generational mode.");
420       if (from_region->is_young()) {
421         // Signal that promotion failed. The caller will evacuate this aged object within young gen instead.
422         handle_promotion_failure();
423         return NULL;
424       } else {
425         // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
426         // after the evacuation threads have finished.
427         handle_old_evacuation_failure();
428       }
429     }
430 
431     control_thread()->handle_alloc_failure_evac(size);
432 
433     _oom_evac_handler.handle_out_of_memory_during_evacuation();
434 
435     return ShenandoahBarrierSet::resolve_forwarded(p);
436   }
437 
438   // Copy the object:
439   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
440 
441   oop copy_val = cast_to_oop(copy);
442 
443   if (mode()->is_generational() && target_gen == YOUNG_GENERATION && is_aging_cycle()) {
444     ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
445   }
446 
447   // Try to install the new forwarding pointer.
448   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
449   if (result == copy_val) {
450     // Successfully evacuated. Our copy is now the public one!
451     if (mode()->is_generational() && target_gen == OLD_GENERATION) {
452       handle_old_evacuation(copy, size, from_region->is_young());
453     }
454     shenandoah_assert_correct(NULL, copy_val);
455     return copy_val;
456   } else {
457     // Failed to evacuate. We need to deal with the object that is left behind. Since this
458     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
459     // But if it happens to contain references to evacuated regions, those references would
460     // not get updated for this stale copy during this cycle, and we will crash while scanning
461     // it the next cycle.
462     if (alloc_from_lab) {
463        // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
464        // object will overwrite this stale copy, or the filler object on LAB retirement will
465        // do this.
466        switch (target_gen) {
467          case YOUNG_GENERATION: {
468             ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
469             break;
470          }
471          case OLD_GENERATION: {
472             ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
473             if (is_promotion) {
474               ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
475             } else {
476               ShenandoahThreadLocalData::subtract_from_plab_evacuated(thread, size * HeapWordSize);
477             }
478             break;
479          }
480          default: {
481            ShouldNotReachHere();
482            break;
483          }
484        }
485     } else {
486       // For non-LAB allocations, we have no way to retract the allocation, and
487       // have to explicitly overwrite the copy with the filler object. With that overwrite,
488       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
489       fill_with_object(copy, size);
490       shenandoah_assert_correct(NULL, copy_val);
491       // For non-LAB allocations, the object has already been registered
492     }
493     shenandoah_assert_correct(NULL, result);
494     return result;
495   }
496 }
497 
498 inline void ShenandoahHeap::increase_object_age(oop obj, uint additional_age) {
499   markWord w = obj->has_displaced_mark() ? obj->displaced_mark() : obj->mark();
500   w = w.set_age(MIN2(markWord::max_age, w.age() + additional_age));
501   if (obj->has_displaced_mark()) {
502     obj->set_displaced_mark(w);
503   } else {
504     obj->set_mark(w);
505   }
506 }
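increase_object_age() saturates at markWord::max_age because the mark word reserves only a few bits for age, and repeated aging cycles must not overflow into the neighboring bits. The arithmetic in isolation (kMaxAge is illustrative; in HotSpot the 4-bit age field gives markWord::max_age == 15):

#include <algorithm>
#include <cstdint>

const uint32_t kMaxAge = 15;   // 4-bit age field in the mark word

uint32_t bump_age(uint32_t current_age, uint32_t additional_age) {
  // Same saturation as increase_object_age(): clamp, never wrap.
  return std::min(kMaxAge, current_age + additional_age);
}
// e.g. bump_age(14, 3) == 15; further aging cycles leave the field at max.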
507 
508 inline bool ShenandoahHeap::clear_old_evacuation_failure() {
509   return _old_gen_oom_evac.try_unset();
510 }
511 
512 inline bool ShenandoahHeap::is_old(oop obj) const {
513   return is_gc_generation_young() && is_in_old(obj);
514 }
515 
516 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
517   oop obj = cast_to_oop(entry);
518   return !_marking_context->is_marked_strong(obj);
519 }
520 
521 inline bool ShenandoahHeap::in_collection_set(oop p) const {
522   assert(collection_set() != NULL, "Sanity");
523   return collection_set()->is_in(p);
524 }
525 
526 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
527   assert(collection_set() != NULL, "Sanity");
528   return collection_set()->is_in_loc(p);
529 }
530 
531 inline bool ShenandoahHeap::is_stable() const {
532   return _gc_state.is_clear();
533 }
534 
535 inline bool ShenandoahHeap::is_idle() const {
536   return _gc_state.is_unset(YOUNG_MARKING | OLD_MARKING | EVACUATION | UPDATEREFS);
537 }
538 
539 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
540   return _gc_state.is_set(YOUNG_MARKING | OLD_MARKING);
541 }
542 
543 inline bool ShenandoahHeap::is_concurrent_young_mark_in_progress() const {
544   return _gc_state.is_set(YOUNG_MARKING);
545 }
546 
547 inline bool ShenandoahHeap::is_concurrent_old_mark_in_progress() const {
548   return _gc_state.is_set(OLD_MARKING);
549 }
550 
551 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
552   return _gc_state.is_set(EVACUATION);
553 }
554 
555 inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
556   return _gc_state.is_set(mask);
557 }
558 
559 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
560   return _degenerated_gc_in_progress.is_set();
561 }
562 
563 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
564   return _full_gc_in_progress.is_set();
565 }
566 
567 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
568   return _full_gc_move_in_progress.is_set();
569 }
570 
571 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
572   return _gc_state.is_set(UPDATEREFS);
573 }
574 
575 inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
576   return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
577 }
578 
579 inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
580   return _concurrent_strong_root_in_progress.is_set();
581 }
582 
583 inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
584   return _gc_state.is_set(WEAK_ROOTS);
585 }
586 
587 inline bool ShenandoahHeap::is_aging_cycle() const {
588   return _is_aging_cycle.is_set();
589 }
590 
591 inline size_t ShenandoahHeap::set_promotion_reserve(size_t new_val) {
592   size_t orig = _promotion_reserve;
593   _promotion_reserve = new_val;
594   return orig;
595 }
596 
597 inline size_t ShenandoahHeap::get_promotion_reserve() const {
598   return _promotion_reserve;
599 }
600 
601 // Returns the previous value.
602 inline size_t ShenandoahHeap::capture_old_usage(size_t old_usage) {
603   size_t previous_value = _captured_old_usage;
604   _captured_old_usage = old_usage;
605   return previous_value;
606 }
607 
608 inline void ShenandoahHeap::set_previous_promotion(size_t promoted_bytes) {
609   _previous_promotion = promoted_bytes;
610 }
611 
612 inline size_t ShenandoahHeap::get_previous_promotion() const {
613   return _previous_promotion;
614 }
615 
616 inline size_t ShenandoahHeap::set_old_evac_reserve(size_t new_val) {
617   size_t orig = _old_evac_reserve;
618   _old_evac_reserve = new_val;
619   return orig;
620 }
621 
622 inline size_t ShenandoahHeap::get_old_evac_reserve() const {
623   return _old_evac_reserve;
624 }
625 
626 inline void ShenandoahHeap::reset_old_evac_expended() {
627   _old_evac_expended = 0;
628 }
629 
630 inline size_t ShenandoahHeap::expend_old_evac(size_t increment) {
631   _old_evac_expended += increment;
632   return _old_evac_expended;
633 }
634 
635 inline size_t ShenandoahHeap::get_old_evac_expended() const {
636   return _old_evac_expended;
637 }
638 
639 inline size_t ShenandoahHeap::set_young_evac_reserve(size_t new_val) {
640   size_t orig = _young_evac_reserve;
641   _young_evac_reserve = new_val;
642   return orig;
643 }
644 
645 inline size_t ShenandoahHeap::get_young_evac_reserve() const {
646   return _young_evac_reserve;
647 }
648 
649 inline intptr_t ShenandoahHeap::set_alloc_supplement_reserve(intptr_t new_val) {
650   intptr_t orig = _alloc_supplement_reserve;
651   _alloc_supplement_reserve = new_val;
652   return orig;
653 }
654 
655 inline intptr_t ShenandoahHeap::get_alloc_supplement_reserve() const {
656   return _alloc_supplement_reserve;
657 }
658 
659 template<class T>
660 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
661   marked_object_iterate(region, cl, region->top());
662 }
663 
664 template<class T>
665 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
666   assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
667 
668   ShenandoahMarkingContext* const ctx = marking_context();
669 
670   HeapWord* tams = ctx->top_at_mark_start(region);
671 
672   size_t skip_bitmap_delta = 1;
673   HeapWord* start = region->bottom();
674   HeapWord* end = MIN2(tams, region->end());
675 
676   // Step 1. Scan below the TAMS based on bitmap data.
677   HeapWord* limit_bitmap = MIN2(limit, tams);
678 
679   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
680   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
681   HeapWord* cb = ctx->get_next_marked_addr(start, end);
682 
683   intx dist = ShenandoahMarkScanPrefetch;
684   if (dist > 0) {
685     // Batched scan that prefetches the oop data, anticipating the access to
686     // either header, oop field, or forwarding pointer. Note that we cannot
687     // touch anything in the oop while it is still being prefetched, so that the
688     // prefetch has enough time to work. This is why we try to scan the bitmap linearly,

779     HeapWord* bottom = region->bottom();
780     if (top > bottom) {
781       region = region->humongous_start_region();
782       ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
783       marked_object_iterate(region, &objs);
784     }
785   } else {
786     ShenandoahObjectToOopClosure<T> objs(cl);
787     marked_object_iterate(region, &objs, top);
788   }
789 }
790 
791 inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
792   if (region_idx < _num_regions) {
793     return _regions[region_idx];
794   } else {
795     return NULL;
796   }
797 }
798 
799 inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
800   assert(_marking_context->is_complete(), "sanity");
801   return _marking_context;
802 }
803 
804 inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
805   return _marking_context;
806 }
807 
808 inline void ShenandoahHeap::clear_cards_for(ShenandoahHeapRegion* region) {
809   if (mode()->is_generational()) {
810     _card_scan->mark_range_as_empty(region->bottom(), pointer_delta(region->end(), region->bottom()));
811   }
812 }
813 
814 inline void ShenandoahHeap::dirty_cards(HeapWord* start, HeapWord* end) {
815   assert(mode()->is_generational(), "Should only be used for generational mode");
816   size_t words = pointer_delta(end, start);
817   _card_scan->mark_range_as_dirty(start, words);
818 }
819 
820 inline void ShenandoahHeap::clear_cards(HeapWord* start, HeapWord* end) {
821   assert(mode()->is_generational(), "Should only be used for generational mode");
822   size_t words = pointer_delta(end, start);
823   _card_scan->mark_range_as_clean(start, words);
824 }
825 
826 inline void ShenandoahHeap::mark_card_as_dirty(void* location) {
827   if (mode()->is_generational()) {
828     _card_scan->mark_card_as_dirty((HeapWord*)location);
829   }
830 }
831 
832 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP