src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp (old)

 24 
 25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 27 
 28 #include "gc/shenandoah/shenandoahHeap.hpp"
 29 
 30 #include "classfile/javaClasses.inline.hpp"
 31 #include "gc/shared/markBitMap.inline.hpp"
 32 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 33 #include "gc/shared/suspendibleThreadSet.hpp"
 34 #include "gc/shared/tlab_globals.hpp"
 35 #include "gc/shenandoah/shenandoahAsserts.hpp"
 36 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 37 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
 38 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
 39 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 40 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 41 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 42 #include "gc/shenandoah/shenandoahControlThread.hpp"
 43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 44 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 45 #include "oops/compressedOops.inline.hpp"
 46 #include "oops/oop.inline.hpp"
 47 #include "runtime/atomic.hpp"
 48 #include "runtime/prefetch.inline.hpp"
 49 #include "runtime/thread.hpp"
 50 #include "utilities/copy.hpp"
 51 #include "utilities/globalDefinitions.hpp"
 52 
 53 inline ShenandoahHeap* ShenandoahHeap::heap() {
 54   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
 55 }
 56 
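    // Claim regions by atomically bumping the shared index: Atomic::add() returns
    // the incremented value, so (new_index - 1) is the slot this thread has claimed.
    // Relaxed memory order suffices, as the atomic claim itself is the only
    // coordination needed between iterating threads.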
 57 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
 58   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
 59   // get_region() provides the bounds-check and returns NULL on OOB.
 60   return _heap->get_region(new_index - 1);
 61 }
 62 
 63 inline bool ShenandoahHeap::has_forwarded_objects() const {
 64   return _gc_state.is_set(HAS_FORWARDED);

247     return cancelled_gc();
248   }
249 
250   jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
251   if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
252     if (SuspendibleThreadSet::should_yield()) {
253       SuspendibleThreadSet::yield();
254     }
255 
256     // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
257     // to restore to CANCELLABLE.
258     if (prev == CANCELLABLE) {
259       _cancelled_gc.set(CANCELLABLE);
260     }
261     return false;
262   } else {
263     return true;
264   }
265 }
266 
267 inline void ShenandoahHeap::clear_cancelled_gc() {
268   _cancelled_gc.set(CANCELLABLE);
 269   _oom_evac_handler.clear();
 270 }
271 
272 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
273   assert(UseTLAB, "TLABs should be enabled");
274 
275   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
276   if (gclab == NULL) {
277     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
278            "Performance: thread should have GCLAB: %s", thread->name());
 279     // No GCLABs in this thread, fall back to shared allocation
280     return NULL;
281   }
282   HeapWord* obj = gclab->allocate(size);
283   if (obj != NULL) {
284     return obj;
285   }
 286   // Otherwise, the GCLAB could not satisfy the request; take the slow path.
287   return allocate_from_gclab_slow(thread, size);
288 }
 289 
 290 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
 291   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
 292     // This thread went through the OOM-during-evacuation protocol, so it is safe
 293     // to return the forwarding pointer. It must not attempt any further evacuation.
 294     return ShenandoahBarrierSet::resolve_forwarded(p);
295   }
296 
297   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
298 
 299   size_t size = p->size();
 300 
 301   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
 302 
 303   bool alloc_from_gclab = true;
 304   HeapWord* copy = NULL;
 305 
306 #ifdef ASSERT
307   if (ShenandoahOOMDuringEvacALot &&
308       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
 309     copy = NULL;
310   } else {
311 #endif
312     if (UseTLAB) {
 313       copy = allocate_from_gclab(thread, size);
 314     }
315     if (copy == NULL) {
316       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
317       copy = allocate_memory(req);
318       alloc_from_gclab = false;
319     }
320 #ifdef ASSERT
321   }
322 #endif
323 
 324   if (copy == NULL) {
 325     control_thread()->handle_alloc_failure_evac(size);
326 
327     _oom_evac_handler.handle_out_of_memory_during_evacuation();
328 
329     return ShenandoahBarrierSet::resolve_forwarded(p);
330   }
331 
332   // Copy the object:
333   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
334 
335   // Try to install the new forwarding pointer.
 336   oop copy_val = cast_to_oop(copy);
 337   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
338   if (result == copy_val) {
 339     // Successfully evacuated. Our copy is now the public one!
 340     shenandoah_assert_correct(NULL, copy_val);
341     return copy_val;
 342   } else {
343     // Failed to evacuate. We need to deal with the object that is left behind. Since this
344     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
345     // But if it happens to contain references to evacuated regions, those references would
346     // not get updated for this stale copy during this cycle, and we will crash while scanning
347     // it the next cycle.
348     //
 349     // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
350     // object will overwrite this stale copy, or the filler object on LAB retirement will
351     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
352     // have to explicitly overwrite the copy with the filler object. With that overwrite,
353     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
354     if (alloc_from_gclab) {
 355       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
 356     } else {
 357       fill_with_object(copy, size);
 358       shenandoah_assert_correct(NULL, copy_val);
 359     }
360     shenandoah_assert_correct(NULL, result);
361     return result;
362   }
363 }
 364 
 365 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
366   oop obj = cast_to_oop(entry);
367   return !_marking_context->is_marked_strong(obj);
368 }
369 
370 inline bool ShenandoahHeap::in_collection_set(oop p) const {
371   assert(collection_set() != NULL, "Sanity");
372   return collection_set()->is_in(p);
373 }
374 
375 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
376   assert(collection_set() != NULL, "Sanity");
377   return collection_set()->is_in_loc(p);
378 }
379 
380 inline bool ShenandoahHeap::is_stable() const {
381   return _gc_state.is_clear();
382 }
383 
384 inline bool ShenandoahHeap::is_idle() const {
385   return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
386 }
387 
388 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
 389   return _gc_state.is_set(MARKING);
 390 }
391 
392 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
393   return _gc_state.is_set(EVACUATION);
394 }
395 
396 inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
397   return _gc_state.is_set(mask);
398 }
399 
400 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
401   return _degenerated_gc_in_progress.is_set();
402 }
403 
404 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
405   return _full_gc_in_progress.is_set();
406 }
407 
408 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
409   return _full_gc_move_in_progress.is_set();
410 }
411 
412 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
413   return _gc_state.is_set(UPDATEREFS);
414 }
415 
416 inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
417   return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
418 }
419 
420 inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
421   return _concurrent_strong_root_in_progress.is_set();
422 }
423 
424 inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
425   return _gc_state.is_set(WEAK_ROOTS);
426 }
 427 
 428 template<class T>
429 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
430   marked_object_iterate(region, cl, region->top());
431 }
432 
433 template<class T>
434 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
 435   assert(!region->is_humongous_continuation(), "no humongous continuation regions here");
436 
437   ShenandoahMarkingContext* const ctx = complete_marking_context();
438   assert(ctx->is_complete(), "sanity");
439 
440   HeapWord* tams = ctx->top_at_mark_start(region);
441 
442   size_t skip_bitmap_delta = 1;
443   HeapWord* start = region->bottom();
444   HeapWord* end = MIN2(tams, region->end());
445 
446   // Step 1. Scan below the TAMS based on bitmap data.
447   HeapWord* limit_bitmap = MIN2(limit, tams);
448 
449   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
450   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
451   HeapWord* cb = ctx->get_next_marked_addr(start, end);
452 
453   intx dist = ShenandoahMarkScanPrefetch;
454   if (dist > 0) {
 455     // Batched scan that prefetches the oop data, anticipating the access to
 456     // either the header, an oop field, or the forwarding pointer. Note that we
 457     // cannot touch anything in the oop while it is still being prefetched, to
 458     // give the prefetch enough time to work. This is why we try to scan the bitmap linearly,

549     HeapWord* bottom = region->bottom();
550     if (top > bottom) {
551       region = region->humongous_start_region();
552       ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
553       marked_object_iterate(region, &objs);
554     }
555   } else {
556     ShenandoahObjectToOopClosure<T> objs(cl);
557     marked_object_iterate(region, &objs, top);
558   }
559 }
560 
561 inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
562   if (region_idx < _num_regions) {
563     return _regions[region_idx];
564   } else {
565     return NULL;
566   }
567 }
568 
569 inline void ShenandoahHeap::mark_complete_marking_context() {
570   _marking_context->mark_complete();
571 }
572 
573 inline void ShenandoahHeap::mark_incomplete_marking_context() {
574   _marking_context->mark_incomplete();
575 }
576 
577 inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
 578   assert(_marking_context->is_complete(), "sanity");
579   return _marking_context;
580 }
581 
582 inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
583   return _marking_context;
584 }
 585 
 586 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp (new)

 24 
 25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 27 
 28 #include "gc/shenandoah/shenandoahHeap.hpp"
 29 
 30 #include "classfile/javaClasses.inline.hpp"
 31 #include "gc/shared/markBitMap.inline.hpp"
 32 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 33 #include "gc/shared/suspendibleThreadSet.hpp"
 34 #include "gc/shared/tlab_globals.hpp"
 35 #include "gc/shenandoah/shenandoahAsserts.hpp"
 36 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 37 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
 38 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
 39 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 40 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 41 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 42 #include "gc/shenandoah/shenandoahControlThread.hpp"
 43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 44 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 45 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 47 #include "gc/shenandoah/mode/shenandoahMode.hpp"
 48 #include "oops/compressedOops.inline.hpp"
 49 #include "oops/oop.inline.hpp"
 50 #include "runtime/atomic.hpp"
 51 #include "runtime/prefetch.inline.hpp"
 52 #include "runtime/thread.hpp"
 53 #include "utilities/copy.hpp"
 54 #include "utilities/globalDefinitions.hpp"
 55 
 56 inline ShenandoahHeap* ShenandoahHeap::heap() {
 57   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
 58 }
 59 
 60 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
 61   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
 62   // get_region() provides the bounds-check and returns NULL on OOB.
 63   return _heap->get_region(new_index - 1);
 64 }
 65 
 66 inline bool ShenandoahHeap::has_forwarded_objects() const {
 67   return _gc_state.is_set(HAS_FORWARDED);

250     return cancelled_gc();
251   }
252 
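    // Try to flip CANCELLABLE -> NOT_CANCELLED. If the previous value was either of
    // those two, no cancellation is pending: yield to the suspendible thread set if
    // requested, let the thread that first installed NOT_CANCELLED restore
    // CANCELLABLE, and report "not cancelled". Any other previous value means the
    // GC was cancelled concurrently.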
253   jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
254   if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
255     if (SuspendibleThreadSet::should_yield()) {
256       SuspendibleThreadSet::yield();
257     }
258 
259     // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
260     // to restore to CANCELLABLE.
261     if (prev == CANCELLABLE) {
262       _cancelled_gc.set(CANCELLABLE);
263     }
264     return false;
265   } else {
266     return true;
267   }
268 }
269 
270 inline void ShenandoahHeap::clear_cancelled_gc(bool clear_oom_handler) {
271   _cancelled_gc.set(CANCELLABLE);
272   if (_cancel_requested_time > 0) {
273     double cancel_time = os::elapsedTime() - _cancel_requested_time;
274     log_info(gc)("GC cancellation took %.3fs", cancel_time);
275     _cancel_requested_time = 0;
276   }
277 
278   if (clear_oom_handler) {
279     _oom_evac_handler.clear();
280   }
281 }
282 
283 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
284   assert(UseTLAB, "TLABs should be enabled");
285 
286   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
287   if (gclab == NULL) {
288     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
289            "Performance: thread should have GCLAB: %s", thread->name());
 290     // No GCLABs in this thread, fall back to shared allocation
291     return NULL;
292   }
293   HeapWord* obj = gclab->allocate(size);
294   if (obj != NULL) {
295     return obj;
 296   }
 297   return allocate_from_gclab_slow(thread, size);
298 }
299 
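    // PLABs are the old-generation analogue of GCLABs: per-thread buffers used when
    // evacuating (promoting) objects into OLD_GENERATION; see try_evacuate_object()
    // below. The fast/slow path structure mirrors allocate_from_gclab() above.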
300 inline HeapWord* ShenandoahHeap::allocate_from_plab(Thread* thread, size_t size) {
301   assert(UseTLAB, "TLABs should be enabled");
302 
303   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
304   if (plab == NULL) {
305     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
306            "Performance: thread should have PLAB: %s", thread->name());
 307     // No PLABs in this thread, fall back to shared allocation
308     return NULL;
309   }
310   HeapWord* obj = plab->allocate(size);
311   if (obj == NULL) {
312     obj = allocate_from_plab_slow(thread, size);
313   }
314   return obj;
315 }
316 
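    // In generational mode, a young object whose mark-word age has reached
    // InitialTenuringThreshold is first offered to OLD_GENERATION; if that
    // promotion attempt fails (returns NULL), the object is evacuated within its
    // own generation. Objects with displaced mark words skip the promotion attempt,
    // since their age bits cannot be read cheaply and safely here.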
317 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
318   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
319   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
 320     // This thread went through the OOM-during-evacuation protocol, so it is safe
 321     // to return the forwarding pointer. It must not attempt any further evacuation.
322     return ShenandoahBarrierSet::resolve_forwarded(p);
323   }
324 
325   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
326 
327   ShenandoahHeapRegion* r = heap_region_containing(p);
328   assert(!r->is_humongous(), "never evacuate humongous objects");
329 
330   ShenandoahRegionAffiliation target_gen = r->affiliation();
331   if (mode()->is_generational() && ShenandoahHeap::heap()->is_gc_generation_young() &&
332       target_gen == YOUNG_GENERATION && ShenandoahPromoteTenuredObjects) {
333     markWord mark = p->mark();
334     if (mark.is_marked()) {
335       // Already forwarded.
336       return ShenandoahBarrierSet::resolve_forwarded(p);
337     }
338     if (mark.has_displaced_mark_helper()) {
 339       // We don't want to deal with multi-threading here just to ensure we read the
 340       // right (displaced) mark word. Skip the potential promotion attempt for this one.
341     } else if (mark.age() >= InitialTenuringThreshold) {
342       oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
343       if (result != NULL) {
344         return result;
345       }
346     }
347   }
348   return try_evacuate_object(p, thread, r, target_gen);
349 }
350 
351 // try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
352 // to OLD_GENERATION.
353 inline oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region, ShenandoahRegionAffiliation target_gen) {
354   bool alloc_from_lab = true;
355   HeapWord* copy = NULL;
356   size_t size = p->size();
357 
358 #ifdef ASSERT
359   if (ShenandoahOOMDuringEvacALot &&
360       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
 361     copy = NULL;
362   } else {
363 #endif
364     if (UseTLAB) {
 365       switch (target_gen) {
 366         case YOUNG_GENERATION: {
 367           copy = allocate_from_gclab(thread, size);
 368           break;
 369         }
 370         case OLD_GENERATION: {
 371           if (ShenandoahUsePLAB) {
 372             copy = allocate_from_plab(thread, size);
 373           }
 374           break;
 375         }
 376         default: {
 377           ShouldNotReachHere();
 378           break;
 379         }
 380       }
381     }
382     if (copy == NULL) {
383       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
384       copy = allocate_memory(req);
385       alloc_from_lab = false;
386     }
387 #ifdef ASSERT
388   }
389 #endif
390 
391   if (copy == NULL) {
392     if (target_gen == OLD_GENERATION) {
393       assert(mode()->is_generational(), "Should only be here in generational mode.");
394       if (from_region->is_young()) {
 395         // Signal that promotion failed. The object will be evacuated within young gen instead.
396         handle_promotion_failure();
397         return NULL;
398       } else {
399         // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
400         // after the evacuation threads have finished.
401         handle_old_evacuation_failure();
402       }
403     }
404 
405     control_thread()->handle_alloc_failure_evac(size);
406 
407     _oom_evac_handler.handle_out_of_memory_during_evacuation();
408 
409     return ShenandoahBarrierSet::resolve_forwarded(p);
410   }
411 
412   // Copy the object:
413   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
 414 
 415   oop copy_val = cast_to_oop(copy);
416 
417   // Try to install the new forwarding pointer.
418   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
419   if (result == copy_val) {
420     // Successfully evacuated. Our copy is now the public one!
421     if (mode()->is_generational()) {
422       if (target_gen == OLD_GENERATION) {
423         handle_old_evacuation(copy, size, from_region->is_young());
424       } else if (target_gen == YOUNG_GENERATION) {
425         if (is_aging_cycle()) {
426           ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
427         }
428       } else {
429         ShouldNotReachHere();
430       }
431     }
432     shenandoah_assert_correct(NULL, copy_val);
433     return copy_val;
 434   } else {
435     // Failed to evacuate. We need to deal with the object that is left behind. Since this
436     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
437     // But if it happens to contain references to evacuated regions, those references would
438     // not get updated for this stale copy during this cycle, and we will crash while scanning
439     // it the next cycle.
440     if (alloc_from_lab) {
 441       // For LAB allocations, it is enough to roll back the allocation ptr. Either the
 442       // next object will overwrite this stale copy, or the filler object on LAB
 443       // retirement will do this.
 444       switch (target_gen) {
 445         case YOUNG_GENERATION: {
 446           ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
 447           break;
 448         }
 449         case OLD_GENERATION: {
 450           ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
 451           break;
 452         }
 453         default: {
 454           ShouldNotReachHere();
 455           break;
 456         }
 457       }
458     } else {
459       // For non-LAB allocations, we have no way to retract the allocation, and
460       // have to explicitly overwrite the copy with the filler object. With that overwrite,
461       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
462       fill_with_object(copy, size);
463       shenandoah_assert_correct(NULL, copy_val);
 464       // For non-LAB allocations, the object has already been registered with the remembered set.
465     }
466     shenandoah_assert_correct(NULL, result);
467     return result;
468   }
469 }
470 
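    // Object age is kept in the mark word. When the mark word is displaced (for
    // example, while the object is locked), the age must be updated in the displaced
    // header instead, or the update would clobber the displaced-mark state. The age
    // saturates at markWord::max_age rather than overflowing.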
471 void ShenandoahHeap::increase_object_age(oop obj, uint additional_age) {
472   markWord w = obj->has_displaced_mark() ? obj->displaced_mark() : obj->mark();
473   w = w.set_age(MIN2(markWord::max_age, w.age() + additional_age));
474   if (obj->has_displaced_mark()) {
475     obj->set_displaced_mark(w);
476   } else {
477     obj->set_mark(w);
478   }
479 }
480 
481 inline bool ShenandoahHeap::clear_old_evacuation_failure() {
482   return _old_gen_oom_evac.try_unset();
483 }
484 
485 inline bool ShenandoahHeap::is_old(oop obj) const {
486   return is_gc_generation_young() && is_in_old(obj);
487 }
488 
489 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
490   oop obj = cast_to_oop(entry);
491   return !_marking_context->is_marked_strong(obj);
492 }
493 
494 inline bool ShenandoahHeap::in_collection_set(oop p) const {
495   assert(collection_set() != NULL, "Sanity");
496   return collection_set()->is_in(p);
497 }
498 
499 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
500   assert(collection_set() != NULL, "Sanity");
501   return collection_set()->is_in_loc(p);
502 }
503 
504 inline bool ShenandoahHeap::is_stable() const {
505   return _gc_state.is_clear();
506 }
507 
508 inline bool ShenandoahHeap::is_idle() const {
509   return _gc_state.is_unset(YOUNG_MARKING | OLD_MARKING | EVACUATION | UPDATEREFS);
510 }
511 
512 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
513   return _gc_state.is_set(YOUNG_MARKING | OLD_MARKING);
514 }
515 
516 inline bool ShenandoahHeap::is_concurrent_young_mark_in_progress() const {
517   return _gc_state.is_set(YOUNG_MARKING);
518 }
519 
520 inline bool ShenandoahHeap::is_concurrent_old_mark_in_progress() const {
521   return _gc_state.is_set(OLD_MARKING);
522 }
523 
524 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
525   return _gc_state.is_set(EVACUATION);
526 }
527 
528 inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
529   return _gc_state.is_set(mask);
530 }
531 
532 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
533   return _degenerated_gc_in_progress.is_set();
534 }
535 
536 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
537   return _full_gc_in_progress.is_set();
538 }
539 
540 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
541   return _full_gc_move_in_progress.is_set();
542 }
543 
544 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
545   return _gc_state.is_set(UPDATEREFS);
546 }
547 
548 inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
549   return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
550 }
551 
552 inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
553   return _concurrent_strong_root_in_progress.is_set();
554 }
555 
556 inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
557   return _gc_state.is_set(WEAK_ROOTS);
558 }
559 
560 inline bool ShenandoahHeap::is_aging_cycle() const {
561   return _is_aging_cycle.is_set();
562 }
563 
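    // Iteration is split around TAMS (top-at-mark-start): below TAMS, only objects
    // marked in the bitmap are live and are visited via get_next_marked_addr();
    // objects at or above TAMS were allocated during the cycle and are implicitly
    // live, so that range can be walked linearly.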
564 template<class T>
565 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
566   marked_object_iterate(region, cl, region->top());
567 }
568 
569 template<class T>
570 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
 571   assert(!region->is_humongous_continuation(), "no humongous continuation regions here");
572 
 573   ShenandoahMarkingContext* const ctx = marking_context();
 574 
575   HeapWord* tams = ctx->top_at_mark_start(region);
576 
577   size_t skip_bitmap_delta = 1;
578   HeapWord* start = region->bottom();
579   HeapWord* end = MIN2(tams, region->end());
580 
581   // Step 1. Scan below the TAMS based on bitmap data.
582   HeapWord* limit_bitmap = MIN2(limit, tams);
583 
584   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
585   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
586   HeapWord* cb = ctx->get_next_marked_addr(start, end);
587 
588   intx dist = ShenandoahMarkScanPrefetch;
589   if (dist > 0) {
 590     // Batched scan that prefetches the oop data, anticipating the access to
 591     // either the header, an oop field, or the forwarding pointer. Note that we
 592     // cannot touch anything in the oop while it is still being prefetched, to
 593     // give the prefetch enough time to work. This is why we try to scan the bitmap linearly,

684     HeapWord* bottom = region->bottom();
685     if (top > bottom) {
686       region = region->humongous_start_region();
687       ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
688       marked_object_iterate(region, &objs);
689     }
690   } else {
691     ShenandoahObjectToOopClosure<T> objs(cl);
692     marked_object_iterate(region, &objs, top);
693   }
694 }
695 
696 inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
697   if (region_idx < _num_regions) {
698     return _regions[region_idx];
699   } else {
700     return NULL;
701   }
702 }
 703 
 704 inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
 705   assert(_marking_context->is_complete(), "sanity");
706   return _marking_context;
707 }
708 
709 inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
710   return _marking_context;
711 }
712 
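    // Card-table (remembered set) helpers. These are meaningful only in generational
    // mode: the unconditional variants assert for it, the conditional ones degrade
    // to no-ops. Ranges are converted to HeapWord counts via pointer_delta().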
713 inline void ShenandoahHeap::clear_cards_for(ShenandoahHeapRegion* region) {
714   if (mode()->is_generational()) {
715     _card_scan->mark_range_as_empty(region->bottom(), pointer_delta(region->end(), region->bottom()));
716   }
717 }
718 
719 inline void ShenandoahHeap::dirty_cards(HeapWord* start, HeapWord* end) {
720   assert(mode()->is_generational(), "Should only be used for generational mode");
721   size_t words = pointer_delta(end, start);
722   _card_scan->mark_range_as_dirty(start, words);
723 }
724 
725 inline void ShenandoahHeap::clear_cards(HeapWord* start, HeapWord* end) {
726   assert(mode()->is_generational(), "Should only be used for generational mode");
727   size_t words = pointer_delta(end, start);
728   _card_scan->mark_range_as_clean(start, words);
729 }
730 
731 inline void ShenandoahHeap::mark_card_as_dirty(void* location) {
732   if (mode()->is_generational()) {
733     _card_scan->mark_card_as_dirty((HeapWord*)location);
734   }
735 }
736 
737 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP