src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

 25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 27 
 28 #include "gc/shenandoah/shenandoahHeap.hpp"
 29 
 30 #include "classfile/javaClasses.inline.hpp"
 31 #include "gc/shared/markBitMap.inline.hpp"
 32 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 33 #include "gc/shared/continuationGCSupport.inline.hpp"
 34 #include "gc/shared/suspendibleThreadSet.hpp"
 35 #include "gc/shared/tlab_globals.hpp"
 36 #include "gc/shenandoah/shenandoahAsserts.hpp"
 37 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 38 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
 39 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
 40 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 41 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 42 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 43 #include "gc/shenandoah/shenandoahControlThread.hpp"
 44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 45 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 46 #include "oops/compressedOops.inline.hpp"
 47 #include "oops/oop.inline.hpp"
 48 #include "runtime/atomic.hpp"
 49 #include "runtime/javaThread.hpp"
 50 #include "runtime/prefetch.inline.hpp"
 51 #include "utilities/copy.hpp"
 52 #include "utilities/globalDefinitions.hpp"
 53 
 54 inline ShenandoahHeap* ShenandoahHeap::heap() {
 55   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
 56 }
 57 
 58 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
 59   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
 60   // get_region() provides the bounds-check and returns null on OOB.
 61   return _heap->get_region(new_index - 1);
 62 }
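// Illustrative sketch only (not part of this change): workers typically
// drain the iterator by calling next() until it returns nullptr; the
// relaxed atomic add above guarantees each region index is claimed by
// exactly one caller.
//
//   ShenandoahRegionIterator regions;  // construction elided
//   for (ShenandoahHeapRegion* r = regions.next(); r != nullptr; r = regions.next()) {
//     do_work(r);                      // do_work() is a hypothetical per-region task
//   }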
 63 
 64 inline bool ShenandoahHeap::has_forwarded_objects() const {
 65   return _gc_state.is_set(HAS_FORWARDED);

248     return cancelled_gc();
249   }
250 
251   jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
252   if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
253     if (SuspendibleThreadSet::should_yield()) {
254       SuspendibleThreadSet::yield();
255     }
256 
257     // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
258     // to restore to CANCELLABLE.
259     if (prev == CANCELLABLE) {
260       _cancelled_gc.set(CANCELLABLE);
261     }
262     return false;
263   } else {
264     return true;
265   }
266 }
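// Reading of the fragment above (a sketch, not normative): a worker that
// may need to yield first tries to flip the flag from CANCELLABLE to
// NOT_CANCELLED, which temporarily blocks a racing cancellation. It then
// parks in the suspendible thread set if a safepoint is pending, and the
// thread that won the cmpxchg restores CANCELLABLE. Observing any other
// value means the GC was already cancelled, so we return true and let the
// caller unwind.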
267 
268 inline void ShenandoahHeap::clear_cancelled_gc() {
269   _cancelled_gc.set(CANCELLABLE);
270   _oom_evac_handler.clear();
271 }
272 
273 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
274   assert(UseTLAB, "TLABs should be enabled");
275 
276   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
277   if (gclab == nullptr) {
278     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
279            "Performance: thread should have GCLAB: %s", thread->name());
 280     // No GCLABs in this thread, fall back to shared allocation
281     return nullptr;
282   }
283   HeapWord* obj = gclab->allocate(size);
284   if (obj != nullptr) {
285     return obj;
286   }
 287   // Otherwise, take the slow path (which may retire and refill the GCLAB).
288   return allocate_from_gclab_slow(thread, size);
289 }
290 
291 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
292   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
293     // This thread went through the OOM during evac protocol and it is safe to return
294     // the forward pointer. It must not attempt to evacuate any more.
295     return ShenandoahBarrierSet::resolve_forwarded(p);
296   }
297 
298   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
299 
300   size_t size = p->size();
301 
302   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
303 
304   bool alloc_from_gclab = true;
305   HeapWord* copy = nullptr;
306 
307 #ifdef ASSERT
308   if (ShenandoahOOMDuringEvacALot &&
309       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
310         copy = nullptr;
311   } else {
312 #endif
313     if (UseTLAB) {
314       copy = allocate_from_gclab(thread, size);
315     }
316     if (copy == nullptr) {
317       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
318       copy = allocate_memory(req);
319       alloc_from_gclab = false;
320     }
321 #ifdef ASSERT
322   }
323 #endif
324 
325   if (copy == nullptr) {
326     control_thread()->handle_alloc_failure_evac(size);
327 
328     _oom_evac_handler.handle_out_of_memory_during_evacuation();
329 
330     return ShenandoahBarrierSet::resolve_forwarded(p);
331   }
332 
333   // Copy the object:
334   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
335 
336   // Try to install the new forwarding pointer.
337   oop copy_val = cast_to_oop(copy);
338   ContinuationGCSupport::relativize_stack_chunk(copy_val);
339 
340   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
341   if (result == copy_val) {
342     // Successfully evacuated. Our copy is now the public one!
343     shenandoah_assert_correct(nullptr, copy_val);
344     return copy_val;
 345   } else {
346     // Failed to evacuate. We need to deal with the object that is left behind. Since this
347     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
348     // But if it happens to contain references to evacuated regions, those references would
349     // not get updated for this stale copy during this cycle, and we will crash while scanning
350     // it the next cycle.
351     //
352     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
353     // object will overwrite this stale copy, or the filler object on LAB retirement will
354     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
355     // have to explicitly overwrite the copy with the filler object. With that overwrite,
356     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
357     if (alloc_from_gclab) {
358       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
359     } else {
360       fill_with_object(copy, size);
361       shenandoah_assert_correct(nullptr, copy_val);
362     }
363     shenandoah_assert_correct(nullptr, result);
364     return result;
365   }
366 }
367 
368 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
369   oop obj = cast_to_oop(entry);
370   return !_marking_context->is_marked_strong(obj);
371 }
372 
373 inline bool ShenandoahHeap::in_collection_set(oop p) const {
374   assert(collection_set() != nullptr, "Sanity");
375   return collection_set()->is_in(p);
376 }
377 
378 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
379   assert(collection_set() != nullptr, "Sanity");
380   return collection_set()->is_in_loc(p);
381 }
382 
383 inline bool ShenandoahHeap::is_stable() const {
384   return _gc_state.is_clear();
385 }
386 
387 inline bool ShenandoahHeap::is_idle() const {
388   return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
389 }
390 
391 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
392   return _gc_state.is_set(MARKING);
393 }
394 
395 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
396   return _gc_state.is_set(EVACUATION);
397 }
398 
399 inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
400   return _gc_state.is_set(mask);
401 }
402 
403 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
404   return _degenerated_gc_in_progress.is_set();
405 }
406 
407 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
408   return _full_gc_in_progress.is_set();
409 }
410 
411 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
412   return _full_gc_move_in_progress.is_set();
413 }
414 
415 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
416   return _gc_state.is_set(UPDATEREFS);
417 }
418 
419 inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
420   return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
421 }
422 
423 inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
424   return _concurrent_strong_root_in_progress.is_set();
425 }
426 
427 inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
428   return _gc_state.is_set(WEAK_ROOTS);
429 }
430 
431 template<class T>
432 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
433   marked_object_iterate(region, cl, region->top());
434 }
435 
436 template<class T>
437 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
438   assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
439 
440   ShenandoahMarkingContext* const ctx = complete_marking_context();
441   assert(ctx->is_complete(), "sanity");
442 
443   HeapWord* tams = ctx->top_at_mark_start(region);
444 
445   size_t skip_bitmap_delta = 1;
446   HeapWord* start = region->bottom();
447   HeapWord* end = MIN2(tams, region->end());
448 
449   // Step 1. Scan below the TAMS based on bitmap data.
450   HeapWord* limit_bitmap = MIN2(limit, tams);
451 
452   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
453   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
454   HeapWord* cb = ctx->get_next_marked_addr(start, end);
455 
456   intx dist = ShenandoahMarkScanPrefetch;
457   if (dist > 0) {
458     // Batched scan that prefetches the oop data, anticipating the access to
 459     // either header, oop field, or forwarding pointer. Note that we cannot
 460     // touch anything in the oop while it is still being prefetched, to give
 461     // the prefetch enough time to work. This is why we try to scan the bitmap linearly,

552     HeapWord* bottom = region->bottom();
553     if (top > bottom) {
554       region = region->humongous_start_region();
555       ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
556       marked_object_iterate(region, &objs);
557     }
558   } else {
559     ShenandoahObjectToOopClosure<T> objs(cl);
560     marked_object_iterate(region, &objs, top);
561   }
562 }
563 
564 inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
565   if (region_idx < _num_regions) {
566     return _regions[region_idx];
567   } else {
568     return nullptr;
569   }
570 }
571 
572 inline void ShenandoahHeap::mark_complete_marking_context() {
573   _marking_context->mark_complete();
574 }
575 
576 inline void ShenandoahHeap::mark_incomplete_marking_context() {
577   _marking_context->mark_incomplete();
578 }
579 
580 inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
 581   assert(_marking_context->is_complete(), "sanity");
582   return _marking_context;
583 }
584 
585 inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
586   return _marking_context;
587 }
588 
589 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

 25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 27 
 28 #include "gc/shenandoah/shenandoahHeap.hpp"
 29 
 30 #include "classfile/javaClasses.inline.hpp"
 31 #include "gc/shared/markBitMap.inline.hpp"
 32 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 33 #include "gc/shared/continuationGCSupport.inline.hpp"
 34 #include "gc/shared/suspendibleThreadSet.hpp"
 35 #include "gc/shared/tlab_globals.hpp"
 36 #include "gc/shenandoah/shenandoahAsserts.hpp"
 37 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 38 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
 39 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
 40 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 41 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 42 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 43 #include "gc/shenandoah/shenandoahControlThread.hpp"
 44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 45 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 46 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 48 #include "gc/shenandoah/mode/shenandoahMode.hpp"
 49 #include "oops/compressedOops.inline.hpp"
 50 #include "oops/oop.inline.hpp"
 51 #include "runtime/atomic.hpp"
 52 #include "runtime/javaThread.hpp"
 53 #include "runtime/prefetch.inline.hpp"
 54 #include "utilities/copy.hpp"
 55 #include "utilities/globalDefinitions.hpp"
 56 
 57 inline ShenandoahHeap* ShenandoahHeap::heap() {
 58   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
 59 }
 60 
 61 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
 62   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
 63   // get_region() provides the bounds-check and returns null on OOB.
 64   return _heap->get_region(new_index - 1);
 65 }
 66 
 67 inline bool ShenandoahHeap::has_forwarded_objects() const {
 68   return _gc_state.is_set(HAS_FORWARDED);

251     return cancelled_gc();
252   }
253 
254   jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
255   if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
256     if (SuspendibleThreadSet::should_yield()) {
257       SuspendibleThreadSet::yield();
258     }
259 
260     // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
261     // to restore to CANCELLABLE.
262     if (prev == CANCELLABLE) {
263       _cancelled_gc.set(CANCELLABLE);
264     }
265     return false;
266   } else {
267     return true;
268   }
269 }
270 
271 inline void ShenandoahHeap::clear_cancelled_gc(bool clear_oom_handler) {
272   _cancelled_gc.set(CANCELLABLE);
273   if (_cancel_requested_time > 0) {
274     double cancel_time = os::elapsedTime() - _cancel_requested_time;
275     log_info(gc)("GC cancellation took %.3fs", cancel_time);
276     _cancel_requested_time = 0;
277   }
278 
279   if (clear_oom_handler) {
280     _oom_evac_handler.clear();
281   }
282 }
283 
284 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
285   assert(UseTLAB, "TLABs should be enabled");
286 
287   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
288   if (gclab == nullptr) {
289     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
290            "Performance: thread should have GCLAB: %s", thread->name());
 291     // No GCLABs in this thread, fall back to shared allocation
292     return nullptr;
293   }
294   HeapWord* obj = gclab->allocate(size);
295   if (obj != nullptr) {
296     return obj;
297   }
298   return allocate_from_gclab_slow(thread, size);
299 }
300 
301 inline HeapWord* ShenandoahHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
302   assert(UseTLAB, "TLABs should be enabled");
303 
304   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
305   HeapWord* obj;
306   if (plab == nullptr) {
307     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
 308     // No PLABs in this thread, fall back to shared allocation
309     return nullptr;
310   } else if (is_promotion && (plab->words_remaining() > 0) && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
311     return nullptr;
312   }
 313   // If plab->word_size() <= 0, the thread's PLAB is not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
314   obj = plab->allocate(size);
315   if ((obj == nullptr) && (plab->words_remaining() < PLAB::min_size())) {
316     // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations
317     obj = allocate_from_plab_slow(thread, size, is_promotion);
318   }
 319   // If plab->words_remaining() >= PLAB::min_size(), just return nullptr so we can use a shared allocation
320   if (obj == nullptr) {
321     return nullptr;
322   }
323 
324   if (is_promotion) {
325     ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
326   } else {
327     ShenandoahThreadLocalData::add_to_plab_evacuated(thread, size * HeapWordSize);
328   }
329   return obj;
330 }
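// Reviewer note (illustrative, not normative): a caller must handle three
// outcomes of allocate_from_plab(): (1) a PLAB hit, with the promoted or
// evacuated byte counter updated; (2) nullptr after the slow path also
// failed on a nearly-empty PLAB; (3) nullptr with ample PLAB space left,
// which deliberately steers this one allocation to shared memory so the
// PLAB is preserved:
//
//   HeapWord* p = allocate_from_plab(thread, size, /* is_promotion */ true);
//   if (p == nullptr) {
//     // fall back to a shared allocation, as try_evacuate_object() below does
//   }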
331 
332 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
333   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
334   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
335     // This thread went through the OOM during evac protocol and it is safe to return
336     // the forward pointer. It must not attempt to evacuate any more.
337     return ShenandoahBarrierSet::resolve_forwarded(p);
338   }
339 
340   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
341 
342   ShenandoahHeapRegion* r = heap_region_containing(p);
343   assert(!r->is_humongous(), "never evacuate humongous objects");
344 
345   ShenandoahRegionAffiliation target_gen = r->affiliation();
346   if (mode()->is_generational() && ShenandoahHeap::heap()->is_gc_generation_young() &&
347       target_gen == YOUNG_GENERATION) {
348     markWord mark = p->mark();
349     if (mark.is_marked()) {
350       // Already forwarded.
351       return ShenandoahBarrierSet::resolve_forwarded(p);
352     }
353     if (mark.has_displaced_mark_helper()) {
 354       // The mark word is displaced; we don't want to deal with the multi-threaded (MT)
 355       // access needed to read the right mark word here. Skip the potential promotion attempt for this one.
356     } else if (r->age() + mark.age() >= InitialTenuringThreshold) {
357       oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
358       if (result != nullptr) {
359         return result;
360       }
361       // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
362     }
363   }
364   return try_evacuate_object(p, thread, r, target_gen);
365 }
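// Worked example of the promotion test above (numbers are illustrative):
// with InitialTenuringThreshold = 7, an object whose mark word carries
// age 4 and which lives in a region of age 3 satisfies
// r->age() + mark.age() >= InitialTenuringThreshold (3 + 4 >= 7), so we
// first attempt try_evacuate_object(p, thread, r, OLD_GENERATION) and only
// fall back to a young-gen evacuation if the promotion fails.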
366 
367 // try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
368 // to OLD_GENERATION.
369 inline oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
370                                                ShenandoahRegionAffiliation target_gen) {
371   bool alloc_from_lab = true;
372   bool has_plab = false;
373   HeapWord* copy = nullptr;
374   size_t size = p->size();
375   bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();
376 
377 #ifdef ASSERT
378   if (ShenandoahOOMDuringEvacALot &&
379       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
380         copy = nullptr;
381   } else {
382 #endif
383     if (UseTLAB) {
384       switch (target_gen) {
385         case YOUNG_GENERATION: {
386            copy = allocate_from_gclab(thread, size);
387            if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
388              // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve.  Try resetting
389              // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
390              ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
391              copy = allocate_from_gclab(thread, size);
392              // If we still get nullptr, we'll try a shared allocation below.
393            }
394            break;
395         }
396         case OLD_GENERATION: {
397 
398            PLAB* plab = ShenandoahThreadLocalData::plab(thread);
399            if (plab != nullptr) {
400              has_plab = true;
401            }
402            copy = allocate_from_plab(thread, size, is_promotion);
403            if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) &&
404                ShenandoahThreadLocalData::plab_retries_enabled(thread)) {
405              // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because
406              // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
407              // where abundance is defined as >= PLAB::min_size().  In the former case, we try resetting the desired
408              // PLAB size and retry PLAB allocation to avoid cascading of shared memory allocations.
409 
410              // In this situation, PLAB memory is precious.  We'll try to preserve our existing PLAB by forcing
411              // this particular allocation to be shared.
412              if (plab->words_remaining() < PLAB::min_size()) {
413                ShenandoahThreadLocalData::set_plab_size(thread, PLAB::min_size());
414                copy = allocate_from_plab(thread, size, is_promotion);
415                // If we still get nullptr, we'll try a shared allocation below.
416                if (copy == nullptr) {
 417                  // If the retry fails, disable further retries until we see success again (probably in the next GC pass)
418                  ShenandoahThreadLocalData::disable_plab_retries(thread);
419                }
420              }
 421              // Else, copy still equals nullptr. This causes a shared allocation below, preserving this PLAB for future needs.
422            }
423            break;
424         }
425         default: {
426           ShouldNotReachHere();
427           break;
428         }
429       }
430     }
431 
432     if (copy == nullptr) {
433       // If we failed to allocate in LAB, we'll try a shared allocation.
434       if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
435         ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
436         copy = allocate_memory(req, is_promotion);
437         alloc_from_lab = false;
438       }
439       // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
440       // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
441       // costly.  Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
442       // evacuation pass.  This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
443     }
444 #ifdef ASSERT
445   }
446 #endif
447 
448   if (copy == nullptr) {
449     if (target_gen == OLD_GENERATION) {
450       assert(mode()->is_generational(), "Should only be here in generational mode.");
451       if (from_region->is_young()) {
452         // Signal that promotion failed. Will evacuate this old object somewhere in young gen.
453         report_promotion_failure(thread, size);
454         handle_promotion_failure();
455         return nullptr;
456       } else {
457         // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
458         // after the evacuation threads have finished.
459         handle_old_evacuation_failure();
460       }
461     }
462 
463     control_thread()->handle_alloc_failure_evac(size);
464 
465     _oom_evac_handler.handle_out_of_memory_during_evacuation();
466 
467     return ShenandoahBarrierSet::resolve_forwarded(p);
468   }
469 
470   // Copy the object:
471   _evac_tracker->begin_evacuation(thread, size * HeapWordSize);
472   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
473 
474   oop copy_val = cast_to_oop(copy);
475 
476   if (mode()->is_generational() && target_gen == YOUNG_GENERATION && is_aging_cycle()) {
477     ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
478   }
479 
480   // Try to install the new forwarding pointer.
481   ContinuationGCSupport::relativize_stack_chunk(copy_val);
482 
483   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
484   if (result == copy_val) {
485     // Successfully evacuated. Our copy is now the public one!
486     _evac_tracker->end_evacuation(thread, size * HeapWordSize, ShenandoahHeap::get_object_age(copy_val));
487     if (mode()->is_generational() && target_gen == OLD_GENERATION) {
488       handle_old_evacuation(copy, size, from_region->is_young());
489     }
490     shenandoah_assert_correct(nullptr, copy_val);
491     return copy_val;
 492   } else {
493     // Failed to evacuate. We need to deal with the object that is left behind. Since this
494     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
495     // But if it happens to contain references to evacuated regions, those references would
496     // not get updated for this stale copy during this cycle, and we will crash while scanning
497     // it the next cycle.
498     if (alloc_from_lab) {
499        // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
500        // object will overwrite this stale copy, or the filler object on LAB retirement will
501        // do this.
502        switch (target_gen) {
503          case YOUNG_GENERATION: {
 504             ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
505             break;
506          }
507          case OLD_GENERATION: {
508             ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
509             if (is_promotion) {
510               ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
511             } else {
512               ShenandoahThreadLocalData::subtract_from_plab_evacuated(thread, size * HeapWordSize);
513             }
514             break;
515          }
516          default: {
517            ShouldNotReachHere();
518            break;
519          }
520        }
521     } else {
522       // For non-LAB allocations, we have no way to retract the allocation, and
523       // have to explicitly overwrite the copy with the filler object. With that overwrite,
524       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
525       fill_with_object(copy, size);
526       shenandoah_assert_correct(nullptr, copy_val);
527       // For non-LAB allocations, the object has already been registered
528     }
529     shenandoah_assert_correct(nullptr, result);
530     return result;
531   }
532 }
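// The forwarding race above, sketched (illustrative): two evacuating
// threads may each copy p into their own LAB, but exactly one
// ShenandoahForwarding::try_update_forwardee() CAS succeeds. The winner's
// copy becomes the public object; the loser rolls its LAB pointer back (or
// fills a shared copy with a filler object) and returns the winner's copy,
// so every caller agrees on a single forwardee.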
533 
 534 inline void ShenandoahHeap::increase_object_age(oop obj, uint additional_age) {
535   markWord w = obj->has_displaced_mark() ? obj->displaced_mark() : obj->mark();
536   w = w.set_age(MIN2(markWord::max_age, w.age() + additional_age));
537   if (obj->has_displaced_mark()) {
538     obj->set_displaced_mark(w);
539   } else {
540     obj->set_mark(w);
541   }
542 }
543 
 544 inline uint ShenandoahHeap::get_object_age(oop obj) {
545   markWord w = obj->has_displaced_mark() ? obj->displaced_mark() : obj->mark();
546   return w.age();
547 }
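// Usage note (as wired up above): during an aging cycle the evacuation path
// calls increase_object_age(copy_val, from_region->age() + 1), so an
// object's effective age combines its mark-word age with its region's age,
// saturating at markWord::max_age. get_object_age() reads the age back for
// the evacuation tracker.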
548 
549 inline bool ShenandoahHeap::clear_old_evacuation_failure() {
550   return _old_gen_oom_evac.try_unset();
551 }
552 
 553 inline bool ShenandoahHeap::is_in(const void* p) const {
554   HeapWord* heap_base = (HeapWord*) base();
555   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
556   return p >= heap_base && p < last_region_end;
557 }
558 
559 inline bool ShenandoahHeap::is_in_active_generation(oop obj) const {
560   if (!mode()->is_generational()) {
 561     // Everything is in the same single generation.
562     return true;
563   }
564 
565   if (active_generation() == nullptr) {
 566     // No collection is happening. We only expect this to be called
 567     // when concurrent processing is active, but that could change.
568     return false;
569   }
570 
571   assert(is_in(obj), "only check if is in active generation for objects (" PTR_FORMAT ") in heap", p2i(obj));
572   assert((active_generation() == (ShenandoahGeneration*) old_generation()) ||
573          (active_generation() == (ShenandoahGeneration*) young_generation()) ||
574          (active_generation() == global_generation()), "Active generation must be old, young, or global");
575 
576   size_t index = heap_region_containing(obj)->index();
577   switch (_affiliations[index]) {
578   case ShenandoahRegionAffiliation::FREE:
579     // Free regions are in Old, Young, Global
580     return true;
581   case ShenandoahRegionAffiliation::YOUNG_GENERATION:
582     // Young regions are in young_generation and global_generation, not in old_generation
583     return (active_generation() != (ShenandoahGeneration*) old_generation());
584   case ShenandoahRegionAffiliation::OLD_GENERATION:
585     // Old regions are in old_generation and global_generation, not in young_generation
586     return (active_generation() != (ShenandoahGeneration*) young_generation());
587   default:
588     assert(false, "Bad affiliation (%d) for region " SIZE_FORMAT, _affiliations[index], index);
589     return false;
590   }
591 }
592 
593 inline bool ShenandoahHeap::is_in_young(const void* p) const {
594   return is_in(p) && (_affiliations[heap_region_index_containing(p)] == ShenandoahRegionAffiliation::YOUNG_GENERATION);
595 }
596 
597 inline bool ShenandoahHeap::is_in_old(const void* p) const {
598   return is_in(p) && (_affiliations[heap_region_index_containing(p)] == ShenandoahRegionAffiliation::OLD_GENERATION);
599 }
600 
601 inline bool ShenandoahHeap::is_old(oop obj) const {
602   return is_gc_generation_young() && is_in_old(obj);
603 }
604 
605 inline ShenandoahRegionAffiliation ShenandoahHeap::region_affiliation(const ShenandoahHeapRegion *r) {
606   return (ShenandoahRegionAffiliation) _affiliations[r->index()];
607 }
608 
609 inline void ShenandoahHeap::assert_lock_for_affiliation(ShenandoahRegionAffiliation orig_affiliation,
610                                                         ShenandoahRegionAffiliation new_affiliation) {
611   // A lock is required when changing from FREE to NON-FREE.  Though it may be possible to elide the lock when
612   // transitioning from in-use to FREE, the current implementation uses a lock for this transition.  A lock is
613   // not required to change from YOUNG to OLD (i.e. when promoting humongous region).
614   //
615   //         new_affiliation is:     FREE   YOUNG   OLD
616   //  orig_affiliation is:  FREE      X       L      L
617   //                       YOUNG      L       X
618   //                         OLD      L       X      X
619   //  X means state transition won't happen (so don't care)
620   //  L means lock should be held
621   //  Blank means no lock required because affiliation visibility will not be required until subsequent safepoint
622   //
623   // Note: during full GC, all transitions between states are possible.  During Full GC, we should be in a safepoint.
624 
625   if ((orig_affiliation == ShenandoahRegionAffiliation::FREE) || (new_affiliation == ShenandoahRegionAffiliation::FREE)) {
626     extern bool _is_at_shenandoah_safepoint();
627     shenandoah_assert_heaplocked_or_fullgc_safepoint();
628   }
629 }
630 
631 inline void ShenandoahHeap::set_affiliation(ShenandoahHeapRegion* r, ShenandoahRegionAffiliation new_affiliation) {
632 #ifdef ASSERT
633   assert_lock_for_affiliation(region_affiliation(r), new_affiliation);
634 #endif
635   _affiliations[r->index()] = (uint8_t) new_affiliation;
636 }
637 
638 inline ShenandoahRegionAffiliation ShenandoahHeap::region_affiliation(size_t index) {
639   return (ShenandoahRegionAffiliation) _affiliations[index];
640 }
641 
642 inline void ShenandoahHeap::set_affiliation(size_t index, ShenandoahRegionAffiliation new_affiliation) {
643 #ifdef ASSERT
644   assert_lock_for_affiliation(region_affiliation(index), new_affiliation);
645 #endif
646   _affiliations[index] = (uint8_t) new_affiliation;
647 }
648 
649 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
650   oop obj = cast_to_oop(entry);
651   return !_marking_context->is_marked_strong(obj);
652 }
653 
654 inline bool ShenandoahHeap::in_collection_set(oop p) const {
655   assert(collection_set() != nullptr, "Sanity");
656   return collection_set()->is_in(p);
657 }
658 
659 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
660   assert(collection_set() != nullptr, "Sanity");
661   return collection_set()->is_in_loc(p);
662 }
663 
664 inline bool ShenandoahHeap::is_stable() const {
665   return _gc_state.is_clear();
666 }
667 
668 inline bool ShenandoahHeap::is_idle() const {
669   return _gc_state.is_unset(YOUNG_MARKING | OLD_MARKING | EVACUATION | UPDATEREFS);
670 }
671 
672 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
673   return _gc_state.is_set(YOUNG_MARKING | OLD_MARKING);
674 }
675 
676 inline bool ShenandoahHeap::is_concurrent_young_mark_in_progress() const {
677   return _gc_state.is_set(YOUNG_MARKING);
678 }
679 
680 inline bool ShenandoahHeap::is_concurrent_old_mark_in_progress() const {
681   return _gc_state.is_set(OLD_MARKING);
682 }
683 
684 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
685   return _gc_state.is_set(EVACUATION);
686 }
687 
688 inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
689   return _gc_state.is_set(mask);
690 }
691 
692 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
693   return _degenerated_gc_in_progress.is_set();
694 }
695 
696 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
697   return _full_gc_in_progress.is_set();
698 }
699 
700 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
701   return _full_gc_move_in_progress.is_set();
702 }
703 
704 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
705   return _gc_state.is_set(UPDATEREFS);
706 }
707 
708 inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
709   return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
710 }
711 
712 inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
713   return _concurrent_strong_root_in_progress.is_set();
714 }
715 
716 inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
717   return _gc_state.is_set(WEAK_ROOTS);
718 }
719 
720 inline bool ShenandoahHeap::is_aging_cycle() const {
721   return _is_aging_cycle.is_set();
722 }
723 
724 inline bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
725   return _prepare_for_old_mark;
726 }
727 
728 inline size_t ShenandoahHeap::set_promoted_reserve(size_t new_val) {
729   size_t orig = _promoted_reserve;
730   _promoted_reserve = new_val;
731   return orig;
732 }
733 
734 inline size_t ShenandoahHeap::get_promoted_reserve() const {
735   return _promoted_reserve;
736 }
737 
 738 // Returns the previous value.
 739 inline size_t ShenandoahHeap::capture_old_usage(size_t old_usage) {
740   size_t previous_value = _captured_old_usage;
741   _captured_old_usage = old_usage;
742   return previous_value;
743 }
744 
 745 inline void ShenandoahHeap::set_previous_promotion(size_t promoted_bytes) {
746   shenandoah_assert_heaplocked();
747   _previous_promotion = promoted_bytes;
748 }
749 
 750 inline size_t ShenandoahHeap::get_previous_promotion() const {
751   return _previous_promotion;
752 }
753 
754 inline size_t ShenandoahHeap::set_old_evac_reserve(size_t new_val) {
755   size_t orig = _old_evac_reserve;
756   _old_evac_reserve = new_val;
757   return orig;
758 }
759 
760 inline size_t ShenandoahHeap::get_old_evac_reserve() const {
761   return _old_evac_reserve;
762 }
763 
764 inline void ShenandoahHeap::reset_old_evac_expended() {
765   Atomic::store(&_old_evac_expended, (size_t) 0);
766 }
767 
768 inline size_t ShenandoahHeap::expend_old_evac(size_t increment) {
769   return Atomic::add(&_old_evac_expended, increment);
770 }
771 
772 inline size_t ShenandoahHeap::get_old_evac_expended() {
773   return Atomic::load(&_old_evac_expended);
774 }
775 
776 inline void ShenandoahHeap::reset_promoted_expended() {
777   Atomic::store(&_promoted_expended, (size_t) 0);
778 }
779 
780 inline size_t ShenandoahHeap::expend_promoted(size_t increment) {
781   return Atomic::add(&_promoted_expended, increment);
782 }
783 
784 inline size_t ShenandoahHeap::unexpend_promoted(size_t decrement) {
785   return Atomic::sub(&_promoted_expended, decrement);
786 }
787 
788 inline size_t ShenandoahHeap::get_promoted_expended() {
789   return Atomic::load(&_promoted_expended);
790 }
791 
792 inline size_t ShenandoahHeap::set_young_evac_reserve(size_t new_val) {
793   size_t orig = _young_evac_reserve;
794   _young_evac_reserve = new_val;
795   return orig;
796 }
797 
798 inline size_t ShenandoahHeap::get_young_evac_reserve() const {
799   return _young_evac_reserve;
800 }
801 
802 inline intptr_t ShenandoahHeap::set_alloc_supplement_reserve(intptr_t new_val) {
803   intptr_t orig = _alloc_supplement_reserve;
804   _alloc_supplement_reserve = new_val;
805   return orig;
806 }
807 
808 inline intptr_t ShenandoahHeap::get_alloc_supplement_reserve() const {
809   return _alloc_supplement_reserve;
810 }
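// Taken together, these accessors implement per-cycle evacuation budgets:
// a reserve is established up-front, and workers expend against it
// atomically. A plausible guard (hypothetical, for illustration only):
//
//   size_t spent = expend_promoted(size * HeapWordSize);
//   if (spent > get_promoted_reserve()) {
//     unexpend_promoted(size * HeapWordSize);  // over budget: undo and fail the promotion
//   }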
811 
812 template<class T>
813 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
814   marked_object_iterate(region, cl, region->top());
815 }
816 
817 template<class T>
818 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
819   assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
820 
821   ShenandoahMarkingContext* const ctx = marking_context();
822 
823   HeapWord* tams = ctx->top_at_mark_start(region);
824 
825   size_t skip_bitmap_delta = 1;
826   HeapWord* start = region->bottom();
827   HeapWord* end = MIN2(tams, region->end());
828 
829   // Step 1. Scan below the TAMS based on bitmap data.
830   HeapWord* limit_bitmap = MIN2(limit, tams);
831 
832   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
833   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
834   HeapWord* cb = ctx->get_next_marked_addr(start, end);
835 
836   intx dist = ShenandoahMarkScanPrefetch;
837   if (dist > 0) {
838     // Batched scan that prefetches the oop data, anticipating the access to
 839     // either header, oop field, or forwarding pointer. Note that we cannot
 840     // touch anything in the oop while it is still being prefetched, to give
 841     // the prefetch enough time to work. This is why we try to scan the bitmap linearly,

932     HeapWord* bottom = region->bottom();
933     if (top > bottom) {
934       region = region->humongous_start_region();
935       ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
936       marked_object_iterate(region, &objs);
937     }
938   } else {
939     ShenandoahObjectToOopClosure<T> objs(cl);
940     marked_object_iterate(region, &objs, top);
941   }
942 }
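// Shape of the iteration above (its middle is elided in this webrev):
// objects below TAMS (top-at-mark-start) are walked via the mark bitmap,
// since only marked objects are live there; objects at or above TAMS were
// allocated after marking started and are implicitly live, so that range
// can be walked linearly up to 'limit' without consulting the bitmap.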
943 
944 inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
945   if (region_idx < _num_regions) {
946     return _regions[region_idx];
947   } else {
948     return nullptr;
949   }
950 }
951 
952 inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
 953   assert(_marking_context->is_complete(), "sanity");
954   return _marking_context;
955 }
956 
957 inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
958   return _marking_context;
959 }
960 
961 inline void ShenandoahHeap::clear_cards_for(ShenandoahHeapRegion* region) {
962   if (mode()->is_generational()) {
963     _card_scan->mark_range_as_empty(region->bottom(), pointer_delta(region->end(), region->bottom()));
964   }
965 }
966 
967 inline void ShenandoahHeap::dirty_cards(HeapWord* start, HeapWord* end) {
968   assert(mode()->is_generational(), "Should only be used for generational mode");
969   size_t words = pointer_delta(end, start);
970   _card_scan->mark_range_as_dirty(start, words);
971 }
972 
973 inline void ShenandoahHeap::clear_cards(HeapWord* start, HeapWord* end) {
974   assert(mode()->is_generational(), "Should only be used for generational mode");
975   size_t words = pointer_delta(end, start);
976   _card_scan->mark_range_as_clean(start, words);
977 }
978 
979 inline void ShenandoahHeap::mark_card_as_dirty(void* location) {
980   if (mode()->is_generational()) {
981     _card_scan->mark_card_as_dirty((HeapWord*)location);
982   }
983 }
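// Sketch of the intended use (illustrative; the real write-barrier plumbing
// lives elsewhere): a post-write check dirties the card covering a store
// that creates an old->young pointer, so a young collection can find the
// reference by scanning dirty cards in the remembered set:
//
//   void post_write(void* field, oop value) {  // hypothetical helper
//     ShenandoahHeap* heap = ShenandoahHeap::heap();
//     if (heap->is_in_old(field) && heap->is_in_young(value)) {
//       heap->mark_card_as_dirty(field);
//     }
//   }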
984 
985 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP