#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "gc/shenandoah/shenandoahHeap.hpp"

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahObjectUtils.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

inline ShenandoahHeap* ShenandoahHeap::heap() {
  return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
}

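// Lock-free iteration: each call atomically bumps the shared region cursor,
// so concurrent workers each claim a distinct region index without locking.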
inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
  // get_region() provides the bounds-check and returns null on OOB.
  return _heap->get_region(new_index - 1);
}

inline bool ShenandoahHeap::has_forwarded_objects() const {

// ... (lines 66-267 elided; the excerpt resumes inside ShenandoahHeap::allocate_from_gclab) ...

    // No GCLABs in this thread, fall back to shared allocation
    return nullptr;
  }
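  // Fast path: bump-pointer allocation within the thread-local GCLAB.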
  HeapWord* obj = gclab->allocate(size);
  if (obj != nullptr) {
    return obj;
  }
  // GCLAB is full; take the slow path, which retires this GCLAB and attempts to get a new one.
  return allocate_from_gclab_slow(thread, size);
}

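// Evacuation protocol: copy the object into a GCLAB (or shared GC space), then
// race to publish the copy by installing a forwarding pointer in the original
// object's header. Exactly one thread wins; losers retract or overwrite their
// private copy.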
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM-during-evac protocol and it is safe to return
    // the forwarding pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = ShenandoahObjectUtils::size(p);
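  // Note: ShenandoahObjectUtils::size(p) is used rather than p->size();
  // presumably because, with the Klass* carried in the mark word, size
  // computation must first resolve a possibly displaced or forwarded header
  // (see the mark-word check before relativize_stack_chunk below).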

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = nullptr;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == nullptr) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
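    // Total allocation failure: notify the control thread (which may trigger a
    // degenerated or full GC), go through the OOM-during-evac protocol, and
    // return whatever forwardee is currently installed.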
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = cast_to_oop(copy);
  if (!copy_val->mark().is_marked()) {
    // If we copied a mark word that indicates 'forwarded' state, then
    // another thread beat us, and this new copy will never be published.
    // ContinuationGCSupport would get a corrupt Klass* in that case,
    // so don't even attempt it.
    ContinuationGCSupport::relativize_stack_chunk(copy_val);
  }
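  // Publication: try_update_forwardee CASes the forwarding pointer into p's
  // header. On success it returns our copy; on failure it returns the copy
  // installed by the winning thread.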
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {

// ... (lines 353-491 elided; the excerpt resumes inside the Step 1 bitmap walk of the marked-object iteration) ...

      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = ctx->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
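  // Objects at or above TAMS were allocated during the current cycle and are
  // implicitly live, so a plain size-based walk is exact here; the mark bitmap
  // is only authoritative below TAMS.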
  HeapWord* cs = tams;
  while (cs < limit) {
    assert (cs >= tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = cast_to_oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    size_t size = ShenandoahObjectUtils::size(obj);
    cl->do_object(obj);
    cs += size;
  }
}

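// Adapter that lets an oop-visiting closure drive an object-iterating API
// (such as the marked-object walk above): each visited object has its
// fields iterated with the wrapped closure.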
template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;