#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "gc/shenandoah/shenandoahHeap.hpp"

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahObjectUtils.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

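// Convenience accessor: the global ShenandoahHeap instance, downcast from the
// CollectedHeap singleton.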
inline ShenandoahHeap* ShenandoahHeap::heap() {
  return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
}

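// Hands out regions to (possibly multiple) workers: each call atomically
// claims the next region index; returns NULL once the iterator is exhausted.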
inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
  // get_region() provides the bounds-check and returns NULL on OOB.
  return _heap->get_region(new_index - 1);
}

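// True when the heap may still contain forwarded objects, i.e. from-space
// copies that must be reached through their forward pointers.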
inline bool ShenandoahHeap::has_forwarded_objects() const {
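// ... (intervening lines elided in this excerpt; the fragment below is the
// tail of ShenandoahHeap::allocate_from_gclab) ...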
    // No GCLABs in this thread, fallback to shared allocation
    return NULL;
  }
  HeapWord* obj = gclab->allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_gclab_slow(thread, size);
}

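// Evacuate p into to-space on behalf of `thread` and return the resulting copy
// (the forwardee). The fragment below covers the allocation of the copy: try
// the thread's GCLAB first (when TLABs are enabled), then fall back to a
// shared GC allocation request.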
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM-during-evacuation protocol and it is safe
    // to return the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = ShenandoahObjectUtils::size(p);

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = NULL;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == NULL) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
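// ... (intervening lines elided in this excerpt; the fragment below is the
// tail of the marked-object iteration over a region) ...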
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = ctx->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
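  // Past TAMS the region is parsable: everything there was allocated after the
  // start of marking and is treated as implicitly live, so a plain size-based
  // walk visits every object exactly once.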
  HeapWord* cs = tams;
  while (cs < limit) {
    assert (cs >= tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = cast_to_oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    size_t size = ShenandoahObjectUtils::size(obj);
    cl->do_object(obj);
    cs += size;
  }
}

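// Adapter that turns an oop (field) closure into an ObjectClosure by applying
// it to every reference field of each visited object. Minimal usage sketch,
// assuming a hypothetical OopIterateClosure subtype MyOopClosure:
//
//   MyOopClosure oops;
//   ShenandoahObjectToOopClosure<MyOopClosure> objs(&oops);
//   // pass &objs to any ObjectClosure-taking API, e.g. marked_object_iterate()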
template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

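// Bounded variant of the adapter above: restricts the oop iteration of each
// visited object to the given MemRegion.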
template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;