/*
 * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "gc/shenandoah/shenandoahHeap.hpp"

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

inline ShenandoahHeap* ShenandoahHeap::heap() {
  return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
}

inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
  // get_region() performs the bounds check and returns NULL for out-of-bounds indices.
  return _heap->get_region(new_index - 1);
}

inline bool ShenandoahHeap::has_forwarded_objects() const {
  return _gc_state.is_set(HAS_FORWARDED);
}

inline WorkGang* ShenandoahHeap::workers() const {
  return _workers;
}

inline WorkGang* ShenandoahHeap::safepoint_workers() {
  return _safepoint_workers;
}

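// Compute which region an address falls into. Regions are power-of-two sized,
// so the index is the offset from the heap base shifted by the region size shift.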
inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  return index;
}

inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  return result;
}

inline void ShenandoahHeap::enter_evacuation(Thread* t) {
  _oom_evac_handler.enter_evacuation(t);
}

inline void ShenandoahHeap::leave_evacuation(Thread* t) {
  _oom_evac_handler.leave_evacuation(t);
}

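// Update the reference at p to point to the forwardee if the referent is in the
// collection set. This non-concurrent variant assumes no competing updates and
// uses a plain store.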
template <class T>
inline void ShenandoahHeap::update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in the collection
      // set that are not really forwarded. We can still try to update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Unconditionally store the update: no concurrent updates expected.
      RawAccess<IS_NOT_NULL>::oop_store(p, fwd);
    }
  }
}

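// Concurrent variant of the above: other GC workers or mutators may race to
// update the same location, so the update is a CAS (see the memory ordering
// discussion below).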
template <class T>
inline void ShenandoahHeap::conc_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in the collection
      // set that are not really forwarded. We can still try to CAS-update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Sanity check: we should not be updating the cset regions themselves,
      // unless we are recovering from an evacuation failure.
      shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || cancelled_gc());

      // Either we succeed in updating the reference, or something else gets in our way.
      // We don't care if that is another concurrent GC update, or another mutator update.
      atomic_update_oop(fwd, p, obj);
    }
  }
}


// Atomic updates of heap locations. These are only expected to be used for
// updating the same logical object to its forwardee. The reason why we need
// stronger-than-relaxed memory ordering has to do with coordination with GC
// barriers and mutator accesses.
//
// In essence, the stronger CAS access is required to maintain the transitive chains
// that mutator accesses build by themselves. To illustrate this point, consider the
// following example.
//
// Suppose "o" is an object that has a field "x", and the reference to "o" is stored
// to the field at "addr", which happens to be a Java volatile field. Normally, the
// accesses to the volatile field at "addr" would be matched with release/acquire
// barriers. This changes when GC moves the object under the mutator's feet.
//
// Thread 1 (Java)
//         // --- previous access starts here
//         ...
//   T1.1: store(&o.x, 1, mo_relaxed)
//   T1.2: store(&addr, o, mo_release) // volatile store
//
//         // --- new access starts here
//         // LRB: copy and install the new copy to fwdptr
//   T1.3: var copy = copy(o)
//   T1.4: cas(&fwd, t, copy, mo_release) // pointer-mediated publication
//         <access continues>
//
// Thread 2 (GC updater)
//   T2.1: var f = load(&fwd, mo_{consume|acquire}) // pointer-mediated acquisition
//   T2.2: cas(&addr, o, f, mo_release) // this method
//
// Thread 3 (Java)
//   T3.1: var o = load(&addr, mo_acquire) // volatile read
//   T3.2: if (o != null)
//   T3.3:   var r = load(&o.x, mo_relaxed)
//
// r is guaranteed to contain "1".
//
// Without GC involvement, there is a synchronizes-with edge from T1.2 to T3.1,
// which guarantees this. With GC involvement, when the LRB copies the object and
// another thread updates the reference to it, we need to have the transitive edge
// from T1.4 to T2.1 (that one is guaranteed by forwarding accesses), plus the edge
// from T2.2 to T3.1 (which is provided by this CAS).
//
// Note that we do not need to "acquire" in these methods, because we do not read
// the failure witness's contents on any path, and "release" is enough.
//

inline void ShenandoahHeap::atomic_update_oop(oop update, oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, update, memory_order_release);
}

inline void ShenandoahHeap::atomic_update_oop(oop update, narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop u = CompressedOops::encode(update);
  Atomic::cmpxchg(addr, compare, u, memory_order_release);
}

inline void ShenandoahHeap::atomic_update_oop(oop update, narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop c = CompressedOops::encode(compare);
  narrowOop u = CompressedOops::encode(update);
  Atomic::cmpxchg(addr, c, u, memory_order_release);
}

inline bool ShenandoahHeap::atomic_update_oop_check(oop update, oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  return (oop) Atomic::cmpxchg(addr, compare, update, memory_order_release) == compare;
}

inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop u = CompressedOops::encode(update);
  return (narrowOop) Atomic::cmpxchg(addr, compare, u, memory_order_release) == compare;
}

inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop c = CompressedOops::encode(compare);
  narrowOop u = CompressedOops::encode(update);
  return CompressedOops::decode(Atomic::cmpxchg(addr, c, u, memory_order_release)) == compare;
}

// The memory ordering discussion above does not apply to methods that store NULLs:
// then there are no transitive reads in the mutator (as it sees NULLs), and we can
// use relaxed memory ordering there.

inline void ShenandoahHeap::atomic_clear_oop(oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, oop(), memory_order_relaxed);
}

inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop cmp = CompressedOops::encode(compare);
  Atomic::cmpxchg(addr, cmp, narrowOop(), memory_order_relaxed);
}

inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, narrowOop(), memory_order_relaxed);
}

inline bool ShenandoahHeap::cancelled_gc() const {
  return _cancelled_gc.get() == CANCELLED;
}

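// Check for cancellation, yielding to a pending safepoint first if this worker
// participates in the suspendible thread set.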
inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
  if (sts_active && ShenandoahSuspendibleWorkers && !cancelled_gc()) {
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }
  }
  return cancelled_gc();
}

inline void ShenandoahHeap::clear_cancelled_gc() {
  _cancelled_gc.set(CANCELLABLE);
  _oom_evac_handler.clear();
}

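// Fast-path evacuation allocation from the thread-local GCLAB. Returns NULL if
// the thread has no GCLAB; otherwise tries the current GCLAB and falls through
// to the slow path on failure.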
inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab == NULL) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
           "Performance: thread should have GCLAB: %s", thread->name());
    // No GCLABs in this thread, fall back to shared allocation
    return NULL;
  }
  HeapWord* obj = gclab->allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_gclab_slow(thread, size);
}

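// Evacuate the object into a GCLAB or shared allocation, then race to install
// the forwarding pointer. Returns the winning copy (ours or a competitor's), or
// the current forwardee if the allocation fails under evacuation OOM.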
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM-during-evacuation protocol, so it is safe
    // to return the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = NULL;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == NULL) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == NULL) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = cast_to_oop(copy);
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it in the next cycle.
    //
    // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(NULL, copy_val);
    }
    shenandoah_assert_correct(NULL, result);
    return result;
  }
}

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  oop obj = cast_to_oop(entry);
  return !_marking_context->is_marked_strong(obj);
}

inline bool ShenandoahHeap::in_collection_set(oop p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in(p);
}

inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in_loc(p);
}

inline bool ShenandoahHeap::is_stable() const {
  return _gc_state.is_clear();
}

inline bool ShenandoahHeap::is_idle() const {
  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
}

inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return _gc_state.is_set(MARKING);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _gc_state.is_set(EVACUATION);
}

inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
  return _gc_state.is_set(mask);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}

inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
  return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
}

inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
  return _concurrent_strong_root_in_progress.is_set();
}

inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
  return _gc_state.is_set(WEAK_ROOTS);
}

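// Iterate all marked (live) objects in the region, up to region->top().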
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = complete_marking_context();
  assert(ctx->is_complete(), "sanity");

  HeapWord* tams = ctx->top_at_mark_start(region);

  size_t skip_bitmap_delta = 1;
  HeapWord* start = region->bottom();
  HeapWord* end = MIN2(tams, region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = ctx->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched, to give
    // the prefetch enough time to work. This is why we try to scan the bitmap
    // linearly, disregarding the object size. However, since we know the
    // forwarding pointer precedes the object, we can skip over it. Once we
    // cannot trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() will touch it prematurely.

    // Standard C++ has no variable-length arrays, so have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, oopDesc::mark_offset_in_bytes());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = ctx->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert (slots[c] < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = cast_to_oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert (cb < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = cast_to_oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = ctx->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams;
  while (cs < limit) {
    assert (cs >= tams, "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = cast_to_oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    int size = obj->size();
    cl->do_object(obj);
    cs += size;
  }
}

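// Adapters that apply an oop closure to every reference field of each visited
// object, optionally bounded to a memory region.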
template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
    _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

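// Iterate reference fields of marked objects. For humongous regions, the walk
// starts at the humongous start region, but the oop iteration is bounded to
// [bottom, top) of the requested region.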
template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}

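// Bounds-checked region lookup: returns NULL for out-of-range indices.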
inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
  if (region_idx < _num_regions) {
    return _regions[region_idx];
  } else {
    return NULL;
  }
}

inline void ShenandoahHeap::mark_complete_marking_context() {
  _marking_context->mark_complete();
}

inline void ShenandoahHeap::mark_incomplete_marking_context() {
  _marking_context->mark_incomplete();
}

inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
  assert(_marking_context->is_complete(), "sanity");
  return _marking_context;
}

inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
  return _marking_context;
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP