/*
 * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

inline ShenandoahHeap* ShenandoahHeap::heap() {
  assert(_heap != NULL, "Heap is not initialized yet");
  return _heap;
}

inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add(&_index, (size_t) 1);
  // get_region() provides the bounds-check and returns NULL on OOB.
  return _heap->get_region(new_index - 1);
}
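
// Illustrative usage (a sketch, not part of this file): callers drain the
// iterator until next() runs off the end and returns NULL, e.g.:
//
//   ShenandoahRegionIterator regions;
//   for (ShenandoahHeapRegion* r = regions.next(); r != NULL; r = regions.next()) {
//     // ... process the claimed region r ...
//   }
//
// Since the claim is a single Atomic::add, multiple worker threads can share
// one iterator; each region is handed out to exactly one thread.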

inline bool ShenandoahHeap::has_forwarded_objects() const {
  return _gc_state.is_set(HAS_FORWARDED);
}

inline WorkGang* ShenandoahHeap::workers() const {
  return _workers;
}

inline WorkGang* ShenandoahHeap::get_safepoint_workers() {
  return _safepoint_workers;
}

inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  return index;
}

inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  return result;
}

inline void ShenandoahHeap::enter_evacuation(Thread* t) {
  _oom_evac_handler.enter_evacuation(t);
}

inline void ShenandoahHeap::leave_evacuation(Thread* t) {
  _oom_evac_handler.leave_evacuation(t);
}
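
// Illustrative pairing (a sketch; in-tree code usually enters this scope via
// ShenandoahEvacOOMScope rather than calling these directly):
//
//   ShenandoahHeap* heap = ShenandoahHeap::heap();
//   heap->enter_evacuation(Thread::current());
//   // ... operations that may call evacuate_object() ...
//   heap->leave_evacuation(Thread::current());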

template <class T>
inline oop ShenandoahHeap::update_with_forwarded_not_null(T* p, oop obj) {
  if (in_collection_set(obj)) {
    shenandoah_assert_forwarded_except(p, obj, is_full_gc_in_progress() || cancelled_gc() || is_degenerated_gc_in_progress());
    obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
    RawAccess<IS_NOT_NULL>::oop_store(p, obj);
  }
#ifdef ASSERT
  else {
    shenandoah_assert_not_forwarded(p, obj);
  }
#endif
  return obj;
}

template <class T>
inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    return maybe_update_with_forwarded_not_null(p, obj);
  } else {
    return NULL;
  }
}

template <class T>
inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop heap_oop = CompressedOops::decode_not_null(o);
    if (in_collection_set(heap_oop)) {
      oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
      if (forwarded_oop == heap_oop) {
        forwarded_oop = evacuate_object(heap_oop, Thread::current());
      }
      oop prev = cas_oop(forwarded_oop, p, heap_oop);
      if (prev == heap_oop) {
        return forwarded_oop;
      } else {
        return NULL;
      }
    }
    return heap_oop;
  } else {
    return NULL;
  }
}

inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  return (oop) Atomic::cmpxchg(addr, c, n);
}

inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, narrowOop c) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop val = CompressedOops::encode(n);
  return CompressedOops::decode((narrowOop) Atomic::cmpxchg(addr, c, val));
}

inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop cmp = CompressedOops::encode(c);
  narrowOop val = CompressedOops::encode(n);
  return CompressedOops::decode((narrowOop) Atomic::cmpxchg(addr, cmp, val));
}
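
// Illustrative semantics (a sketch): cas_oop returns the witness, i.e. the value
// observed in the slot at CAS time, so success is detected by comparing the
// witness against the expected value:
//
//   oop prev = cas_oop(/* new */ to_space_copy, addr, /* expected */ from_space_ref);
//   bool installed = (prev == from_space_ref);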

template <class T>
inline oop ShenandoahHeap::maybe_update_with_forwarded_not_null(T* p, oop heap_oop) {
  shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || is_full_gc_in_progress() || is_degenerated_gc_in_progress());
  shenandoah_assert_correct(p, heap_oop);

  if (in_collection_set(heap_oop)) {
    oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
    if (forwarded_oop == heap_oop) {
      // E.g. during evacuation.
      return forwarded_oop;
    }

    shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress() || is_degenerated_gc_in_progress());
    shenandoah_assert_not_forwarded(p, forwarded_oop);
    shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_gc());

    // If this CAS fails, another thread wrote to p before us: the write is logged
    // in the SATB queue, and the reference will be updated later.
    oop witness = cas_oop(forwarded_oop, p, heap_oop);

    if (witness != heap_oop) {
      // CAS failed, someone beat us to it. Normally, we would return the failure
      // witness, because that would be the proper write of the to-space object,
      // enforced by strong barriers. However, there is a corner case with arraycopy.
      // A Java thread can beat us with an arraycopy that first copies the array,
      // which potentially contains from-space refs, and only afterwards updates
      // all from-space refs to to-space refs. This leaves a short window where
      // the new array elements can still be from-space.
      // In this case, we can just resolve the result again. As we resolve, we need
      // to consider that the contended write might have been NULL.
      oop result = ShenandoahBarrierSet::resolve_forwarded(witness);
      shenandoah_assert_not_forwarded_except(p, result, (result == NULL));
      shenandoah_assert_not_in_cset_except(p, result, (result == NULL) || cancelled_gc());
      return result;
    } else {
      // Success! We have updated the slot with the known to-space copy. We have
      // already asserted it is sane.
      return forwarded_oop;
    }
  } else {
    shenandoah_assert_not_forwarded(p, heap_oop);
    return heap_oop;
  }
}
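
// Illustrative interleaving for the arraycopy corner case above (a sketch):
//   GC thread:   loads *p == from-space ref A, resolves to to-space copy A'
//   Java thread: arraycopy stores another from-space ref B into *p
//   GC thread:   cas_oop(A', p, A) fails; witness == B, still in from-space
// Resolving the witness once more (resolve_forwarded(B)) yields a usable
// to-space value without retrying the CAS.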

inline bool ShenandoahHeap::cancelled_gc() const {
  return _cancelled_gc.get() == CANCELLED;
}

inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
  if (!(sts_active && ShenandoahSuspendibleWorkers)) {
    return cancelled_gc();
  }

  jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
  if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }

    // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
    // to restore to CANCELLABLE.
    if (prev == CANCELLABLE) {
      _cancelled_gc.set(CANCELLABLE);
    }
    return false;
  } else {
    return true;
  }
}
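
// State sketch for _cancelled_gc around the yield point (illustrative):
//
//   CANCELLABLE   --(cancellation)--------> CANCELLED      cancelled_gc() == true
//   CANCELLABLE   --(cmpxchg above)-------> NOT_CANCELLED  worker passes the yield
//   NOT_CANCELLED --(first poking thread)-> CANCELLABLE    restored after yield
//
// Parking the flag at NOT_CANCELLED holds off cancellation requests while
// workers pass the suspendible yield point; the first thread that parked it
// restores CANCELLABLE afterwards.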

inline void ShenandoahHeap::clear_cancelled_gc() {
  _cancelled_gc.set(CANCELLABLE);
  _oom_evac_handler.clear();
}

inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab == NULL) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
           "Performance: thread should have GCLAB: %s", thread->name());
    // No GCLABs in this thread, fall back to shared allocation
    return NULL;
  }
  HeapWord* obj = gclab->allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_gclab_slow(thread, size);
}

inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM-during-evacuation protocol, so it is safe
    // to return the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = NULL;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == NULL) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == NULL) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = oop(copy);
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we would crash while
    // scanning it in the next cycle.
    //
    // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do it. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(NULL, copy_val);
    }
    shenandoah_assert_correct(NULL, result);
    return result;
  }
}
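
// Illustrative caller pattern (a sketch; compare evac_update_with_forwarded above):
// both the winning and any losing evacuating thread end up with the same public copy:
//
//   oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
//   if (fwd == obj) {                       // not yet evacuated
//     fwd = heap->evacuate_object(obj, Thread::current());
//   }
//   // fwd is the canonical to-space copy, whichever thread won the race.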

template<bool RESOLVE>
inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  oop obj = oop(entry);
  if (RESOLVE) {
    obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
  }
  return !_marking_context->is_marked(obj);
}

inline bool ShenandoahHeap::in_collection_set(oop p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in(p);
}

inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in_loc(p);
}

inline bool ShenandoahHeap::is_stable() const {
  return _gc_state.is_clear();
}

inline bool ShenandoahHeap::is_idle() const {
  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
}

inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return _gc_state.is_set(MARKING);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _gc_state.is_set(EVACUATION);
}

inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
  return _gc_state.is_set(mask);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}

inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
  return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
}

inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
  return _concurrent_strong_root_in_progress.is_set();
}

inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
  return _concurrent_weak_root_in_progress.is_set();
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(!region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = complete_marking_context();
  assert(ctx->is_complete(), "sanity");

  MarkBitMap* mark_bit_map = ctx->mark_bit_map();
  HeapWord* tams = ctx->top_at_mark_start(region);

  size_t skip_bitmap_delta = 1;
  HeapWord* start = region->bottom();
  HeapWord* end = MIN2(tams, region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and we would fall through to Step 2.
  HeapWord* cb = mark_bit_map->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched, to give
    // the prefetch enough time to work. This is why we try to scan the bitmap
    // linearly, disregarding the object size. However, since we know the
    // forwarding pointer precedes the object, we can skip over it. Once we
    // cannot trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() will touch it prematurely.

    // No variable-length arrays in standard C++, have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, oopDesc::mark_offset_in_bytes());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert(slots[c] < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert(slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert(cb < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert(cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams;
  while (cs < limit) {
    assert(cs >= tams, "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert(cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    int size = obj->size();
    cl->do_object(obj);
    cs += size;
  }
}
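
// Illustrative usage (a sketch; CountLiveClosure is a hypothetical closure):
//
//   class CountLiveClosure : public ObjectClosure {
//   public:
//     size_t _live;
//     CountLiveClosure() : _live(0) {}
//     void do_object(oop obj) { _live += obj->size(); }
//   };
//
//   CountLiveClosure cl;
//   heap->marked_object_iterate(region, &cl); // visits each live object exactly once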

template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
    _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}
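
// Note on the humongous path above (illustrative): the mark bit for a humongous
// object lives in its *start* region, while the payload may span continuation
// regions. Iteration therefore starts from humongous_start_region(), and the
// bounded closure restricts visited oops to [bottom, top) of the requested region.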

inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
  if (region_idx < _num_regions) {
    return _regions[region_idx];
  } else {
    return NULL;
  }
}

inline void ShenandoahHeap::mark_complete_marking_context() {
  _marking_context->mark_complete();
}

inline void ShenandoahHeap::mark_incomplete_marking_context() {
  _marking_context->mark_incomplete();
}

inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
  assert(_marking_context->is_complete(), "sanity");
  return _marking_context;
}

inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
  return _marking_context;
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP