/*
 * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "gc/shenandoah/shenandoahHeap.hpp"

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

inline ShenandoahHeap* ShenandoahHeap::heap() {
  return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
}

inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
  // get_region() provides the bounds-check and returns null on OOB.
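  // The relaxed increment is sufficient here: workers only need to claim distinct
  // indices atomically; no ordering with other memory is implied by this operation.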
  return _heap->get_region(new_index - 1);
}

inline bool ShenandoahHeap::has_forwarded_objects() const {
  return _gc_state.is_set(HAS_FORWARDED);
}

inline WorkerThreads* ShenandoahHeap::workers() const {
  return _workers;
}

inline WorkerThreads* ShenandoahHeap::safepoint_workers() {
  return _safepoint_workers;
}

inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  return index;
}

inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  return result;
}

inline void ShenandoahHeap::enter_evacuation(Thread* t) {
  _oom_evac_handler.enter_evacuation(t);
}

inline void ShenandoahHeap::leave_evacuation(Thread* t) {
  _oom_evac_handler.leave_evacuation(t);
}

template <class T>
inline void ShenandoahHeap::update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in collection
      // set that are not really forwarded. We can still go and try and update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Unconditionally store the update: no concurrent updates expected.
      RawAccess<IS_NOT_NULL>::oop_store(p, fwd);
    }
  }
}

template <class T>
inline void ShenandoahHeap::conc_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in collection
      // set that are not really forwarded. We can still go and try CAS-update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Sanity check: we should not be updating the cset regions themselves,
      // unless we are recovering from the evacuation failure.
      shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || cancelled_gc());

      // Either we succeed in updating the reference, or something else gets in our way.
      // We don't care if that is another concurrent GC update, or another mutator update.
      atomic_update_oop(fwd, p, obj);
    }
  }
}

// Atomic updates of heap location. This is only expected to work with updating the same
// logical object with its forwardee. The reason why we need stronger-than-relaxed memory
// ordering has to do with coordination with GC barriers and mutator accesses.
//
// In essence, stronger CAS access is required to maintain the transitive chains that mutator
// accesses build by themselves. To illustrate this point, consider the following example.
//
// Suppose "o" is an object with a field "x", and the reference to "o" is stored
// to the field at "addr", which happens to be a Java volatile field. Normally, the accesses to the
// volatile field at "addr" would be matched with release/acquire barriers. This changes when GC moves
// the object under the mutator's feet.
//
// Thread 1 (Java)
//         // --- previous access starts here
//         ...
//   T1.1: store(&o.x, 1, mo_relaxed)
//   T1.2: store(&addr, o, mo_release) // volatile store
//
//         // --- new access starts here
//         // LRB: copy and install the new copy to fwdptr
//   T1.3: var copy = copy(o)
//   T1.4: cas(&fwd, t, copy, mo_release) // pointer-mediated publication
//         <access continues>
//
// Thread 2 (GC updater)
//   T2.1: var f = load(&fwd, mo_{consume|acquire}) // pointer-mediated acquisition
//   T2.2: cas(&addr, o, f, mo_release) // this method
//
// Thread 3 (Java)
//   T3.1: var o = load(&addr, mo_acquire) // volatile read
//   T3.2: if (o != null)
//   T3.3:   var r = load(&o.x, mo_relaxed)
//
// r is guaranteed to contain "1".
//
// Without GC involvement, there is a synchronizes-with edge from T1.2 to T3.1,
// which guarantees this. With GC involvement, when LRB copies the object and
// another thread updates the reference to it, we need to have the transitive edge
// from T1.4 to T2.1 (that one is guaranteed by forwarding accesses), plus the edge
// from T2.2 to T3.1 (which is brought by this CAS).
//
// Note that we do not need to "acquire" in these methods, because we do not read the
// failure witness's contents on any path, and "release" is enough.
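//
// As an illustrative sketch, the "Thread 2" path above corresponds to what
// conc_update_with_forwarded() does with these helpers (uncompressed-oop case):
//
//   oop obj = RawAccess<>::oop_load(addr);                            // read old ref
//   oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);  // T2.1
//   atomic_update_oop(fwd, addr, obj);                                // T2.2: release CAS below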
//

inline void ShenandoahHeap::atomic_update_oop(oop update, oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, update, memory_order_release);
}

inline void ShenandoahHeap::atomic_update_oop(oop update, narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop u = CompressedOops::encode(update);
  Atomic::cmpxchg(addr, compare, u, memory_order_release);
}

inline void ShenandoahHeap::atomic_update_oop(oop update, narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop c = CompressedOops::encode(compare);
  narrowOop u = CompressedOops::encode(update);
  Atomic::cmpxchg(addr, c, u, memory_order_release);
}

inline bool ShenandoahHeap::atomic_update_oop_check(oop update, oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  return (oop) Atomic::cmpxchg(addr, compare, update, memory_order_release) == compare;
}

inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop u = CompressedOops::encode(update);
  return (narrowOop) Atomic::cmpxchg(addr, compare, u, memory_order_release) == compare;
}

inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop c = CompressedOops::encode(compare);
  narrowOop u = CompressedOops::encode(update);
  return CompressedOops::decode(Atomic::cmpxchg(addr, c, u, memory_order_release)) == compare;
}

// The memory ordering discussion above does not apply for methods that store nulls:
// then, there are no transitive reads in the mutator (as we see nulls), and we can do
// relaxed memory ordering there.

inline void ShenandoahHeap::atomic_clear_oop(oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, oop(), memory_order_relaxed);
}

inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop cmp = CompressedOops::encode(compare);
  Atomic::cmpxchg(addr, cmp, narrowOop(), memory_order_relaxed);
}

inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, narrowOop(), memory_order_relaxed);
}

inline bool ShenandoahHeap::cancelled_gc() const {
  return _cancelled_gc.get() == CANCELLED;
}

inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
  if (sts_active && !cancelled_gc()) {
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }
  }
  return cancelled_gc();
}

inline void ShenandoahHeap::clear_cancelled_gc() {
  _cancelled_gc.set(CANCELLABLE);
  _oom_evac_handler.clear();
}

inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab == nullptr) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
           "Performance: thread should have GCLAB: %s", thread->name());
    // No GCLABs in this thread, fall back to shared allocation
    return nullptr;
  }
  HeapWord* obj = gclab->allocate(size);
  if (obj != nullptr) {
    return obj;
  }
  // Otherwise...
  return allocate_from_gclab_slow(thread, size);
}

inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = nullptr;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == nullptr) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  // Try to install the new forwarding pointer.
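  // (For Loom stack chunks, the new copy is relativized first, so the object we publish
  // via the forwarding pointer is self-contained; this is a no-op for ordinary objects.)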
  oop copy_val = cast_to_oop(copy);
  ContinuationGCSupport::relativize_stack_chunk(copy_val);

  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it in the next cycle.
    //
    // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(nullptr, copy_val);
    }
    shenandoah_assert_correct(nullptr, result);
    return result;
  }
}

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  oop obj = cast_to_oop(entry);
  return !_marking_context->is_marked_strong(obj);
}

inline bool ShenandoahHeap::in_collection_set(oop p) const {
  assert(collection_set() != nullptr, "Sanity");
  return collection_set()->is_in(p);
}

inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
  assert(collection_set() != nullptr, "Sanity");
  return collection_set()->is_in_loc(p);
}

inline bool ShenandoahHeap::is_stable() const {
  return _gc_state.is_clear();
}

inline bool ShenandoahHeap::is_idle() const {
  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
}

inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return _gc_state.is_set(MARKING);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _gc_state.is_set(EVACUATION);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}

inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
  return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
}

inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
  return _concurrent_strong_root_in_progress.is_set();
}

inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
  return _gc_state.is_set(WEAK_ROOTS);
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}
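
// Usage sketch (illustrative only; the closure type below is hypothetical): any closure
// with a do_object(oop) method can be passed to marked_object_iterate(), e.g. to count
// live objects in a region once marking is complete:
//
//   class CountLiveObjectsClosure : public ObjectClosure {
//     size_t _count;
//   public:
//     CountLiveObjectsClosure() : _count(0) {}
//     void do_object(oop obj) { _count++; }
//     size_t count() const { return _count; }
//   };
//
//   CountLiveObjectsClosure cl;
//   heap->marked_object_iterate(region, &cl);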

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = complete_marking_context();
  assert(ctx->is_complete(), "sanity");

  HeapWord* tams = ctx->top_at_mark_start(region);

  size_t skip_bitmap_delta = 1;
  HeapWord* start = region->bottom();
  HeapWord* end = MIN2(tams, region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = ctx->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched, to give the
    // prefetch enough time to work. This is why we try to scan the bitmap linearly,
    // disregarding the object size. However, since we know the forwarding pointer
    // precedes the object, we can skip over it. Once we cannot trust the bitmap,
    // there is no point in prefetching the oop contents, as oop->size() will
    // touch it prematurely.

    // No variable-length arrays in standard C++, have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, oopDesc::mark_offset_in_bytes());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = ctx->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert (slots[c] < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = cast_to_oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert (cb < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = cast_to_oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = ctx->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
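  // Objects past TAMS were allocated during the cycle and are implicitly live, so the
  // bitmap is not consulted here; the region is linearly parsable from TAMS up to the limit.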
  HeapWord* cs = tams;
  while (cs < limit) {
    assert (cs >= tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = cast_to_oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    size_t size = obj->size();
    cl->do_object(obj);
    cs += size;
  }
}

template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
    _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}

inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
  if (region_idx < _num_regions) {
    return _regions[region_idx];
  } else {
    return nullptr;
  }
}

inline void ShenandoahHeap::mark_complete_marking_context() {
  _marking_context->mark_complete();
}

inline void ShenandoahHeap::mark_incomplete_marking_context() {
  _marking_context->mark_incomplete();
}

inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
  assert (_marking_context->is_complete(), "sanity");
  return _marking_context;
}

inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
  return _marking_context;
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP