/*
 * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "gc/shenandoah/shenandoahHeap.hpp"

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

inline ShenandoahHeap* ShenandoahHeap::heap() {
  return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
}

inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
  // get_region() provides the bounds-check and returns null on OOB.
  return _heap->get_region(new_index - 1);
}

inline bool ShenandoahHeap::has_forwarded_objects() const {
  return _gc_state.is_set(HAS_FORWARDED);
}

inline WorkerThreads* ShenandoahHeap::workers() const {
  return _workers;
}

inline WorkerThreads* ShenandoahHeap::safepoint_workers() {
  return _safepoint_workers;
}

inline void ShenandoahHeap::notify_gc_progress() {
  Atomic::store(&_gc_no_progress_count, (size_t) 0);
}

inline void ShenandoahHeap::notify_gc_no_progress() {
  Atomic::inc(&_gc_no_progress_count);
}

inline size_t ShenandoahHeap::get_gc_no_progress_count() const {
  return Atomic::load(&_gc_no_progress_count);
}

inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  return index;
}

inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  return result;
}

inline void ShenandoahHeap::enter_evacuation(Thread* t) {
  _oom_evac_handler.enter_evacuation(t);
}

inline void ShenandoahHeap::leave_evacuation(Thread* t) {
  _oom_evac_handler.leave_evacuation(t);
}

template <class T>
inline void ShenandoahHeap::update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in the collection
      // set that are not really forwarded. We can still go and try to update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Unconditionally store the update: no concurrent updates expected.
      RawAccess<IS_NOT_NULL>::oop_store(p, fwd);
    }
  }
}

template <class T>
inline void ShenandoahHeap::conc_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in the collection
      // set that are not really forwarded. We can still go and try to CAS-update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Sanity check: we should not be updating the cset regions themselves,
      // unless we are recovering from the evacuation failure.
      shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || cancelled_gc());

      // Either we succeed in updating the reference, or something else gets in our way.
      // We don't care if that is another concurrent GC update, or another mutator update.
      atomic_update_oop(fwd, p, obj);
    }
  }
}

// Atomic updates of heap location. This is only expected to work with updating the same
// logical object with its forwardee. The reason why we need stronger-than-relaxed memory
// ordering has to do with coordination with GC barriers and mutator accesses.
//
// In essence, stronger CAS access is required to maintain the transitive chains that mutator
// accesses build by themselves. To illustrate this point, consider the following example.
//
// Suppose "o" is the object that has a field "x", and the reference to "o" is stored
// to the field at "addr", which happens to be a Java volatile field. Normally, the accesses to
// the volatile field at "addr" would be matched with release/acquire barriers. This changes
// when GC moves the object under the mutator's feet.
//
// Thread 1 (Java)
//         // --- previous access starts here
//         ...
//   T1.1: store(&o.x, 1, mo_relaxed)
//   T1.2: store(&addr, o, mo_release) // volatile store
//
//         // --- new access starts here
//         // LRB: copy and install the new copy to fwdptr
//   T1.3: var copy = copy(o)
//   T1.4: cas(&fwd, t, copy, mo_release) // pointer-mediated publication
//         <access continues>
//
// Thread 2 (GC updater)
//   T2.1: var f = load(&fwd, mo_{consume|acquire}) // pointer-mediated acquisition
//   T2.2: cas(&addr, o, f, mo_release) // this method
//
// Thread 3 (Java)
//   T3.1: var o = load(&addr, mo_acquire) // volatile read
//   T3.2: if (o != null)
//   T3.3:   var r = load(&o.x, mo_relaxed)
//
// r is guaranteed to contain "1".
//
// Without GC involvement, there is a synchronizes-with edge from T1.2 to T3.1,
// which guarantees this. With GC involvement, when LRB copies the object and
// another thread updates the reference to it, we need to have the transitive edge
// from T1.4 to T2.1 (that one is guaranteed by forwarding accesses), plus the edge
// from T2.2 to T3.1 (which is brought by this CAS).
//
// Note that we do not need to "acquire" in these methods, because we do not read the
// failure witness's contents on any path, and "release" is enough.
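//
// Illustrative sketch (not part of the original header): the same release-CAS
// pattern expressed with plain std::atomic, roughly matching steps T2.1/T2.2
// above. The names "Payload", "fwd", "addr" and "o" are hypothetical stand-ins:
//
//   std::atomic<Payload*> fwd;    // forwarding pointer, published with release
//   std::atomic<Payload*> addr;   // the heap slot being updated
//
//   Payload* f = fwd.load(std::memory_order_acquire);        // T2.1
//   Payload* expected = o;
//   addr.compare_exchange_strong(expected, f,
//                                std::memory_order_release,  // T2.2: success order
//                                std::memory_order_relaxed); // failure witness unused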

inline void ShenandoahHeap::atomic_update_oop(oop update, oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, update, memory_order_release);
}

inline void ShenandoahHeap::atomic_update_oop(oop update, narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop u = CompressedOops::encode(update);
  Atomic::cmpxchg(addr, compare, u, memory_order_release);
}

inline void ShenandoahHeap::atomic_update_oop(oop update, narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop c = CompressedOops::encode(compare);
  narrowOop u = CompressedOops::encode(update);
  Atomic::cmpxchg(addr, c, u, memory_order_release);
}

inline bool ShenandoahHeap::atomic_update_oop_check(oop update, oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  return (oop) Atomic::cmpxchg(addr, compare, update, memory_order_release) == compare;
}

inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop u = CompressedOops::encode(update);
  return (narrowOop) Atomic::cmpxchg(addr, compare, u, memory_order_release) == compare;
}

inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop c = CompressedOops::encode(compare);
  narrowOop u = CompressedOops::encode(update);
  return CompressedOops::decode(Atomic::cmpxchg(addr, c, u, memory_order_release)) == compare;
}
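// Illustrative sketch (not part of the original header): the _check variants
// report whether our CAS won, which a caller could use roughly like this
// (the recovery policy shown is hypothetical):
//
//   if (!atomic_update_oop_check(fwd, addr, obj)) {
//     // Somebody else (GC or mutator) updated the slot first: it no longer
//     // holds "obj", so re-read the slot before deciding to retry or bail.
//   }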

// The memory ordering discussion above does not apply for methods that store nulls:
// then, there are no transitive reads in the mutator (as we see nulls), and we can do
// relaxed memory ordering there.

inline void ShenandoahHeap::atomic_clear_oop(oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, oop(), memory_order_relaxed);
}

inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop cmp = CompressedOops::encode(compare);
  Atomic::cmpxchg(addr, cmp, narrowOop(), memory_order_relaxed);
}

inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, narrowOop(), memory_order_relaxed);
}

inline bool ShenandoahHeap::cancelled_gc() const {
  return _cancelled_gc.get() == CANCELLED;
}

inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
  if (sts_active && !cancelled_gc()) {
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }
  }
  return cancelled_gc();
}

inline void ShenandoahHeap::clear_cancelled_gc() {
  _cancelled_gc.set(CANCELLABLE);
  _oom_evac_handler.clear();
}

inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab == nullptr) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
           "Performance: thread should have GCLAB: %s", thread->name());
    // No GCLABs in this thread, fall back to shared allocation
    return nullptr;
  }
  HeapWord* obj = gclab->allocate(size);
  if (obj != nullptr) {
    return obj;
  }
  // Otherwise...
  return allocate_from_gclab_slow(thread, size);
}
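// Illustrative sketch (not part of the original header): evacuate_object() is
// expected to run inside the OOM-during-evac scope established by
// enter_evacuation()/leave_evacuation(), as the assert below checks; roughly
// ("obj" and "thread" are hypothetical):
//
//   ShenandoahHeap* const heap = ShenandoahHeap::heap();
//   heap->enter_evacuation(thread);
//   oop fwd = heap->evacuate_object(obj, thread);
//   heap->leave_evacuation(thread);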

inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM-during-evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->forward_safe_size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = nullptr;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == nullptr) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
  oop copy_val = cast_to_oop(copy);

  if (UseCompactObjectHeaders) {
    // The copy above is not atomic. Make sure we have seen the proper mark
    // and re-install it into the copy, so that Klass* is guaranteed to be correct.
    markWord mark = copy_val->mark();
    if (!mark.is_marked()) {
      copy_val->set_mark(mark);
      ContinuationGCSupport::relativize_stack_chunk(copy_val);
    } else {
      // If we copied a mark-word that indicates 'forwarded' state, the object
      // installation would not succeed. We cannot access Klass* anymore either.
      // Skip the transformation.
    }
  } else {
    ContinuationGCSupport::relativize_stack_chunk(copy_val);
  }

  // Try to install the new forwarding pointer.
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(nullptr, copy_val);
    }
    shenandoah_assert_correct(nullptr, result);
    return result;
  }
}

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  oop obj = cast_to_oop(entry);
  return !_marking_context->is_marked_strong(obj);
}

inline bool ShenandoahHeap::in_collection_set(oop p) const {
  assert(collection_set() != nullptr, "Sanity");
  return collection_set()->is_in(p);
}

inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
  assert(collection_set() != nullptr, "Sanity");
  return collection_set()->is_in_loc(p);
}

inline bool ShenandoahHeap::is_stable() const {
  return _gc_state.is_clear();
}

inline bool ShenandoahHeap::is_idle() const {
  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
}

inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return _gc_state.is_set(MARKING);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _gc_state.is_set(EVACUATION);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}

inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
  return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
}

inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
  return _concurrent_strong_root_in_progress.is_set();
}

inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
  return _gc_state.is_set(WEAK_ROOTS);
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(!region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = complete_marking_context();
  assert(ctx->is_complete(), "sanity");

  HeapWord* tams = ctx->top_at_mark_start(region);

  size_t skip_bitmap_delta = 1;
  HeapWord* start = region->bottom();
  HeapWord* end = MIN2(tams, region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = ctx->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched, to give the
    // prefetch enough time to work. This is why we try to scan the bitmap linearly,
    // disregarding the object size. However, since we know the forwarding pointer
    // precedes the object, we can skip over it. Once we cannot trust the bitmap,
    // there is no point in prefetching the oop contents, as oop->size() will
    // touch it prematurely.

    // No variable-length arrays in standard C++, have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, oopDesc::mark_offset_in_bytes());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = ctx->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert(slots[c] < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert(slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = cast_to_oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert(cb < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert(cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = cast_to_oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = ctx->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams;
  while (cs < limit) {
    assert(cs >= tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert(cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = cast_to_oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    size_t size = obj->forward_safe_size();
    cl->do_object(obj);
    cs += size;
  }
}

template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
          _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}

inline ShenandoahHeapRegion* ShenandoahHeap::get_region(size_t region_idx) const {
  if (region_idx < _num_regions) {
    return _regions[region_idx];
  } else {
    return nullptr;
  }
}
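// Illustrative sketch (not part of the original header): the null-on-OOB
// behavior of get_region() is what terminates ShenandoahRegionIterator loops,
// roughly ("process" is a hypothetical callback):
//
//   ShenandoahRegionIterator it;
//   for (ShenandoahHeapRegion* r = it.next(); r != nullptr; r = it.next()) {
//     process(r);
//   }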

inline void ShenandoahHeap::mark_complete_marking_context() {
  _marking_context->mark_complete();
}

inline void ShenandoahHeap::mark_incomplete_marking_context() {
  _marking_context->mark_incomplete();
}

inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
  assert(_marking_context->is_complete(), "sanity");
  return _marking_context;
}

inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
  return _marking_context;
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP