/*
 * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "gc/shenandoah/shenandoahHeap.hpp"

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

inline ShenandoahHeap* ShenandoahHeap::heap() {
  return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
}

inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
  // get_region() provides the bounds-check and returns null on OOB.
  return _heap->get_region(new_index - 1);
}

inline WorkerThreads* ShenandoahHeap::workers() const {
  return _workers;
}

inline WorkerThreads* ShenandoahHeap::safepoint_workers() {
  return _safepoint_workers;
}

inline void ShenandoahHeap::notify_gc_progress() {
  Atomic::store(&_gc_no_progress_count, (size_t) 0);
}

inline void ShenandoahHeap::notify_gc_no_progress() {
  Atomic::inc(&_gc_no_progress_count);
}

inline size_t ShenandoahHeap::get_gc_no_progress_count() const {
  return Atomic::load(&_gc_no_progress_count);
}

inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  return index;
}

inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  return result;
}

inline void ShenandoahHeap::enter_evacuation(Thread* t) {
  _oom_evac_handler.enter_evacuation(t);
}

inline void ShenandoahHeap::leave_evacuation(Thread* t) {
  _oom_evac_handler.leave_evacuation(t);
}

template <class T>
inline void ShenandoahHeap::update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in collection
      // set that are not really forwarded. We can still go and try to update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Unconditionally store the update: no concurrent updates expected.
      RawAccess<IS_NOT_NULL>::oop_store(p, fwd);
    }
  }
}

template <class T>
inline void ShenandoahHeap::conc_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in collection
      // set that are not really forwarded. We can still go and try to CAS-update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Sanity check: we should not be updating the cset regions themselves,
      // unless we are recovering from the evacuation failure.
      shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || cancelled_gc());

      // Either we succeed in updating the reference, or something else gets in our way.
      // We don't care if that is another concurrent GC update, or another mutator update.
      atomic_update_oop(fwd, p, obj);
    }
  }
}

// Atomic updates of heap location. This is only expected to work with updating the same
// logical object with its forwardee. The reason why we need stronger-than-relaxed memory
// ordering has to do with coordination with GC barriers and mutator accesses.
//
// In essence, stronger CAS access is required to maintain the transitive chains that mutator
// accesses build by themselves. To illustrate this point, consider the following example.
//
// Suppose "o" is the object that has a field "x" and the reference to "o" is stored
// to the field at "addr", which happens to be a Java volatile field. Normally, the accesses to the
// volatile field at "addr" would be matched with release/acquire barriers. This changes when GC moves
// the object under the mutator's feet.
//
// Thread 1 (Java)
//         // --- previous access starts here
//         ...
//   T1.1: store(&o.x, 1, mo_relaxed)
//   T1.2: store(&addr, o, mo_release) // volatile store
//
//         // --- new access starts here
//         // LRB: copy and install the new copy to fwdptr
//   T1.3: var copy = copy(o)
//   T1.4: cas(&fwd, t, copy, mo_release) // pointer-mediated publication
//         <access continues>
//
// Thread 2 (GC updater)
//   T2.1: var f = load(&fwd, mo_{consume|acquire}) // pointer-mediated acquisition
//   T2.2: cas(&addr, o, f, mo_release) // this method
//
// Thread 3 (Java)
//   T3.1: var o = load(&addr, mo_acquire) // volatile read
//   T3.2: if (o != null)
//   T3.3:   var r = load(&o.x, mo_relaxed)
//
// r is guaranteed to contain "1".
//
// Without GC involvement, there is a synchronizes-with edge from T1.2 to T3.1,
// which guarantees this. With GC involvement, when LRB copies the object and
// another thread updates the reference to it, we need to have the transitive edge
// from T1.4 to T2.1 (that one is guaranteed by forwarding accesses), plus the edge
// from T2.2 to T3.1 (which is brought by this CAS).
//
// Note that we do not need to "acquire" in these methods, because we do not read the
// contents of the failure witness on any path, and "release" is enough.
//

inline void ShenandoahHeap::atomic_update_oop(oop update, oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, update, memory_order_release);
}

inline void ShenandoahHeap::atomic_update_oop(oop update, narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop u = CompressedOops::encode(update);
  Atomic::cmpxchg(addr, compare, u, memory_order_release);
}

inline void ShenandoahHeap::atomic_update_oop(oop update, narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop c = CompressedOops::encode(compare);
  narrowOop u = CompressedOops::encode(update);
  Atomic::cmpxchg(addr, c, u, memory_order_release);
}

inline bool ShenandoahHeap::atomic_update_oop_check(oop update, oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  return (oop) Atomic::cmpxchg(addr, compare, update, memory_order_release) == compare;
}

inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop u = CompressedOops::encode(update);
  return (narrowOop) Atomic::cmpxchg(addr, compare, u, memory_order_release) == compare;
}

inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop c = CompressedOops::encode(compare);
  narrowOop u = CompressedOops::encode(update);
  return CompressedOops::decode(Atomic::cmpxchg(addr, c, u, memory_order_release)) == compare;
}

// The memory ordering discussion above does not apply to methods that store nulls:
// then, there are no transitive reads in the mutator (as it sees nulls), and we can use
// relaxed memory ordering there.
inline void ShenandoahHeap::atomic_clear_oop(oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, oop(), memory_order_relaxed);
}

inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop cmp = CompressedOops::encode(compare);
  Atomic::cmpxchg(addr, cmp, narrowOop(), memory_order_relaxed);
}

inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, narrowOop(), memory_order_relaxed);
}

inline bool ShenandoahHeap::cancelled_gc() const {
  return _cancelled_gc.get() != GCCause::_no_gc;
}

inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
  if (sts_active && !cancelled_gc()) {
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }
  }
  return cancelled_gc();
}

inline GCCause::Cause ShenandoahHeap::cancelled_cause() const {
  return _cancelled_gc.get();
}

inline void ShenandoahHeap::clear_cancelled_gc(bool clear_oom_handler) {
  _cancelled_gc.set(GCCause::_no_gc);
  if (_cancel_requested_time > 0) {
    log_debug(gc)("GC cancellation took %.3fs", (os::elapsedTime() - _cancel_requested_time));
    _cancel_requested_time = 0;
  }

  if (clear_oom_handler) {
    _oom_evac_handler.clear();
  }
}

inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab == nullptr) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
           "Performance: thread should have GCLAB: %s", thread->name());
    // No GCLABs in this thread, fallback to shared allocation
    return nullptr;
  }
  HeapWord* obj = gclab->allocate(size);
  if (obj != nullptr) {
    return obj;
  }
  return allocate_from_gclab_slow(thread, size);
}

void ShenandoahHeap::increase_object_age(oop obj, uint additional_age) {
  // This operates on a new copy of an object. This means that the object's mark-word
  // is thread-local and therefore safe to access. However, when the mark is
  // displaced (i.e. stack-locked or monitor-locked), then it must be considered
  // a shared memory location. It can be accessed by other threads.
  // In particular, a competing evacuating thread can succeed in installing its copy
  // as the forwardee and then unlock the object, at which point 'our'
  // write to the foreign stack location would potentially overwrite random
  // information on that stack. Writing to a monitor is less problematic,
  // but still not safe: while the ObjectMonitor would not randomly disappear,
  // the other thread would also write to the same displaced header location,
  // possibly increasing the age twice.
  // For all these reasons, we take the conservative approach and do not attempt
  // to increase the age when the header is displaced.
  markWord w = obj->mark();
  // The mark-word has been copied from the original object. It cannot be
  // inflating, because inflation cannot be interrupted by a safepoint,
  // and after a safepoint, a Java thread would first have to successfully
  // evacuate the object before it could inflate the monitor.
  assert(!w.is_being_inflated() || LockingMode == LM_LIGHTWEIGHT, "must not inflate monitor before evacuation of object succeeds");
  // It is possible that we have copied the object after another thread has
  // already successfully completed evacuation. While harmless (we would never
  // publish our copy), don't even attempt to modify the age when that
  // happens.
  if (!w.has_displaced_mark_helper() && !w.is_marked()) {
    w = w.set_age(MIN2(markWord::max_age, w.age() + additional_age));
    obj->set_mark(w);
  }
}

// Return the object's age, or a sentinel value when the age can't
// necessarily be determined because of concurrent locking by the
// mutator.
uint ShenandoahHeap::get_object_age(oop obj) {
  markWord w = obj->mark();
  assert(!w.is_marked(), "must not be forwarded");
  if (w.has_monitor()) {
    w = w.monitor()->header();
  } else if (w.is_being_inflated() || w.has_displaced_mark_helper()) {
    // Informs caller that we aren't able to determine the age
    return markWord::max_age + 1; // sentinel
  }
  assert(w.age() <= markWord::max_age, "Impossible!");
  return w.age();
}

inline bool ShenandoahHeap::is_in_active_generation(oop obj) const {
  if (!mode()->is_generational()) {
    // everything is in the same single generation
    assert(is_in(obj), "Otherwise shouldn't return true below");
    return true;
  }

  ShenandoahGeneration* const gen = active_generation();

  if (gen == nullptr) {
    // no collection is happening: we only expect this to be called
    // when concurrent processing is active, but that could change
    return false;
  }

  assert(is_in(obj), "only check if is in active generation for objects (" PTR_FORMAT ") in heap", p2i(obj));
  assert(gen->is_old() || gen->is_young() || gen->is_global(),
         "Active generation must be old, young, or global");

  size_t index = heap_region_containing(obj)->index();

  // No flickering!
  assert(gen == active_generation(), "Race?");

  switch (region_affiliation(index)) {
    case ShenandoahAffiliation::FREE:
      // Free regions are in old, young, and global collections
      return true;
    case ShenandoahAffiliation::YOUNG_GENERATION:
      // Young regions are in young and global collections, not in old collections
      return !gen->is_old();
    case ShenandoahAffiliation::OLD_GENERATION:
      // Old regions are in old and global collections, not in young collections
      return !gen->is_young();
    default:
      assert(false, "Bad affiliation (%d) for region " SIZE_FORMAT, region_affiliation(index), index);
      return false;
  }
}

inline bool ShenandoahHeap::is_in_young(const void* p) const {
  return is_in(p) && (_affiliations[heap_region_index_containing(p)] == ShenandoahAffiliation::YOUNG_GENERATION);
}

inline bool ShenandoahHeap::is_in_old(const void* p) const {
  return is_in(p) && (_affiliations[heap_region_index_containing(p)] == ShenandoahAffiliation::OLD_GENERATION);
}

inline bool ShenandoahHeap::is_in_old_during_young_collection(oop obj) const {
  return active_generation()->is_young() && is_in_old(obj);
}

inline ShenandoahAffiliation ShenandoahHeap::region_affiliation(const ShenandoahHeapRegion *r) const {
  return region_affiliation(r->index());
}

inline void ShenandoahHeap::assert_lock_for_affiliation(ShenandoahAffiliation orig_affiliation,
                                                        ShenandoahAffiliation new_affiliation) {
  // A lock is required when changing from FREE to NON-FREE. Though it may be possible to elide the lock when
  // transitioning from in-use to FREE, the current implementation uses a lock for this transition. A lock is
  // not required to change from YOUNG to OLD (i.e. when promoting humongous region).
  //
  //  new_affiliation is:        FREE   YOUNG   OLD
  //  orig_affiliation is: FREE   X      L       L
  //                       YOUNG  L      X
  //                       OLD    L      X       X
  //  X means state transition won't happen (so don't care)
  //  L means lock should be held
  //  Blank means no lock required because affiliation visibility will not be required until subsequent safepoint
  //
  // Note: during full GC, all transitions between states are possible. During Full GC, we should be in a safepoint.

  if (orig_affiliation == ShenandoahAffiliation::FREE) {
    shenandoah_assert_heaplocked_or_safepoint();
  }
}

inline void ShenandoahHeap::set_affiliation(ShenandoahHeapRegion* r, ShenandoahAffiliation new_affiliation) {
#ifdef ASSERT
  assert_lock_for_affiliation(region_affiliation(r), new_affiliation);
#endif
  Atomic::store(_affiliations + r->index(), (uint8_t) new_affiliation);
}

inline ShenandoahAffiliation ShenandoahHeap::region_affiliation(size_t index) const {
  return (ShenandoahAffiliation) Atomic::load(_affiliations + index);
}

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  oop obj = cast_to_oop(entry);
  return !_marking_context->is_marked_strong(obj);
}

inline bool ShenandoahHeap::in_collection_set(oop p) const {
  assert(collection_set() != nullptr, "Sanity");
  return collection_set()->is_in(p);
}

inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
  assert(collection_set() != nullptr, "Sanity");
  return collection_set()->is_in_loc(p);
}

inline bool ShenandoahHeap::is_idle() const {
  return _gc_state_changed ? _gc_state.is_clear() : ShenandoahThreadLocalData::gc_state(Thread::current()) == 0;
}

inline bool ShenandoahHeap::has_forwarded_objects() const {
  return is_gc_state(HAS_FORWARDED);
}

inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return is_gc_state(MARKING);
}

inline bool ShenandoahHeap::is_concurrent_young_mark_in_progress() const {
  return is_gc_state(YOUNG_MARKING);
}

inline bool ShenandoahHeap::is_concurrent_old_mark_in_progress() const {
  return is_gc_state(OLD_MARKING);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return is_gc_state(EVACUATION);
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return is_gc_state(UPDATE_REFS);
}

inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
  return is_gc_state(WEAK_ROOTS);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
  return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
}

inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
  return _concurrent_strong_root_in_progress.is_set();
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = marking_context();

  HeapWord* tams = ctx->top_at_mark_start(region);

  size_t skip_bitmap_delta = 1;
  HeapWord* start = region->bottom();
  HeapWord* end = MIN2(tams, region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = ctx->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched, to give
    // the prefetch enough time to work. This is why we try to scan the bitmap
    // linearly, disregarding the object size. However, since we know the
    // forwarding pointer precedes the object, we can skip over it. Once we
    // cannot trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() would touch it prematurely.

    // No variable-length arrays in standard C++, have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, oopDesc::mark_offset_in_bytes());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = ctx->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert (slots[c] < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = cast_to_oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert (cb < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = cast_to_oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = ctx->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams;
  while (cs < limit) {
    assert (cs >= tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = cast_to_oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    size_t size = obj->size();
    cl->do_object(obj);
    cs += size;
  }
}

template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
          _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}

inline ShenandoahHeapRegion* ShenandoahHeap::get_region(size_t region_idx) const {
  if (region_idx < _num_regions) {
    return _regions[region_idx];
  } else {
    return nullptr;
  }
}

inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
  assert (_marking_context->is_complete(), " sanity");
  return _marking_context;
}

inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
  return _marking_context;
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP