/*
 * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "gc/shenandoah/shenandoahHeap.hpp"

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahObjectUtils.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

inline ShenandoahHeap* ShenandoahHeap::heap() {
  return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
}

inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
  // get_region() provides the bounds check and returns NULL for out-of-bounds indices.
  return _heap->get_region(new_index - 1);
}
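// A minimal usage sketch (assuming the default constructor binds the iterator
// to ShenandoahHeap::heap()): workers claim regions concurrently until next()
// runs off the end and get_region() returns NULL. Claiming via Atomic::add
// makes this safe to drive from multiple worker threads.
//
//   ShenandoahRegionIterator it;
//   for (ShenandoahHeapRegion* r = it.next(); r != NULL; r = it.next()) {
//     ... // process r
//   }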
inline bool ShenandoahHeap::has_forwarded_objects() const {
  return _gc_state.is_set(HAS_FORWARDED);
}

inline WorkGang* ShenandoahHeap::workers() const {
  return _workers;
}

inline WorkGang* ShenandoahHeap::safepoint_workers() {
  return _safepoint_workers;
}

inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  return index;
}

inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  return result;
}

inline void ShenandoahHeap::enter_evacuation(Thread* t) {
  _oom_evac_handler.enter_evacuation(t);
}

inline void ShenandoahHeap::leave_evacuation(Thread* t) {
  _oom_evac_handler.leave_evacuation(t);
}

template <class T>
inline void ShenandoahHeap::update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in the collection
      // set that are not really forwarded. We can still go and try to update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Unconditionally store the update: no concurrent updates expected.
      RawAccess<IS_NOT_NULL>::oop_store(p, fwd);
    }
  }
}

template <class T>
inline void ShenandoahHeap::conc_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in the collection
      // set that are not really forwarded. We can still go and try to CAS-update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Sanity check: we should not be updating the cset regions themselves,
      // unless we are recovering from the evacuation failure.
      shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || cancelled_gc());

      // Either we succeed in updating the reference, or something else gets in our way.
      // We don't care if that is another concurrent GC update, or another mutator update.
      // We only check that, if the location holds a non-NULL value, it is a non-forwarded
      // reference.
      oop witness = cas_oop(fwd, p, obj);
      shenandoah_assert_not_forwarded_except(p, witness, (witness == NULL) || (witness == obj));
    }
  }
}
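// The cas_oop() overloads below wrap Atomic::cmpxchg() over both uncompressed
// and compressed oop locations. Each returns the witness: the value found at
// the address, which equals the compare value iff the exchange took place.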
inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  return (oop) Atomic::cmpxchg(addr, c, n);
}

inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, narrowOop c) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop val = CompressedOops::encode(n);
  return CompressedOops::decode(Atomic::cmpxchg(addr, c, val));
}

inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop cmp = CompressedOops::encode(c);
  narrowOop val = CompressedOops::encode(n);
  return CompressedOops::decode(Atomic::cmpxchg(addr, cmp, val));
}

inline bool ShenandoahHeap::cancelled_gc() const {
  return _cancelled_gc.get() == CANCELLED;
}

inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
  if (sts_active && ShenandoahSuspendibleWorkers && !cancelled_gc()) {
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }
  }
  return cancelled_gc();
}

inline void ShenandoahHeap::clear_cancelled_gc() {
  _cancelled_gc.set(CANCELLABLE);
  _oom_evac_handler.clear();
}

inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab == NULL) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
           "Performance: thread should have GCLAB: %s", thread->name());
    // No GCLABs in this thread, fall back to shared allocation
    return NULL;
  }
  HeapWord* obj = gclab->allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_gclab_slow(thread, size);
}
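// Evacuate the object referenced by p and return the address of its canonical
// copy: either the copy this thread just installed, or the competing copy that
// won the forwarding race. Callers are expected to be inside an
// enter_evacuation()/leave_evacuation() scope, so that evacuation OOM is
// handled coherently (see the is_evac_allowed assert below).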
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM-during-evac protocol and it is safe to return
    // the forwarding pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = ShenandoahObjectUtils::size(p);

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = NULL;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == NULL) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == NULL) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = cast_to_oop(copy);
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to roll back the allocation pointer. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(NULL, copy_val);
    }
    shenandoah_assert_correct(NULL, result);
    return result;
  }
}

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  oop obj = cast_to_oop(entry);
  return !_marking_context->is_marked_strong(obj);
}

inline bool ShenandoahHeap::in_collection_set(oop p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in(p);
}

inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in_loc(p);
}
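// Global GC phase queries. _gc_state is a bitmap of phase flags; the
// predicates below are the cheap, lock-free readers used on hot paths.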
inline bool ShenandoahHeap::is_stable() const {
  return _gc_state.is_clear();
}

inline bool ShenandoahHeap::is_idle() const {
  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
}

inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return _gc_state.is_set(MARKING);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _gc_state.is_set(EVACUATION);
}

inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
  return _gc_state.is_set(mask);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}

inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
  return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
}

inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
  return _concurrent_strong_root_in_progress.is_set();
}

inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
  return _gc_state.is_set(WEAK_ROOTS);
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}
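// The bounded variant below walks the live objects of a region in two steps.
// Below TAMS (top-at-mark-start), only the mark bitmap identifies live
// objects; above TAMS, everything was allocated during the current cycle and
// is implicitly live, so the walk proceeds by object size instead.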
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = complete_marking_context();
  assert(ctx->is_complete(), "sanity");

  HeapWord* tams = ctx->top_at_mark_start(region);

  size_t skip_bitmap_delta = 1;
  HeapWord* start = region->bottom();
  HeapWord* end = MIN2(tams, region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = ctx->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched, to give
    // the prefetch enough time to work. This is why we try to scan the bitmap
    // linearly, disregarding the object size. However, since we know the
    // forwarding pointer precedes the object, we can skip over it. Once we
    // cannot trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() will touch it prematurely.

    // No variable-length arrays in standard C++, have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, oopDesc::mark_offset_in_bytes());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = ctx->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert (slots[c] < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = cast_to_oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert (cb < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = cast_to_oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = ctx->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams;
  while (cs < limit) {
    assert (cs >= tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = cast_to_oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    size_t size = ShenandoahObjectUtils::size(obj);
    cl->do_object(obj);
    cs += size;
  }
}
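// Adapters that turn an oop closure into an ObjectClosure, so oop fields can
// be visited via marked_object_iterate(). The bounded variant clips field
// iteration to [bottom, top), which matters for humongous objects that span
// multiple regions.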
template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
    _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}

inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
  if (region_idx < _num_regions) {
    return _regions[region_idx];
  } else {
    return NULL;
  }
}

inline void ShenandoahHeap::mark_complete_marking_context() {
  _marking_context->mark_complete();
}

inline void ShenandoahHeap::mark_incomplete_marking_context() {
  _marking_context->mark_incomplete();
}

inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
  assert(_marking_context->is_complete(), "sanity");
  return _marking_context;
}

inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
  return _marking_context;
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP