/*
 * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "gc_implementation/shared/markBitMap.inline.hpp"
#include "memory/threadLocalAllocBuffer.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahAsserts.hpp"
#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

inline ShenandoahHeap* ShenandoahHeap::heap() {
  assert(_heap != NULL, "Heap is not initialized yet");
  return _heap;
}

inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add((size_t) 1, &_index);
  // get_region() provides the bounds-check and returns NULL on OOB.
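  // Atomic::add returns the incremented value, so the slot this thread has
  // claimed is new_index - 1.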
  return _heap->get_region(new_index - 1);
}

inline bool ShenandoahHeap::has_forwarded_objects() const {
  return _gc_state.is_set(HAS_FORWARDED);
}

inline ShenandoahWorkGang* ShenandoahHeap::workers() const {
  return (ShenandoahWorkGang*)_workers;
}

inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  assert(index < num_regions(), err_msg("Region index is in bounds: " PTR_FORMAT, p2i(addr)));
  return index;
}

inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), err_msg("Heap region contains the address: " PTR_FORMAT, p2i(addr)));
  return result;
}

template <class T>
inline oop ShenandoahHeap::update_with_forwarded_not_null(T* p, oop obj) {
  if (in_collection_set(obj)) {
    shenandoah_assert_forwarded_except(p, obj, is_full_gc_in_progress() || cancelled_gc() || is_degenerated_gc_in_progress());
    obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
    oopDesc::encode_store_heap_oop(p, obj);
  }
#ifdef ASSERT
  else {
    shenandoah_assert_not_forwarded(p, obj);
  }
#endif
  return obj;
}

template <class T>
inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    return maybe_update_with_forwarded_not_null(p, obj);
  } else {
    return NULL;
  }
}

template <class T>
inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(o)) {
    oop heap_oop = oopDesc::decode_heap_oop_not_null(o);
    if (in_collection_set(heap_oop)) {
      oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
      if (forwarded_oop == heap_oop) {
        forwarded_oop = evacuate_object(heap_oop, Thread::current());
      }
      oop prev = cas_oop(forwarded_oop, p, heap_oop);
      if (prev == heap_oop) {
        return forwarded_oop;
      } else {
        return NULL;
      }
    }
    return heap_oop;
  } else {
    return NULL;
  }
}

inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) {
  assert(is_ptr_aligned(addr, sizeof(narrowOop)), err_msg("Address should be aligned: " PTR_FORMAT, p2i(addr)));
  return (oop) Atomic::cmpxchg_ptr(n, addr, c);
}

inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, narrowOop c) {
  narrowOop val = oopDesc::encode_heap_oop(n);
  return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, c));
}

inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) {
  assert(is_ptr_aligned(addr, sizeof(narrowOop)), err_msg("Address should be aligned: " PTR_FORMAT, p2i(addr)));
  narrowOop cmp = oopDesc::encode_heap_oop(c);
  narrowOop val = oopDesc::encode_heap_oop(n);
  return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, cmp));
}
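
// Attempts to install the to-space copy at location p when the referent is in
// the collection set. Returns the value now expected at p; on a racing update
// it re-resolves the witnessed value, which may be NULL.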
template <class T>
inline oop ShenandoahHeap::maybe_update_with_forwarded_not_null(T* p, oop heap_oop) {
  shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || is_full_gc_in_progress() || is_degenerated_gc_in_progress());
  shenandoah_assert_correct(p, heap_oop);

  if (in_collection_set(heap_oop)) {
    oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);

    shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress() || is_degenerated_gc_in_progress());
    shenandoah_assert_not_forwarded(p, forwarded_oop);
    shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_gc());

    // If this fails, another thread wrote to p before us. That write is logged
    // in SATB, and the reference will be updated later.
    oop witness = cas_oop(forwarded_oop, p, heap_oop);

    if (witness != heap_oop) {
      // CAS failed, someone beat us to it. Normally, we would return the failure
      // witness, because that would be the proper write of the to-space object,
      // enforced by strong barriers. However, there is a corner case with arraycopy.
      // A Java thread can beat us with an arraycopy that first copies the array,
      // which potentially contains from-space refs, and only afterwards updates
      // all from-space refs to to-space refs. That leaves a short window where
      // the new array elements can be from-space. In this case, we can just
      // resolve the result again. As we resolve, we need to consider that the
      // contended write might have been NULL.
      oop result = ShenandoahBarrierSet::resolve_forwarded(witness);
      shenandoah_assert_not_forwarded_except(p, result, (result == NULL));
      shenandoah_assert_not_in_cset_except(p, result, (result == NULL) || cancelled_gc());
      return result;
    } else {
      // Success! We have updated with the known to-space copy. We have already
      // asserted it is sane.
      return forwarded_oop;
    }
  } else {
    shenandoah_assert_not_forwarded(p, heap_oop);
    return heap_oop;
  }
}

inline bool ShenandoahHeap::cancelled_gc() const {
  return _cancelled_gc.is_set();
}

inline bool ShenandoahHeap::try_cancel_gc() {
  return _cancelled_gc.try_set();
}

inline void ShenandoahHeap::clear_cancelled_gc() {
  _cancelled_gc.unset();
  _oom_evac_handler.clear();
}

inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  assert(UseTLAB, "TLABs should be enabled");

  if (!thread->gclab().is_initialized()) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
           err_msg("Performance: thread should have GCLAB: %s", thread->name()));
    // No GCLABs in this thread, fall back to shared allocation
    return NULL;
  }
  HeapWord* obj = thread->gclab().allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_gclab_slow(thread, size);
}

inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (Thread::current()->is_oom_during_evac()) {
    // This thread went through the OOM-during-evac protocol, and it is safe to
    // return the forward pointer. It must not attempt to evacuate any more.
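    // Once the OOM protocol has fired, no thread evacuates anymore, so every
    // thread resolves to the same (possibly still from-space) copy, which
    // keeps the view of the object consistent across threads.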
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(thread->is_evac_allowed(), "must be enclosed in oom-evac scope");

  size_t size = p->size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = NULL;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == NULL) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == NULL) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words((HeapWord*) p, copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = oop(copy);
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind.
    // Since this new allocation is certainly after TAMS, it will be considered
    // live in the next cycle. But if it happens to contain references to
    // evacuated regions, those references would not get updated for this stale
    // copy during this cycle, and we would crash while scanning it in the next
    // cycle.
    //
    // For GCLAB allocations, it is enough to roll back the allocation pointer:
    // either the next object will overwrite this stale copy, or the filler
    // object on LAB retirement will do it. For non-GCLAB allocations, we have
    // no way to retract the allocation, and have to explicitly overwrite the
    // copy with the filler object. With that overwrite, we have to keep the
    // fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      thread->gclab().rollback(size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(NULL, copy_val);
    }
    shenandoah_assert_correct(NULL, result);
    return result;
  }
}

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  return !_marking_context->is_marked(oop(entry));
}

inline bool ShenandoahHeap::in_collection_set(oop p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in(p);
}

inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in_loc(p);
}

inline bool ShenandoahHeap::is_stable() const {
  return _gc_state.is_clear();
}

inline bool ShenandoahHeap::is_idle() const {
  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
}

inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return _gc_state.is_set(MARKING);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _gc_state.is_set(EVACUATION);
}

inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
  return _gc_state.is_set(mask);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(!region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = complete_marking_context();
  assert(ctx->is_complete(), "sanity");

  MarkBitMap* mark_bit_map = ctx->mark_bit_map();
  HeapWord* tams = ctx->top_at_mark_start(region);

  size_t skip_bitmap_delta = 1;
  HeapWord* start = region->bottom();
  HeapWord* end = MIN2(tams, region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = mark_bit_map->getNextMarkedWordAddress(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either the header, an oop field, or the forwarding pointer. Note that we
    // cannot touch anything in the oop while it is still being prefetched: the
    // prefetch needs enough lead time to work. This is why we scan the bitmap
    // linearly, disregarding the object size. However, since we know the
    // forwarding pointer precedes the object, we can skip over it. Once we can
    // no longer trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() would touch them prematurely.
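    //
    // Each batch therefore runs in two phases: first walk the bitmap and issue
    // the prefetches, then revisit the batched addresses and apply the closure.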

    // Standard C++ has no variable-length arrays, so allocate enough slots
    // to fit the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, oopDesc::mark_offset_in_bytes());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = mark_bit_map->getNextMarkedWordAddress(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert(slots[c] < tams, err_msg("only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams)));
        assert(slots[c] < limit, err_msg("only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit)));
        oop obj = oop(slots[c]);
        assert(!oopDesc::is_null(obj), "sanity");
        assert(obj->is_oop(), "sanity");
        assert(_marking_context->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert(cb < tams, err_msg("only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams)));
      assert(cb < limit, err_msg("only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit)));
      oop obj = oop(cb);
      assert(!oopDesc::is_null(obj), "sanity");
      assert(obj->is_oop(), "sanity");
      assert(_marking_context->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = mark_bit_map->getNextMarkedWordAddress(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
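  //
  // Objects allocated during marking land above TAMS and are not necessarily
  // marked in the bitmap; they are treated as implicitly live, which is why
  // this pass walks them by their actual sizes instead of by bitmap data.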
  HeapWord* cs = tams;
  while (cs < limit) {
    assert(cs >= tams, err_msg("only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams)));
    assert(cs < limit, err_msg("only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit)));
    oop obj = oop(cs);
    assert(!oopDesc::is_null(obj), "sanity");
    assert(obj->is_oop(), "sanity");
    assert(_marking_context->is_marked(obj), "object expected to be marked");
    int size = obj->size();
    cl->do_object(obj);
    cs += size;
  }
}

template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
          _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}

inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
  if (region_idx < _num_regions) {
    return _regions[region_idx];
  } else {
    return NULL;
  }
}

inline void ShenandoahHeap::mark_complete_marking_context() {
  _marking_context->mark_complete();
}

inline void ShenandoahHeap::mark_incomplete_marking_context() {
  _marking_context->mark_incomplete();
}

inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
  assert(_marking_context->is_complete(), "sanity");
  return _marking_context;
}

inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
  return _marking_context;
}

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP