src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

   1 /*
   2  * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.

   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *

  25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  27 
  28 #include "gc/shenandoah/shenandoahHeap.hpp"
  29 
  30 #include "classfile/javaClasses.inline.hpp"
  31 #include "gc/shared/markBitMap.inline.hpp"
  32 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  33 #include "gc/shared/continuationGCSupport.inline.hpp"
  34 #include "gc/shared/suspendibleThreadSet.hpp"
  35 #include "gc/shared/tlab_globals.hpp"
  36 #include "gc/shenandoah/shenandoahAsserts.hpp"
  37 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  38 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
  39 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
  40 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  41 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  43 #include "gc/shenandoah/shenandoahControlThread.hpp"
  44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"

  45 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"


  46 #include "oops/compressedOops.inline.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/javaThread.hpp"
  50 #include "runtime/prefetch.inline.hpp"
  51 #include "utilities/copy.hpp"
  52 #include "utilities/globalDefinitions.hpp"
  53 
  54 inline ShenandoahHeap* ShenandoahHeap::heap() {
  55   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
  56 }
  57 
  58 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  59   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
  60   // get_region() provides the bounds-check and returns null on OOB.
  61   return _heap->get_region(new_index - 1);
  62 }
  63 
  64 inline bool ShenandoahHeap::has_forwarded_objects() const {
  65   return _gc_state.is_set(HAS_FORWARDED);

 235 }
 236 
 237 inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare) {
 238   assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
 239   Atomic::cmpxchg(addr, compare, narrowOop(), memory_order_relaxed);
 240 }
 241 
 242 inline bool ShenandoahHeap::cancelled_gc() const {
 243   return _cancelled_gc.get() == CANCELLED;
 244 }
 245 
 246 inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
 247   if (sts_active && ShenandoahSuspendibleWorkers && !cancelled_gc()) {
 248     if (SuspendibleThreadSet::should_yield()) {
 249       SuspendibleThreadSet::yield();
 250     }
 251   }
 252   return cancelled_gc();
 253 }
 254 
 255 inline void ShenandoahHeap::clear_cancelled_gc() {
 256   _cancelled_gc.set(CANCELLABLE);
 257   _oom_evac_handler.clear();
 258 }
 259 
 260 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
 261   assert(UseTLAB, "TLABs should be enabled");
 262 
 263   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 264   if (gclab == nullptr) {
 265     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
 266            "Performance: thread should have GCLAB: %s", thread->name());
 267     // No GCLABs in this thread, fallback to shared allocation
 268     return nullptr;
 269   }
 270   HeapWord* obj = gclab->allocate(size);
 271   if (obj != nullptr) {
 272     return obj;
 273   }
 274   // Otherwise, the GCLAB could not satisfy the request; take the slow path.
 275   return allocate_from_gclab_slow(thread, size);
 276 }
 277 
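     // Evacuate 'p' on behalf of 'thread': copy the object into a GCLAB or shared GC
     // allocation, then race to install the forwarding pointer. The losing racer rolls back
     // its GCLAB allocation or overwrites the stale copy with a filler object.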
 278 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
 279   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {

 280     // This thread went through the OOM during evac protocol and it is safe to return
 281     // the forward pointer. It must not attempt to evacuate any more.
 282     return ShenandoahBarrierSet::resolve_forwarded(p);
 283   }
 284 
 285   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
 286 
 287   size_t size = p->size();

 288 
 289   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
 290 
 291   bool alloc_from_gclab = true;
 292   HeapWord* copy = nullptr;


 293 
 294 #ifdef ASSERT
 295   if (ShenandoahOOMDuringEvacALot &&
 296       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
 297         copy = nullptr;
 298   } else {
 299 #endif
 300     if (UseTLAB) {
 301       copy = allocate_from_gclab(thread, size);
 302     }

 303     if (copy == nullptr) {
 304       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
 305       copy = allocate_memory(req);
 306       alloc_from_gclab = false;
 307     }
 308 #ifdef ASSERT
 309   }
 310 #endif
 311 
 312   if (copy == nullptr) {
 313     control_thread()->handle_alloc_failure_evac(size);
 314 
 315     _oom_evac_handler.handle_out_of_memory_during_evacuation();
 316 
 317     return ShenandoahBarrierSet::resolve_forwarded(p);
 318   }
 319 
 320   // Copy the object:

 321   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
 322 
 323   // Try to install the new forwarding pointer.
 324   oop copy_val = cast_to_oop(copy);
 325   ContinuationGCSupport::relativize_stack_chunk(copy_val);
 326 
 327   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
 328   if (result == copy_val) {
 329     // Successfully evacuated. Our copy is now the public one!
 330     shenandoah_assert_correct(nullptr, copy_val);
 331     return copy_val;
 332   }  else {
 333     // Failed to evacuate. We need to deal with the object that is left behind. Since this
 334     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
 335     // But if it happens to contain references to evacuated regions, those references would
 336     // not get updated for this stale copy during this cycle, and we will crash while scanning
 337     // it the next cycle.
 338     //
 339     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
 340     // object will overwrite this stale copy, or the filler object on LAB retirement will
 341     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
 342     // have to explicitly overwrite the copy with the filler object. With that overwrite,
 343     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
 344     if (alloc_from_gclab) {
 345       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
 346     } else {
 347       fill_with_object(copy, size);
 348       shenandoah_assert_correct(nullptr, copy_val);

 349     }
 350     shenandoah_assert_correct(nullptr, result);
 351     return result;
 352   }
 353 }
 354 
 355 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
 356   oop obj = cast_to_oop(entry);
 357   return !_marking_context->is_marked_strong(obj);
 358 }
 359 
 360 inline bool ShenandoahHeap::in_collection_set(oop p) const {
 361   assert(collection_set() != nullptr, "Sanity");
 362   return collection_set()->is_in(p);
 363 }
 364 
 365 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
 366   assert(collection_set() != nullptr, "Sanity");
 367   return collection_set()->is_in_loc(p);
 368 }
 369 

 370 inline bool ShenandoahHeap::is_stable() const {
 371   return _gc_state.is_clear();
 372 }
 373 
 374 inline bool ShenandoahHeap::is_idle() const {
 375   return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
 376 }
 377 
 378 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
 379   return _gc_state.is_set(MARKING);
 380 }
 381 
 382 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
 383   return _gc_state.is_set(EVACUATION);
 384 }
 385 
 386 inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
 387   return _gc_state.is_set(mask);
 388 }
 389 
 390 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
 391   return _degenerated_gc_in_progress.is_set();
 392 }
 393 
 394 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
 395   return _full_gc_in_progress.is_set();
 396 }
 397 
 398 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
 399   return _full_gc_move_in_progress.is_set();
 400 }
 401 
 402 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
 403   return _gc_state.is_set(UPDATEREFS);
 404 }
 405 
 406 inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
 407   return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
 408 }
 409 
 410 inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
 411   return _concurrent_strong_root_in_progress.is_set();
 412 }
 413 
 414 inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
 415   return _gc_state.is_set(WEAK_ROOTS);
 416 }
 417 
 418 template<class T>
 419 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
 420   marked_object_iterate(region, cl, region->top());
 421 }
 422 
 423 template<class T>
 424 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
 425   assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
 426 
 427   ShenandoahMarkingContext* const ctx = complete_marking_context();
 428   assert(ctx->is_complete(), "sanity");
 429 
 430   HeapWord* tams = ctx->top_at_mark_start(region);
 431 
 432   size_t skip_bitmap_delta = 1;
 433   HeapWord* start = region->bottom();
 434   HeapWord* end = MIN2(tams, region->end());
 435 
 436   // Step 1. Scan below the TAMS based on bitmap data.
 437   HeapWord* limit_bitmap = MIN2(limit, tams);
 438 
 439   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
 440   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
 441   HeapWord* cb = ctx->get_next_marked_addr(start, end);
 442 
 443   intx dist = ShenandoahMarkScanPrefetch;
 444   if (dist > 0) {
 445     // Batched scan that prefetches the oop data, anticipating the access to
 446     // either header, oop field, or forwarding pointer. Note that we cannot
 447     // touch anything in the oop while it is still being prefetched, to give the
 448     // prefetch enough time to work. This is why we try to scan the bitmap linearly,

 539     HeapWord* bottom = region->bottom();
 540     if (top > bottom) {
 541       region = region->humongous_start_region();
 542       ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
 543       marked_object_iterate(region, &objs);
 544     }
 545   } else {
 546     ShenandoahObjectToOopClosure<T> objs(cl);
 547     marked_object_iterate(region, &objs, top);
 548   }
 549 }
 550 
 551 inline ShenandoahHeapRegion* ShenandoahHeap::get_region(size_t region_idx) const {
 552   if (region_idx < _num_regions) {
 553     return _regions[region_idx];
 554   } else {
 555     return nullptr;
 556   }
 557 }
 558 
 559 inline void ShenandoahHeap::mark_complete_marking_context() {
 560   _marking_context->mark_complete();
 561 }
 562 
 563 inline void ShenandoahHeap::mark_incomplete_marking_context() {
 564   _marking_context->mark_incomplete();
 565 }
 566 
 567 inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
 568   assert (_marking_context->is_complete()," sanity");
 569   return _marking_context;
 570 }
 571 
 572 inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
 573   return _marking_context;
 574 }
 575 
 576 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

   1 /*
   2  * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
   3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *

  26 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  27 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  28 
  29 #include "gc/shenandoah/shenandoahHeap.hpp"
  30 
  31 #include "classfile/javaClasses.inline.hpp"
  32 #include "gc/shared/markBitMap.inline.hpp"
  33 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  34 #include "gc/shared/continuationGCSupport.inline.hpp"
  35 #include "gc/shared/suspendibleThreadSet.hpp"
  36 #include "gc/shared/tlab_globals.hpp"
  37 #include "gc/shenandoah/shenandoahAsserts.hpp"
  38 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  39 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
  40 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
  41 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  44 #include "gc/shenandoah/shenandoahControlThread.hpp"
  45 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  46 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  47 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  49 #include "gc/shenandoah/mode/shenandoahMode.hpp"
  50 #include "oops/compressedOops.inline.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "runtime/atomic.hpp"
  53 #include "runtime/javaThread.hpp"
  54 #include "runtime/prefetch.inline.hpp"
  55 #include "utilities/copy.hpp"
  56 #include "utilities/globalDefinitions.hpp"
  57 
  58 inline ShenandoahHeap* ShenandoahHeap::heap() {
  59   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
  60 }
  61 
  62 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  63   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
  64   // get_region() provides the bounds-check and returns null on OOB.
  65   return _heap->get_region(new_index - 1);
  66 }
  67 
  68 inline bool ShenandoahHeap::has_forwarded_objects() const {
  69   return _gc_state.is_set(HAS_FORWARDED);

 239 }
 240 
 241 inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare) {
 242   assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
 243   Atomic::cmpxchg(addr, compare, narrowOop(), memory_order_relaxed);
 244 }
 245 
 246 inline bool ShenandoahHeap::cancelled_gc() const {
 247   return _cancelled_gc.get() == CANCELLED;
 248 }
 249 
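     // If this worker is part of the suspendible thread set, yield to a pending safepoint
     // request before reporting whether the GC has been cancelled.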
 250 inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
 251   if (sts_active && ShenandoahSuspendibleWorkers && !cancelled_gc()) {
 252     if (SuspendibleThreadSet::should_yield()) {
 253       SuspendibleThreadSet::yield();
 254     }
 255   }
 256   return cancelled_gc();
 257 }
 258 
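     // Reset the cancellation state back to CANCELLABLE. If a cancellation request was
     // pending, log how long it took to honor it. The OOM-during-evacuation handler is
     // cleared only when the caller asks for it.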
 259 inline void ShenandoahHeap::clear_cancelled_gc(bool clear_oom_handler) {
 260   _cancelled_gc.set(CANCELLABLE);
 261   if (_cancel_requested_time > 0) {
 262     double cancel_time = os::elapsedTime() - _cancel_requested_time;
 263     log_info(gc)("GC cancellation took %.3fs", cancel_time);
 264     _cancel_requested_time = 0;
 265   }
 266 
 267   if (clear_oom_handler) {
 268     _oom_evac_handler.clear();
 269   }
 270 }
 271 
 272 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
 273   assert(UseTLAB, "TLABs should be enabled");
 274 
 275   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 276   if (gclab == nullptr) {
 277     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
 278            "Performance: thread should have GCLAB: %s", thread->name());
 279     // No GCLABs in this thread, fallback to shared allocation
 280     return nullptr;
 281   }
 282   HeapWord* obj = gclab->allocate(size);
 283   if (obj != nullptr) {
 284     return obj;
 285   }

 286   return allocate_from_gclab_slow(thread, size);
 287 }
 288 
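     // Allocate 'size' words from the thread's PLAB, which serves old-gen evacuations and
     // promotions. Returns nullptr when the thread has no PLAB, when PLAB promotions are
     // disallowed for a promotion request, or when neither the PLAB nor the PLAB slow path
     // can satisfy the request; callers then fall back to a shared allocation. Successful
     // allocations update the thread's promoted/evacuated byte accounting.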
 289 inline HeapWord* ShenandoahHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
 290   assert(UseTLAB, "TLABs should be enabled");
 291 
 292   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
 293   HeapWord* obj;
 294 
 295   if (plab == nullptr) {
 296     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
 297     // No PLABs in this thread, fallback to shared allocation
 298     return nullptr;
 299   } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
 300     return nullptr;
 301   }
 302   // If plab->word_size() <= 0, the thread's plab is not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
 303   obj = plab->allocate(size);
 304   if ((obj == nullptr) && (plab->words_remaining() < PLAB::min_size())) {
 305     // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations
 306     obj = allocate_from_plab_slow(thread, size, is_promotion);
 307   }
 308   // if plab->words_remaining() >= PLAB::min_size(), just return nullptr so we can use a shared allocation
 309   if (obj == nullptr) {
 310     return nullptr;
 311   }
 312 
 313   if (is_promotion) {
 314     ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
 315   } else {
 316     ShenandoahThreadLocalData::add_to_plab_evacuated(thread, size * HeapWordSize);
 317   }
 318   return obj;
 319 }
 320 
 321 inline ShenandoahAgeCensus* ShenandoahHeap::age_census() const {
 322   assert(mode()->is_generational(), "Only in generational mode");
 323   assert(_age_census != nullptr, "Error: not initialized");
 324   return _age_census;
 325 }
 326 
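     // Evacuate 'p' on behalf of 'thread'. In generational mode, during a young collection,
     // an object whose combined region age and mark-word age has reached the tenuring
     // threshold is first offered to the old generation; if that promotion attempt fails,
     // the object is evacuated within its current target generation instead.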
 327 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
 328   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
 329   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
 330     // This thread went through the OOM during evac protocol and it is safe to return
 331     // the forward pointer. It must not attempt to evacuate any more.
 332     return ShenandoahBarrierSet::resolve_forwarded(p);
 333   }
 334 
 335   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
 336 
 337   ShenandoahHeapRegion* r = heap_region_containing(p);
 338   assert(!r->is_humongous(), "never evacuate humongous objects");
 339 
 340   ShenandoahAffiliation target_gen = r->affiliation();
 341   if (mode()->is_generational() && ShenandoahHeap::heap()->is_gc_generation_young() &&
 342       target_gen == YOUNG_GENERATION) {
 343     markWord mark = p->mark();
 344     if (mark.is_marked()) {
 345       // Already forwarded.
 346       return ShenandoahBarrierSet::resolve_forwarded(p);
 347     }
 348     if (mark.has_displaced_mark_helper()) {
 349       // We don't want to deal with MT here just to ensure we read the right mark word.
 350       // Skip the potential promotion attempt for this one.
 351     } else if (r->age() + mark.age() >= age_census()->tenuring_threshold()) {
 352       oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
 353       if (result != nullptr) {
 354         return result;
 355       }
 356       // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
 357     }
 358   }
 359   return try_evacuate_object(p, thread, r, target_gen);
 360 }
 361 
 362 // try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
 363 // to OLD_GENERATION.
 364 inline oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
 365                                                ShenandoahAffiliation target_gen) {
 366   bool alloc_from_lab = true;
 367   bool has_plab = false;
 368   HeapWord* copy = nullptr;
 369   size_t size = p->size();
 370   bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();
 371 
 372 #ifdef ASSERT
 373   if (ShenandoahOOMDuringEvacALot &&
 374       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
 375         copy = nullptr;
 376   } else {
 377 #endif
 378     if (UseTLAB) {
 379       switch (target_gen) {
 380         case YOUNG_GENERATION: {
 381            copy = allocate_from_gclab(thread, size);
 382            if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
 383              // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve.  Try resetting
 384              // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
 385              ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
 386              copy = allocate_from_gclab(thread, size);
 387              // If we still get nullptr, we'll try a shared allocation below.
 388            }
 389            break;
 390         }
 391         case OLD_GENERATION: {
 392            PLAB* plab = ShenandoahThreadLocalData::plab(thread);
 393            if (plab != nullptr) {
 394              has_plab = true;
 395            }
 396            copy = allocate_from_plab(thread, size, is_promotion);
 397            if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) &&
 398                ShenandoahThreadLocalData::plab_retries_enabled(thread)) {
 399              // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because
 400              // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
 401              // where abundance is defined as >= PLAB::min_size().  In the former case, we try resetting the desired
 402              // PLAB size and retry PLAB allocation to avoid cascading of shared memory allocations.
 403 
 404              // In this situation, PLAB memory is precious.  We'll try to preserve our existing PLAB by forcing
 405              // this particular allocation to be shared.
 406              if (plab->words_remaining() < PLAB::min_size()) {
 407                ShenandoahThreadLocalData::set_plab_size(thread, PLAB::min_size());
 408                copy = allocate_from_plab(thread, size, is_promotion);
 409                // If we still get nullptr, we'll try a shared allocation below.
 410                if (copy == nullptr) {
 411                  // If retry fails, don't continue to retry until we have success (probably in next GC pass)
 412                  ShenandoahThreadLocalData::disable_plab_retries(thread);
 413                }
 414              }
 415              // else, copy still equals nullptr.  this causes shared allocation below, preserving this plab for future needs.
 416            }
 417            break;
 418         }
 419         default: {
 420           ShouldNotReachHere();
 421           break;
 422         }
 423       }
 424     }
 425 
 426     if (copy == nullptr) {
 427       // If we failed to allocate in LAB, we'll try a shared allocation.
 428       if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
 429         ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
 430         copy = allocate_memory(req, is_promotion);
 431         alloc_from_lab = false;
 432       }
 433       // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
 434       // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
 435       // costly.  Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
 436       // evacuation pass.  This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
 437     }
 438 #ifdef ASSERT
 439   }
 440 #endif
 441 
 442   if (copy == nullptr) {
 443     if (target_gen == OLD_GENERATION) {
 444       assert(mode()->is_generational(), "Should only be here in generational mode.");
 445       if (from_region->is_young()) {
 446         // Signal that promotion failed. Will evacuate this old object somewhere in young gen.
 447         report_promotion_failure(thread, size);
 448         return nullptr;
 449       } else {
 450         // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
 451         // after the evacuation threads have finished.
 452         handle_old_evacuation_failure();
 453       }
 454     }
 455 
 456     control_thread()->handle_alloc_failure_evac(size);
 457 
 458     _oom_evac_handler.handle_out_of_memory_during_evacuation();
 459 
 460     return ShenandoahBarrierSet::resolve_forwarded(p);
 461   }
 462 
 463   // Copy the object:
 464   _evac_tracker->begin_evacuation(thread, size * HeapWordSize);
 465   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
 466 

 467   oop copy_val = cast_to_oop(copy);
 468 
 469   if (mode()->is_generational() && target_gen == YOUNG_GENERATION && is_aging_cycle()) {
 470     ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
 471   }
 472 
 473   // Try to install the new forwarding pointer.
 474   ContinuationGCSupport::relativize_stack_chunk(copy_val);
 475 
 476   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
 477   if (result == copy_val) {
 478     // Successfully evacuated. Our copy is now the public one!
 479     _evac_tracker->end_evacuation(thread, size * HeapWordSize);
 480     if (mode()->is_generational()) {
 481       if (target_gen == OLD_GENERATION) {
 482         handle_old_evacuation(copy, size, from_region->is_young());
 483       } else {
 484         // When copying to the old generation above, we don't care
 485         // about recording object age in the census stats.
 486         assert(target_gen == YOUNG_GENERATION, "Error");
 487         // We record this census only when simulating pre-adaptive tenuring behavior, or
 488         // when we have been asked to record the census at evacuation rather than at mark
 489         if (ShenandoahGenerationalCensusAtEvac || !ShenandoahGenerationalAdaptiveTenuring) {
 490           _evac_tracker->record_age(thread, size * HeapWordSize, ShenandoahHeap::get_object_age(copy_val));
 491         }
 492       }
 493     }
 494     shenandoah_assert_correct(nullptr, copy_val);
 495     return copy_val;
 496   }  else {
 497     // Failed to evacuate. We need to deal with the object that is left behind. Since this
 498     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
 499     // But if it happens to contain references to evacuated regions, those references would
 500     // not get updated for this stale copy during this cycle, and we will crash while scanning
 501     // it the next cycle.
 502     if (alloc_from_lab) {
 503        // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
 504        // object will overwrite this stale copy, or the filler object on LAB retirement will
 505        // do this.
 506        switch (target_gen) {
 507          case YOUNG_GENERATION: {
 508              ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
 509             break;
 510          }
 511          case OLD_GENERATION: {
 512             ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
 513             if (is_promotion) {
 514               ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
 515             } else {
 516               ShenandoahThreadLocalData::subtract_from_plab_evacuated(thread, size * HeapWordSize);
 517             }
 518             break;
 519          }
 520          default: {
 521            ShouldNotReachHere();
 522            break;
 523          }
 524        }
 525     } else {
 526       // For non-LAB allocations, we have no way to retract the allocation, and
 527       // have to explicitly overwrite the copy with the filler object. With that overwrite,
 528       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
 529       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
 530       fill_with_object(copy, size);
 531       shenandoah_assert_correct(nullptr, copy_val);
 532       // For non-LAB allocations, the object has already been registered
 533     }
 534     shenandoah_assert_correct(nullptr, result);
 535     return result;
 536   }
 537 }
 538 
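     // Add 'additional_age' to the age bits of the object's (possibly displaced) mark word,
     // saturating at markWord::max_age.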
 539 void ShenandoahHeap::increase_object_age(oop obj, uint additional_age) {
 540   markWord w = obj->has_displaced_mark() ? obj->displaced_mark() : obj->mark();
 541   w = w.set_age(MIN2(markWord::max_age, w.age() + additional_age));
 542   if (obj->has_displaced_mark()) {
 543     obj->set_displaced_mark(w);
 544   } else {
 545     obj->set_mark(w);
 546   }
 547 }
 548 
 549 // Return the object's age (at a safepoint or when object isn't
 550 // mutable by the mutator)
 551 uint ShenandoahHeap::get_object_age(oop obj) {
 552   markWord w = obj->has_displaced_mark() ? obj->displaced_mark() : obj->mark();
 553   assert(w.age() <= markWord::max_age, "Impossible!");
 554   return w.age();
 555 }
 556 
 557 // Return the object's age, or a sentinel value when the age can't
 558 // necessarily be determined because of concurrent locking by the
 559 // mutator
 560 uint ShenandoahHeap::get_object_age_concurrent(oop obj) {
 561   // This is impossible to do unless we "freeze" ABA-type oscillations
 562   // With Lilliput, we can do this more easily.
 563   markWord w = obj->mark();
 564   // We can do better for objects with inflated monitor
 565   if (w.is_being_inflated() || w.has_displaced_mark_helper()) {
 566     // Informs caller that we aren't able to determine the age
 567     return markWord::max_age + 1; // sentinel
 568   }
 569   assert(w.age() <= markWord::max_age, "Impossible!");
 570   return w.age();
 571 }
 572 
 573 inline bool ShenandoahHeap::clear_old_evacuation_failure() {
 574   return _old_gen_oom_evac.try_unset();
 575 }
 576 
 577 bool ShenandoahHeap::is_in(const void* p) const {
 578   HeapWord* heap_base = (HeapWord*) base();
 579   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 580   return p >= heap_base && p < last_region_end;
 581 }
 582 
 583 inline bool ShenandoahHeap::is_in_active_generation(oop obj) const {
 584   if (!mode()->is_generational()) {
 585     // everything is the same single generation
 586     return true;
 587   }
 588 
 589   if (active_generation() == nullptr) {
 590     // No collection is happening; we only expect this to be called
 591     // when concurrent processing is active, but that could change.
 592     return false;
 593   }
 594 
 595   assert(is_in(obj), "only check if is in active generation for objects (" PTR_FORMAT ") in heap", p2i(obj));
 596   assert((active_generation() == (ShenandoahGeneration*) old_generation()) ||
 597          (active_generation() == (ShenandoahGeneration*) young_generation()) ||
 598          (active_generation() == global_generation()), "Active generation must be old, young, or global");
 599 
 600   size_t index = heap_region_containing(obj)->index();
 601   switch (_affiliations[index]) {
 602   case ShenandoahAffiliation::FREE:
 603     // Free regions are in Old, Young, Global
 604     return true;
 605   case ShenandoahAffiliation::YOUNG_GENERATION:
 606     // Young regions are in young_generation and global_generation, not in old_generation
 607     return (active_generation() != (ShenandoahGeneration*) old_generation());
 608   case ShenandoahAffiliation::OLD_GENERATION:
 609     // Old regions are in old_generation and global_generation, not in young_generation
 610     return (active_generation() != (ShenandoahGeneration*) young_generation());
 611   default:
 612     assert(false, "Bad affiliation (%d) for region " SIZE_FORMAT, _affiliations[index], index);
 613     return false;
 614   }
 615 }
 616 
 617 inline bool ShenandoahHeap::is_in_young(const void* p) const {
 618   return is_in(p) && (_affiliations[heap_region_index_containing(p)] == ShenandoahAffiliation::YOUNG_GENERATION);
 619 }
 620 
 621 inline bool ShenandoahHeap::is_in_old(const void* p) const {
 622   return is_in(p) && (_affiliations[heap_region_index_containing(p)] == ShenandoahAffiliation::OLD_GENERATION);
 623 }
 624 
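     // Reports true only while the current GC is collecting the young generation and 'obj'
     // resides in an old-generation region.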
 625 inline bool ShenandoahHeap::is_old(oop obj) const {
 626   return is_gc_generation_young() && is_in_old(obj);
 627 }
 628 
 629 inline ShenandoahAffiliation ShenandoahHeap::region_affiliation(const ShenandoahHeapRegion *r) {
 630   return (ShenandoahAffiliation) _affiliations[r->index()];
 631 }
 632 
 633 inline void ShenandoahHeap::assert_lock_for_affiliation(ShenandoahAffiliation orig_affiliation,
 634                                                         ShenandoahAffiliation new_affiliation) {
 635   // A lock is required when changing from FREE to NON-FREE.  Though it may be possible to elide the lock when
 636   // transitioning from in-use to FREE, the current implementation uses a lock for this transition.  A lock is
 637   // not required to change from YOUNG to OLD (i.e. when promoting humongous region).
 638   //
 639   //         new_affiliation is:     FREE   YOUNG   OLD
 640   //  orig_affiliation is:  FREE      X       L      L
 641   //                       YOUNG      L       X
 642   //                         OLD      L       X      X
 643   //  X means state transition won't happen (so don't care)
 644   //  L means lock should be held
 645   //  Blank means no lock required because affiliation visibility will not be required until subsequent safepoint
 646   //
 647   // Note: during full GC, all transitions between states are possible.  During Full GC, we should be in a safepoint.
 648 
 649   if ((orig_affiliation == ShenandoahAffiliation::FREE) || (new_affiliation == ShenandoahAffiliation::FREE)) {
 650     shenandoah_assert_heaplocked_or_fullgc_safepoint();
 651   }
 652 }
 653 
 654 inline void ShenandoahHeap::set_affiliation(ShenandoahHeapRegion* r, ShenandoahAffiliation new_affiliation) {
 655 #ifdef ASSERT
 656   assert_lock_for_affiliation(region_affiliation(r), new_affiliation);
 657 #endif
 658   _affiliations[r->index()] = (uint8_t) new_affiliation;
 659 }
 660 
 661 inline ShenandoahAffiliation ShenandoahHeap::region_affiliation(size_t index) {
 662   return (ShenandoahAffiliation) _affiliations[index];
 663 }
 664 
 665 inline void ShenandoahHeap::set_affiliation(size_t index, ShenandoahAffiliation new_affiliation) {
 666 #ifdef ASSERT
 667   assert_lock_for_affiliation(region_affiliation(index), new_affiliation);
 668 #endif
 669   _affiliations[index] = (uint8_t) new_affiliation;
 670 }
 671 
 672 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
 673   oop obj = cast_to_oop(entry);
 674   return !_marking_context->is_marked_strong(obj);
 675 }
 676 
 677 inline bool ShenandoahHeap::in_collection_set(oop p) const {
 678   assert(collection_set() != nullptr, "Sanity");
 679   return collection_set()->is_in(p);
 680 }
 681 
 682 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
 683   assert(collection_set() != nullptr, "Sanity");
 684   return collection_set()->is_in_loc(p);
 685 }
 686 
 687 
 688 inline bool ShenandoahHeap::is_stable() const {
 689   return _gc_state.is_clear();
 690 }
 691 
 692 inline bool ShenandoahHeap::has_evacuation_reserve_quantities() const {
 693   return _has_evacuation_reserve_quantities;
 694 }
 695 
 696 inline bool ShenandoahHeap::is_idle() const {
 697   return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
 698 }
 699 
 700 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
 701   return _gc_state.is_set(MARKING);
 702 }
 703 
 704 inline bool ShenandoahHeap::is_concurrent_young_mark_in_progress() const {
 705   return _gc_state.is_set(YOUNG_MARKING);
 706 }
 707 
 708 inline bool ShenandoahHeap::is_concurrent_old_mark_in_progress() const {
 709   return _gc_state.is_set(OLD_MARKING);
 710 }
 711 
 712 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
 713   return _gc_state.is_set(EVACUATION);
 714 }
 715 
 716 inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
 717   return _gc_state.is_set(mask);
 718 }
 719 
 720 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
 721   return _degenerated_gc_in_progress.is_set();
 722 }
 723 
 724 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
 725   return _full_gc_in_progress.is_set();
 726 }
 727 
 728 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
 729   return _full_gc_move_in_progress.is_set();
 730 }
 731 
 732 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
 733   return _gc_state.is_set(UPDATEREFS);
 734 }
 735 
 736 inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
 737   return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
 738 }
 739 
 740 inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
 741   return _concurrent_strong_root_in_progress.is_set();
 742 }
 743 
 744 inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
 745   return _gc_state.is_set(WEAK_ROOTS);
 746 }
 747 
 748 inline bool ShenandoahHeap::is_aging_cycle() const {
 749   return _is_aging_cycle.is_set();
 750 }
 751 
 752 inline bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
 753   return _prepare_for_old_mark;
 754 }
 755 
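     // Evacuation-reserve bookkeeping for generational mode: the reserve setters below
     // return the previous value, and the expended counters are read and updated with
     // atomic operations.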
 756 inline size_t ShenandoahHeap::set_promoted_reserve(size_t new_val) {
 757   size_t orig = _promoted_reserve;
 758   _promoted_reserve = new_val;
 759   return orig;
 760 }
 761 
 762 inline size_t ShenandoahHeap::get_promoted_reserve() const {
 763   return _promoted_reserve;
 764 }
 765 
 766 // returns previous value
 767 size_t ShenandoahHeap::capture_old_usage(size_t old_usage) {
 768   size_t previous_value = _captured_old_usage;
 769   _captured_old_usage = old_usage;
 770   return previous_value;
 771 }
 772 
 773 void ShenandoahHeap::set_previous_promotion(size_t promoted_bytes) {
 774   shenandoah_assert_heaplocked();
 775   _previous_promotion = promoted_bytes;
 776 }
 777 
 778 size_t ShenandoahHeap::get_previous_promotion() const {
 779   return _previous_promotion;
 780 }
 781 
 782 inline size_t ShenandoahHeap::set_old_evac_reserve(size_t new_val) {
 783   size_t orig = _old_evac_reserve;
 784   _old_evac_reserve = new_val;
 785   return orig;
 786 }
 787 
 788 inline size_t ShenandoahHeap::get_old_evac_reserve() const {
 789   return _old_evac_reserve;
 790 }
 791 
 792 inline void ShenandoahHeap::augment_old_evac_reserve(size_t increment) {
 793   _old_evac_reserve += increment;
 794 }
 795 
 796 inline void ShenandoahHeap::augment_promo_reserve(size_t increment) {
 797   _promoted_reserve += increment;
 798 }
 799 
 800 inline void ShenandoahHeap::reset_old_evac_expended() {
 801   Atomic::store(&_old_evac_expended, (size_t) 0);
 802 }
 803 
 804 inline size_t ShenandoahHeap::expend_old_evac(size_t increment) {
 805   return Atomic::add(&_old_evac_expended, increment);
 806 }
 807 
 808 inline size_t ShenandoahHeap::get_old_evac_expended() {
 809   return Atomic::load(&_old_evac_expended);
 810 }
 811 
 812 inline void ShenandoahHeap::reset_promoted_expended() {
 813   Atomic::store(&_promoted_expended, (size_t) 0);
 814 }
 815 
 816 inline size_t ShenandoahHeap::expend_promoted(size_t increment) {
 817   return Atomic::add(&_promoted_expended, increment);
 818 }
 819 
 820 inline size_t ShenandoahHeap::unexpend_promoted(size_t decrement) {
 821   return Atomic::sub(&_promoted_expended, decrement);
 822 }
 823 
 824 inline size_t ShenandoahHeap::get_promoted_expended() {
 825   return Atomic::load(&_promoted_expended);
 826 }
 827 
 828 inline size_t ShenandoahHeap::set_young_evac_reserve(size_t new_val) {
 829   size_t orig = _young_evac_reserve;
 830   _young_evac_reserve = new_val;
 831   return orig;
 832 }
 833 
 834 inline size_t ShenandoahHeap::get_young_evac_reserve() const {
 835   return _young_evac_reserve;
 836 }
 837 
 838 template<class T>
 839 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
 840   marked_object_iterate(region, cl, region->top());
 841 }
 842 
 843 template<class T>
 844 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
 845   assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
 846 
 847   ShenandoahMarkingContext* const ctx = marking_context();

 848 
 849   HeapWord* tams = ctx->top_at_mark_start(region);
 850 
 851   size_t skip_bitmap_delta = 1;
 852   HeapWord* start = region->bottom();
 853   HeapWord* end = MIN2(tams, region->end());
 854 
 855   // Step 1. Scan below the TAMS based on bitmap data.
 856   HeapWord* limit_bitmap = MIN2(limit, tams);
 857 
 858   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
 859   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
 860   HeapWord* cb = ctx->get_next_marked_addr(start, end);
 861 
 862   intx dist = ShenandoahMarkScanPrefetch;
 863   if (dist > 0) {
 864     // Batched scan that prefetches the oop data, anticipating the access to
 865     // either header, oop field, or forwarding pointer. Note that we cannot
 866     // touch anything in the oop while it is still being prefetched, to give the
 867     // prefetch enough time to work. This is why we try to scan the bitmap linearly,

 958     HeapWord* bottom = region->bottom();
 959     if (top > bottom) {
 960       region = region->humongous_start_region();
 961       ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
 962       marked_object_iterate(region, &objs);
 963     }
 964   } else {
 965     ShenandoahObjectToOopClosure<T> objs(cl);
 966     marked_object_iterate(region, &objs, top);
 967   }
 968 }
 969 
 970 inline ShenandoahHeapRegion* ShenandoahHeap::get_region(size_t region_idx) const {
 971   if (region_idx < _num_regions) {
 972     return _regions[region_idx];
 973   } else {
 974     return nullptr;
 975   }
 976 }
 977 
 978 inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
 979   assert (_marking_context->is_complete()," sanity");
 980   return _marking_context;
 981 }
 982 
 983 inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
 984   return _marking_context;
 985 }
 986 
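     // Remembered-set (card table) helpers: clear_cards_for() and mark_card_as_dirty()
     // are no-ops outside generational mode, while dirty_cards() and clear_cards() assert
     // generational mode and mark the cards spanning [start, end).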
 987 inline void ShenandoahHeap::clear_cards_for(ShenandoahHeapRegion* region) {
 988   if (mode()->is_generational()) {
 989     _card_scan->mark_range_as_empty(region->bottom(), pointer_delta(region->end(), region->bottom()));
 990   }
 991 }
 992 
 993 inline void ShenandoahHeap::dirty_cards(HeapWord* start, HeapWord* end) {
 994   assert(mode()->is_generational(), "Should only be used for generational mode");
 995   size_t words = pointer_delta(end, start);
 996   _card_scan->mark_range_as_dirty(start, words);
 997 }
 998 
 999 inline void ShenandoahHeap::clear_cards(HeapWord* start, HeapWord* end) {
1000   assert(mode()->is_generational(), "Should only be used for generational mode");
1001   size_t words = pointer_delta(end, start);
1002   _card_scan->mark_range_as_clean(start, words);
1003 }
1004 
1005 inline void ShenandoahHeap::mark_card_as_dirty(void* location) {
1006   if (mode()->is_generational()) {
1007     _card_scan->mark_card_as_dirty((HeapWord*)location);
1008   }
1009 }
1010 
1011 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP