< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

Print this page




  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  27 
  28 #include "classfile/javaClasses.inline.hpp"
  29 #include "gc/shared/markBitMap.inline.hpp"
  30 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  31 #include "gc/shared/suspendibleThreadSet.hpp"
  32 #include "gc/shenandoah/shenandoahAsserts.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"

  35 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
  36 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  37 #include "gc/shenandoah/shenandoahHeap.hpp"
  38 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  40 #include "gc/shenandoah/shenandoahControlThread.hpp"
  41 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  42 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  43 #include "oops/compressedOops.inline.hpp"
  44 #include "oops/oop.inline.hpp"
  45 #include "runtime/atomic.hpp"
  46 #include "runtime/prefetch.inline.hpp"
  47 #include "runtime/thread.hpp"
  48 #include "utilities/copy.hpp"
  49 #include "utilities/globalDefinitions.hpp"
  50 
// Returns the global ShenandoahHeap singleton. Asserts that heap
// initialization has already stored the instance into _heap.
inline ShenandoahHeap* ShenandoahHeap::heap() {
  assert(_heap != NULL, "Heap is not initialized yet");
  return _heap;
}


  68 }
  69 
// Returns the work gang used to run parallel tasks at safepoints.
inline WorkGang* ShenandoahHeap::get_safepoint_workers() {
  return _safepoint_workers;
}
  73 
  74 inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  75   uintptr_t region_start = ((uintptr_t) addr);
  76   uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  77   assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  78   return index;
  79 }
  80 
  81 inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  82   size_t index = heap_region_index_containing(addr);
  83   ShenandoahHeapRegion* const result = get_region(index);
  84   assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  85   return result;
  86 }
  87 
// Register thread t with the evacuation OOM protocol. Must be paired with
// leave_evacuation() around code that may evacuate objects.
inline void ShenandoahHeap::enter_evacuation(Thread* t) {
  _oom_evac_handler.enter_evacuation(t);
}
  91 
// Deregister thread t from the evacuation OOM protocol; counterpart of
// enter_evacuation().
inline void ShenandoahHeap::leave_evacuation(Thread* t) {
  _oom_evac_handler.leave_evacuation(t);
}
  95 
// If the (non-NULL) object referenced from p is in the collection set,
// resolve its forwardee and store that back through p, healing the stale
// reference. Returns the possibly-updated object.
template <class T>
inline oop ShenandoahHeap::update_with_forwarded_not_null(T* p, oop obj) {
  if (in_collection_set(obj)) {
    // A cset object is expected to be forwarded already, except when the
    // cycle was aborted (full GC, cancellation, degeneration).
    shenandoah_assert_forwarded_except(p, obj, is_full_gc_in_progress() || cancelled_gc() || is_degenerated_gc_in_progress());
    obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
    RawAccess<IS_NOT_NULL>::oop_store(p, obj);
  }
#ifdef ASSERT
  else {
    // Objects outside the cset must not carry a forwarding pointer.
    shenandoah_assert_not_forwarded(p, obj);
  }
#endif
  return obj;
}
 110 
 111 template <class T>
 112 inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
 113   T o = RawAccess<>::oop_load(p);
 114   if (!CompressedOops::is_null(o)) {
 115     oop obj = CompressedOops::decode_not_null(o);


 214 
 215   jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
 216   if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
 217     if (SuspendibleThreadSet::should_yield()) {
 218       SuspendibleThreadSet::yield();
 219     }
 220 
 221     // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
 222     // to restore to CANCELLABLE.
 223     if (prev == CANCELLABLE) {
 224       _cancelled_gc.set(CANCELLABLE);
 225     }
 226     return false;
 227   } else {
 228     return true;
 229   }
 230 }
 231 
// Reset cancellation state back to CANCELLABLE and clear any pending
// OOM-during-evacuation state, readying the heap for the next cycle.
inline void ShenandoahHeap::clear_cancelled_gc() {
  _cancelled_gc.set(CANCELLABLE);
  _oom_evac_handler.clear();
}
 236 
 237 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
 238   assert(UseTLAB, "TLABs should be enabled");
 239 
 240   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 241   if (gclab == NULL) {
 242     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
 243            "Performance: thread should have GCLAB: %s", thread->name());
 244     // No GCLABs in this thread, fallback to shared allocation
 245     return NULL;
 246   }
 247   HeapWord* obj = gclab->allocate(size);
 248   if (obj != NULL) {
 249     return obj;
 250   }
 251   // Otherwise...
 252   return allocate_from_gclab_slow(thread, size);
 253 }
 254 
// Evacuate object p out of the collection set on behalf of 'thread'.
// Returns the winning copy: either our new copy, or the copy installed by a
// racing evacuator, or the already-forwarded object when this thread hit the
// OOM-during-evac protocol. The caller must be inside an
// enter_evacuation()/leave_evacuation() scope.
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  // Track where the copy came from: GCLAB allocations can be rolled back on
  // a lost race, shared allocations cannot (see failure path below).
  bool alloc_from_gclab = true;
  HeapWord* copy = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
        copy = NULL;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == NULL) {
      // GCLAB miss (or TLABs disabled): fall back to shared GC allocation.
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == NULL) {
    // Allocation failed: notify the control thread, run the OOM-during-evac
    // protocol (which waits out other evacuators), then return whatever
    // forwardee is current.
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = oop(copy);
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  }  else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(NULL, copy_val);
    }
    shenandoah_assert_correct(NULL, result);
    return result;
  }
}
 329 
 330 template<bool RESOLVE>
 331 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
 332   oop obj = oop(entry);
 333   if (RESOLVE) {
 334     obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 335   }
 336   return !_marking_context->is_marked(obj);
 337 }
 338 
// Returns true if object p lies in the current collection set.
inline bool ShenandoahHeap::in_collection_set(oop p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in(p);
}
 343 
 344 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
 345   assert(collection_set() != NULL, "Sanity");
 346   return collection_set()->is_in_loc(p);




  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  27 
  28 #include "classfile/javaClasses.inline.hpp"
  29 #include "gc/shared/markBitMap.inline.hpp"
  30 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  31 #include "gc/shared/suspendibleThreadSet.hpp"
  32 #include "gc/shenandoah/shenandoahAsserts.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
  35 #include "gc/shenandoah/shenandoahEvacLockingBitmap.inline.hpp"
  36 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
  37 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  40 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  41 #include "gc/shenandoah/shenandoahControlThread.hpp"
  42 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  43 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  44 #include "oops/compressedOops.inline.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "runtime/atomic.hpp"
  47 #include "runtime/prefetch.inline.hpp"
  48 #include "runtime/thread.hpp"
  49 #include "utilities/copy.hpp"
  50 #include "utilities/globalDefinitions.hpp"
  51 
// Returns the global ShenandoahHeap singleton. Asserts that heap
// initialization has already stored the instance into _heap.
inline ShenandoahHeap* ShenandoahHeap::heap() {
  assert(_heap != NULL, "Heap is not initialized yet");
  return _heap;
}


  69 }
  70 
// Returns the work gang used to run parallel tasks at safepoints.
inline WorkGang* ShenandoahHeap::get_safepoint_workers() {
  return _safepoint_workers;
}
  74 
  75 inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  76   uintptr_t region_start = ((uintptr_t) addr);
  77   uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  78   assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  79   return index;
  80 }
  81 
  82 inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  83   size_t index = heap_region_index_containing(addr);
  84   ShenandoahHeapRegion* const result = get_region(index);
  85   assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  86   return result;
  87 }
  88 








// If the (non-NULL) object referenced from p is in the collection set,
// resolve its forwardee and store that back through p, healing the stale
// reference. Returns the possibly-updated object.
template <class T>
inline oop ShenandoahHeap::update_with_forwarded_not_null(T* p, oop obj) {
  if (in_collection_set(obj)) {
    // A cset object is expected to be forwarded already, except when the
    // cycle was aborted (full GC, cancellation, degeneration).
    shenandoah_assert_forwarded_except(p, obj, is_full_gc_in_progress() || cancelled_gc() || is_degenerated_gc_in_progress());
    obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
    RawAccess<IS_NOT_NULL>::oop_store(p, obj);
  }
#ifdef ASSERT
  else {
    // Objects outside the cset must not carry a forwarding pointer.
    shenandoah_assert_not_forwarded(p, obj);
  }
#endif
  return obj;
}
 103 
 104 template <class T>
 105 inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
 106   T o = RawAccess<>::oop_load(p);
 107   if (!CompressedOops::is_null(o)) {
 108     oop obj = CompressedOops::decode_not_null(o);


 207 
 208   jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
 209   if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
 210     if (SuspendibleThreadSet::should_yield()) {
 211       SuspendibleThreadSet::yield();
 212     }
 213 
 214     // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
 215     // to restore to CANCELLABLE.
 216     if (prev == CANCELLABLE) {
 217       _cancelled_gc.set(CANCELLABLE);
 218     }
 219     return false;
 220   } else {
 221     return true;
 222   }
 223 }
 224 
// Reset cancellation state back to CANCELLABLE and clear the sticky
// evacuation-failure flag, readying the heap for the next cycle.
inline void ShenandoahHeap::clear_cancelled_gc() {
  _cancelled_gc.set(CANCELLABLE);
  _evac_failed.unset();
}
 229 
 230 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
 231   assert(UseTLAB, "TLABs should be enabled");
 232 
 233   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 234   if (gclab == NULL) {
 235     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
 236            "Performance: thread should have GCLAB: %s", thread->name());
 237     // No GCLABs in this thread, fallback to shared allocation
 238     return NULL;
 239   }
 240   HeapWord* obj = gclab->allocate(size);
 241   if (obj != NULL) {
 242     return obj;
 243   }
 244   // Otherwise...
 245   return allocate_from_gclab_slow(thread, size);
 246 }
 247 
// Evacuate object p out of the collection set on behalf of 'thread'.
// Uses a per-object evacuation lock (ShenandoahEvacLocker over
// _evac_locking_bitmap) with a double-checked fast path, so exactly one
// thread performs the copy. Returns the forwardee, or p itself when
// evacuation has already failed for this cycle (_evac_failed set).
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {

  // Fast-path
  if (ShenandoahForwarding::is_forwarded(p)) {
    return ShenandoahForwarding::get_forwardee(p);
  }

  {
    ShenandoahEvacLocker evac_locker(_evac_locking_bitmap, p);

    // Fast-path, double-checked
    if (ShenandoahForwarding::is_forwarded(p)) {
      return ShenandoahForwarding::get_forwardee(p);
    }

    // Evacuation already failed this cycle: keep the object in place.
    if (_evac_failed.is_set()) {
      return p;
    }

    assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

    size_t size = p->size();
    HeapWord* copy = NULL;

#ifdef ASSERT
    if (ShenandoahOOMDuringEvacALot &&
        (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
      copy = NULL;
    } else {
#endif
      if (UseTLAB) {
        copy = allocate_from_gclab(thread, size);
      }
      if (copy == NULL) {
        // GCLAB miss (or TLABs disabled): fall back to shared GC allocation.
        ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
        copy = allocate_memory(req);
      }

#ifdef ASSERT
    }
#endif

    if (copy == NULL) {
      // Allocation failed: notify the control thread and set the sticky
      // failure flag so other evacuators also bail out quickly.
      control_thread()->handle_alloc_failure_evac(size);
      _evac_failed.set();
      return p;
    }

    // Copy the object:
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

    // Install the new forwarding pointer.
    oop copy_val = oop(copy);
    ShenandoahForwarding::update_forwardee(p, copy_val);

    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  }
}
 305 
 306 template<bool RESOLVE>
 307 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
 308   oop obj = oop(entry);
 309   if (RESOLVE) {
 310     obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 311   }
 312   return !_marking_context->is_marked(obj);
 313 }
 314 
// Returns true if object p lies in the current collection set.
inline bool ShenandoahHeap::in_collection_set(oop p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in(p);
}
 319 
 320 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
 321   assert(collection_set() != NULL, "Sanity");
 322   return collection_set()->is_in_loc(p);


< prev index next >