src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

  1 /*
  2  * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 27 
 28 #include "gc/shenandoah/shenandoahHeap.hpp"
 29 
 30 #include "classfile/javaClasses.inline.hpp"
 31 #include "gc/shared/markBitMap.inline.hpp"
 32 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 33 #include "gc/shared/continuationGCSupport.inline.hpp"
 34 #include "gc/shared/suspendibleThreadSet.hpp"
 35 #include "gc/shared/tlab_globals.hpp"
 36 #include "gc/shenandoah/shenandoahAsserts.hpp"
 37 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 38 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
 39 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
 40 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 41 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 42 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 43 #include "gc/shenandoah/shenandoahControlThread.hpp"
 44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 45 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 46 #include "oops/compressedOops.inline.hpp"
 47 #include "oops/oop.inline.hpp"
 48 #include "runtime/atomic.hpp"
 49 #include "runtime/javaThread.hpp"
 50 #include "runtime/prefetch.inline.hpp"
 51 #include "utilities/copy.hpp"
 52 #include "utilities/globalDefinitions.hpp"
 53 
 54 inline ShenandoahHeap* ShenandoahHeap::heap() {
 55   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
 56 }
 57 
 58 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
 59   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
 60   // get_region() provides the bounds-check and returns null on OOB.
 61   return _heap->get_region(new_index - 1);
 62 }
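
A minimal usage sketch (illustrative, not part of this file): GC workers share one iterator and claim regions via the relaxed fetch-and-add above, until next() runs off the end of the region array and returns null.

    // Hypothetical worker loop; process_region() is a placeholder.
    void drain_regions(ShenandoahRegionIterator& it) {
      for (ShenandoahHeapRegion* r = it.next(); r != nullptr; r = it.next()) {
        process_region(r); // each region is claimed by exactly one worker
      }
    }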
 63 
 64 inline bool ShenandoahHeap::has_forwarded_objects() const {
 65   return _gc_state.is_set(HAS_FORWARDED);
 66 }
 67 
 68 inline WorkerThreads* ShenandoahHeap::workers() const {
 69   return _workers;
 70 }
 71 
 72 inline WorkerThreads* ShenandoahHeap::safepoint_workers() {
 73   return _safepoint_workers;
 74 }
 75 
 76 inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
 77   uintptr_t region_start = ((uintptr_t) addr);
 78   uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
 79   assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
 80   return index;
 81 }
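
A worked example of the index arithmetic above (numbers are illustrative: assume 256 KB regions, so region_size_bytes_shift() == 18):

    uintptr_t heap_base = 0x80000000u;                 // assumed base() value
    uintptr_t addr      = heap_base + 5 * 1024 * 1024; // 5 MB into the heap
    size_t    index     = (addr - heap_base) >> 18;    // == 20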
 82 
 83 inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
 84   size_t index = heap_region_index_containing(addr);
 85   ShenandoahHeapRegion* const result = get_region(index);
 86   assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
 87   return result;
 88 }
 89 
 90 inline void ShenandoahHeap::enter_evacuation(Thread* t) {
 91   _oom_evac_handler.enter_evacuation(t);
 92 }
 93 
 94 inline void ShenandoahHeap::leave_evacuation(Thread* t) {
 95   _oom_evac_handler.leave_evacuation(t);
 96 }
 97 
 98 template <class T>
 99 inline void ShenandoahHeap::update_with_forwarded(T* p) {
100   T o = RawAccess<>::oop_load(p);
101   if (!CompressedOops::is_null(o)) {
102     oop obj = CompressedOops::decode_not_null(o);
103     if (in_collection_set(obj)) {

223 // then, there are no transitive reads in the mutator (as we see nulls), and we can
224 // use relaxed memory ordering there.
225 
226 inline void ShenandoahHeap::atomic_clear_oop(oop* addr, oop compare) {
227   assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
228   Atomic::cmpxchg(addr, compare, oop(), memory_order_relaxed);
229 }
230 
231 inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, oop compare) {
232   assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
233   narrowOop cmp = CompressedOops::encode(compare);
234   Atomic::cmpxchg(addr, cmp, narrowOop(), memory_order_relaxed);
235 }
236 
237 inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare) {
238   assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
239   Atomic::cmpxchg(addr, compare, narrowOop(), memory_order_relaxed);
240 }
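
A hedged sketch of the intended call pattern (the helper is illustrative): a concurrent weak-root scan nulls out referents that died during marking, relying on the relaxed CAS defined above.

    // Clear *p only if it still holds the dead object we loaded; a mutator
    // racing with us observes either the dead oop or null, never a stale
    // forwarding value.
    void clear_dead_slot(ShenandoahHeap* heap, oop* p) {
      oop obj = RawAccess<>::oop_load(p);
      if (obj != nullptr && !heap->marking_context()->is_marked(obj)) {
        heap->atomic_clear_oop(p, obj);
      }
    }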
241 
242 inline bool ShenandoahHeap::cancelled_gc() const {
243   return _cancelled_gc.get() == CANCELLED;
244 }
245 
246 inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
247   if (sts_active && !cancelled_gc()) {
248     if (SuspendibleThreadSet::should_yield()) {
249       SuspendibleThreadSet::yield();
250     }
251   }
252   return cancelled_gc();
253 }
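
A sketch of how a concurrent phase might use this (the loop body names are hypothetical): yield to a pending safepoint first, then bail out if the GC has been cancelled in the meantime.

    while (has_more_work()) {                        // hypothetical work source
      if (heap->check_cancelled_gc_and_yield(true)) {
        return;                                      // cancelled: unwind the phase
      }
      do_work_chunk();                               // hypothetical unit of work
    }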
254 
255 inline void ShenandoahHeap::clear_cancelled_gc() {
256   _cancelled_gc.set(CANCELLABLE);
257   _oom_evac_handler.clear();
258 }
259 
260 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
261   assert(UseTLAB, "TLABs should be enabled");
262 
263   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
264   if (gclab == nullptr) {
265     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
266            "Performance: thread should have GCLAB: %s", thread->name());
267     // No GCLABs in this thread, fall back to shared allocation
268     return nullptr;
269   }
270   HeapWord* obj = gclab->allocate(size);
271   if (obj != nullptr) {
272     return obj;
273   }
274   // Otherwise, retire this GCLAB and try to allocate from a fresh one in the slow path.
275   return allocate_from_gclab_slow(thread, size);
276 }
277 
278 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
279   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
280     // This thread went through the OOM during evac protocol and it is safe to return
281     // the forward pointer. It must not attempt to evacuate any more.
282     return ShenandoahBarrierSet::resolve_forwarded(p);
283   }
284 
285   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
286 
287   size_t size = p->size();
288 
289   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
290 
291   bool alloc_from_gclab = true;
292   HeapWord* copy = nullptr;
293 
294 #ifdef ASSERT
295   if (ShenandoahOOMDuringEvacALot &&
296       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
297     copy = nullptr;
298   } else {
299 #endif
300     if (UseTLAB) {
301       copy = allocate_from_gclab(thread, size);
302     }
303     if (copy == nullptr) {
304       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
305       copy = allocate_memory(req);
306       alloc_from_gclab = false;
307     }
308 #ifdef ASSERT
309   }
310 #endif
311 
312   if (copy == nullptr) {
313     control_thread()->handle_alloc_failure_evac(size);
314 
315     _oom_evac_handler.handle_out_of_memory_during_evacuation();
316 
317     return ShenandoahBarrierSet::resolve_forwarded(p);
318   }
319 
320   // Copy the object:
321   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
322 
323   // Try to install the new forwarding pointer.
324   oop copy_val = cast_to_oop(copy);
325   ContinuationGCSupport::relativize_stack_chunk(copy_val);
326 
327   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
328   if (result == copy_val) {
329     // Successfully evacuated. Our copy is now the public one!
330     shenandoah_assert_correct(nullptr, copy_val);
331     return copy_val;
332   } else {
333     // Failed to evacuate. We need to deal with the object that is left behind. Since this
334     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
335     // But if it happens to contain references to evacuated regions, those references would
336     // not get updated for this stale copy during this cycle, and we will crash while scanning
337     // it the next cycle.
338     //
339     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
340     // object will overwrite this stale copy, or the filler object on LAB retirement will
341     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
342     // have to explicitly overwrite the copy with the filler object. With that overwrite,
343     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
344     if (alloc_from_gclab) {
345       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
346     } else {
347       fill_with_object(copy, size);
348       shenandoah_assert_correct(nullptr, copy_val);
349     }
350     shenandoah_assert_correct(nullptr, result);
351     return result;
352   }
353 }
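
For context, a simplified sketch of the caller's shape (the real barrier lives elsewhere, in shenandoahBarrierSet): evacuate on first touch of a collection-set object, otherwise return whichever forwardee won the race.

    // Hypothetical slow-path helper; assumes the thread is inside an
    // enter_evacuation()/leave_evacuation() scope.
    oop maybe_evacuate(ShenandoahHeap* heap, oop obj, Thread* t) {
      if (!heap->in_collection_set(obj)) {
        return obj;
      }
      oop fwd = ShenandoahBarrierSet::resolve_forwarded(obj);
      if (fwd == obj) {
        return heap->evacuate_object(obj, t); // race to install our copy
      }
      return fwd; // somebody else already evacuated it
    }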
354 
355 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
356   oop obj = cast_to_oop(entry);
357   return !_marking_context->is_marked_strong(obj);
358 }
359 
360 inline bool ShenandoahHeap::in_collection_set(oop p) const {
361   assert(collection_set() != nullptr, "Sanity");
362   return collection_set()->is_in(p);
363 }
364 
365 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
366   assert(collection_set() != nullptr, "Sanity");
367   return collection_set()->is_in_loc(p);
368 }
369 
370 inline bool ShenandoahHeap::is_stable() const {
371   return _gc_state.is_clear();
372 }
373 
374 inline bool ShenandoahHeap::is_idle() const {
375   return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
376 }
377 
378 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
379   return _gc_state.is_set(MARKING);
380 }
381 
382 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
383   return _gc_state.is_set(EVACUATION);
384 }
385 
386 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
387   return _degenerated_gc_in_progress.is_set();
388 }
389 
390 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
391   return _full_gc_in_progress.is_set();
392 }
393 
394 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
395   return _full_gc_move_in_progress.is_set();
396 }
397 
398 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
399   return _gc_state.is_set(UPDATEREFS);
400 }
401 
402 inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
403   return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
404 }
405 
406 inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
407   return _concurrent_strong_root_in_progress.is_set();
408 }
409 
410 inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
411   return _gc_state.is_set(WEAK_ROOTS);
412 }
413 
414 template<class T>
415 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
416   marked_object_iterate(region, cl, region->top());
417 }
418 
419 template<class T>
420 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
421   assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
422 
423   ShenandoahMarkingContext* const ctx = complete_marking_context();
424   assert(ctx->is_complete(), "sanity");
425 
426   HeapWord* tams = ctx->top_at_mark_start(region);
427 
428   size_t skip_bitmap_delta = 1;
429   HeapWord* start = region->bottom();
430   HeapWord* end = MIN2(tams, region->end());
431 
432   // Step 1. Scan below the TAMS based on bitmap data.
433   HeapWord* limit_bitmap = MIN2(limit, tams);
434 
435   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
436   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
437   HeapWord* cb = ctx->get_next_marked_addr(start, end);
438 
439   intx dist = ShenandoahMarkScanPrefetch;
440   if (dist > 0) {
441     // Batched scan that prefetches the oop data, anticipating the access to
442     // either header, oop field, or forwarding pointer. Note that we cannot
443     // touch anything in the oop while it is still being prefetched, to give
444     // the prefetch enough time to work. This is why we try to scan the bitmap linearly,

527   void do_object(oop obj) {
528     obj->oop_iterate(_cl, _bounds);
529   }
530 };
531 
532 template<class T>
533 inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
534   if (region->is_humongous()) {
535     HeapWord* bottom = region->bottom();
536     if (top > bottom) {
537       region = region->humongous_start_region();
538       ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
539       marked_object_iterate(region, &objs);
540     }
541   } else {
542     ShenandoahObjectToOopClosure<T> objs(cl);
543     marked_object_iterate(region, &objs, top);
544   }
545 }
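
A minimal sketch of driving these iterators once marking is complete (the closure is illustrative): count the live objects of a region.

    class CountLiveClosure : public ObjectClosure {
    public:
      size_t _count;
      CountLiveClosure() : _count(0) {}
      void do_object(oop obj) { _count++; }
    };

    size_t count_live(ShenandoahHeap* heap, ShenandoahHeapRegion* r) {
      CountLiveClosure cl;
      heap->marked_object_iterate(r, &cl); // visits only marked objects
      return cl._count;
    }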
546 
547 inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
548   if (region_idx < _num_regions) {
549     return _regions[region_idx];
550   } else {
551     return nullptr;
552   }
553 }
554 
555 inline void ShenandoahHeap::mark_complete_marking_context() {
556   _marking_context->mark_complete();
557 }
558 
559 inline void ShenandoahHeap::mark_incomplete_marking_context() {
560   _marking_context->mark_incomplete();
561 }
562 
563 inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
564   assert(_marking_context->is_complete(), "sanity");
565   return _marking_context;
566 }
567 
568 inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
569   return _marking_context;
570 }
571 
572 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

  1 /*
  2  * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
  3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 27 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 28 
 29 #include "gc/shenandoah/shenandoahHeap.hpp"
 30 
 31 #include "classfile/javaClasses.inline.hpp"
 32 #include "gc/shared/markBitMap.inline.hpp"
 33 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
 34 #include "gc/shared/continuationGCSupport.inline.hpp"
 35 #include "gc/shared/suspendibleThreadSet.hpp"
 36 #include "gc/shared/tlab_globals.hpp"
 37 #include "gc/shenandoah/shenandoahAsserts.hpp"
 38 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 39 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
 40 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
 41 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 42 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 43 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 44 #include "gc/shenandoah/shenandoahGeneration.hpp"
 45 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 46 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 47 #include "gc/shenandoah/mode/shenandoahMode.hpp"
 48 #include "oops/compressedOops.inline.hpp"
 49 #include "oops/oop.inline.hpp"
 50 #include "runtime/atomic.hpp"
 51 #include "runtime/javaThread.hpp"
 52 #include "runtime/prefetch.inline.hpp"
 53 #include "runtime/objectMonitor.inline.hpp"
 54 #include "utilities/copy.hpp"
 55 #include "utilities/globalDefinitions.hpp"
 56 
 57 inline ShenandoahHeap* ShenandoahHeap::heap() {
 58   return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
 59 }
 60 
 61 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
 62   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
 63   // get_region() provides the bounds-check and returns null on OOB.
 64   return _heap->get_region(new_index - 1);
 65 }
 66 
 67 inline WorkerThreads* ShenandoahHeap::workers() const {
 68   return _workers;
 69 }
 70 
 71 inline WorkerThreads* ShenandoahHeap::safepoint_workers() {
 72   return _safepoint_workers;
 73 }
 74 
 75 inline void ShenandoahHeap::notify_gc_progress() {
 76   Atomic::store(&_gc_no_progress_count, (size_t) 0);
 77 }
 78 
 79 inline void ShenandoahHeap::notify_gc_no_progress() {
 80   Atomic::inc(&_gc_no_progress_count);
 81 }
 82 
 83 inline size_t ShenandoahHeap::get_gc_no_progress_count() const {
 84   return Atomic::load(&_gc_no_progress_count);
 85 }
 86 
 87 inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
 88   uintptr_t region_start = ((uintptr_t) addr);
 89   uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
 90   assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
 91   return index;
 92 }
 93 
 94 inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
 95   size_t index = heap_region_index_containing(addr);
 96   ShenandoahHeapRegion* const result = get_region(index);
 97   assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
 98   return result;
 99 }
100 
101 inline void ShenandoahHeap::enter_evacuation(Thread* t) {
102   _oom_evac_handler.enter_evacuation(t);
103 }
104 
105 inline void ShenandoahHeap::leave_evacuation(Thread* t) {
106   _oom_evac_handler.leave_evacuation(t);
107 }
108 
109 template <class T>
110 inline void ShenandoahHeap::update_with_forwarded(T* p) {
111   T o = RawAccess<>::oop_load(p);
112   if (!CompressedOops::is_null(o)) {
113     oop obj = CompressedOops::decode_not_null(o);
114     if (in_collection_set(obj)) {

234 // then, there are no transitive reads in the mutator (as we see nulls), and we can
235 // use relaxed memory ordering there.
236 
237 inline void ShenandoahHeap::atomic_clear_oop(oop* addr, oop compare) {
238   assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
239   Atomic::cmpxchg(addr, compare, oop(), memory_order_relaxed);
240 }
241 
242 inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, oop compare) {
243   assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
244   narrowOop cmp = CompressedOops::encode(compare);
245   Atomic::cmpxchg(addr, cmp, narrowOop(), memory_order_relaxed);
246 }
247 
248 inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare) {
249   assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
250   Atomic::cmpxchg(addr, compare, narrowOop(), memory_order_relaxed);
251 }
252 
253 inline bool ShenandoahHeap::cancelled_gc() const {
254   return _cancelled_gc.get() != GCCause::_no_gc;
255 }
256 
257 inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
258   if (sts_active && !cancelled_gc()) {
259     if (SuspendibleThreadSet::should_yield()) {
260       SuspendibleThreadSet::yield();
261     }
262   }
263   return cancelled_gc();
264 }
265 
266 inline GCCause::Cause ShenandoahHeap::cancelled_cause() const {
267   return _cancelled_gc.get();
268 }
269 
270 inline void ShenandoahHeap::clear_cancelled_gc(bool clear_oom_handler) {
271   _cancelled_gc.set(GCCause::_no_gc);
272   if (_cancel_requested_time > 0) {
273     log_debug(gc)("GC cancellation took %.3fs", (os::elapsedTime() - _cancel_requested_time));
274     _cancel_requested_time = 0;
275   }
276 
277   if (clear_oom_handler) {
278     _oom_evac_handler.clear();
279   }
280 }
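
A sketch of the cancellation round-trip, assuming the existing cancel_gc(GCCause::Cause) entry point on the heap:

    heap->cancel_gc(GCCause::_allocation_failure);  // request cancellation
    if (heap->cancelled_gc()) {
      log_info(gc)("Cancelled: %s", GCCause::to_string(heap->cancelled_cause()));
    }
    heap->clear_cancelled_gc(true);                 // reset; also clears OOM scope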
281 
282 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
283   assert(UseTLAB, "TLABs should be enabled");
284 
285   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
286   if (gclab == nullptr) {
287     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
288            "Performance: thread should have GCLAB: %s", thread->name());
289     // No GCLABs in this thread, fall back to shared allocation
290     return nullptr;
291   }
292   HeapWord* obj = gclab->allocate(size);
293   if (obj != nullptr) {
294     return obj;
295   }
296   return allocate_from_gclab_slow(thread, size);
297 }
298 
299 void ShenandoahHeap::increase_object_age(oop obj, uint additional_age) {
300   // This operates on a new copy of an object. This means that the object's mark-word
301   // is thread-local and therefore safe to access. However, when the mark is
302   // displaced (i.e. stack-locked or monitor-locked), then it must be considered
303   // a shared memory location that can be accessed by other threads.
304   // In particular, a competing evacuating thread can succeed in installing its copy
305   // as the forwardee and then proceed to unlock the object, at which point 'our'
306   // write to the foreign stack-location would potentially overwrite random
307   // information on that stack. Writing to a monitor is less problematic,
308   // but still not safe: while the ObjectMonitor would not randomly disappear,
309   // the other thread would also write to the same displaced header location,
310   // possibly increasing the age twice.
311   // For all these reasons, we take the conservative approach and do not attempt
312   // to increase the age when the header is displaced.
313   markWord w = obj->mark();
314   // The mark-word has been copied from the original object. It cannot be
315   // inflating, because inflation cannot be interrupted by a safepoint,
316   // and after a safepoint, a Java thread would first have to successfully
317   // evacuate the object before it could inflate the monitor.
318   assert(!w.is_being_inflated() || LockingMode == LM_LIGHTWEIGHT, "must not inflate monitor before evacuation of object succeeds");
319   // It is possible that we have copied the object after another thread has
320   // already successfully completed evacuation. While harmless (we would never
321   // publish our copy), don't even attempt to modify the age when that
322   // happens.
323   if (!w.has_displaced_mark_helper() && !w.is_marked()) {
324     w = w.set_age(MIN2(markWord::max_age, w.age() + additional_age));
325     obj->set_mark(w);
326   }
327 }
328 
329 // Return the object's age, or a sentinel value when the age cannot
330 // reliably be determined because of concurrent locking by the
331 // mutator.
332 uint ShenandoahHeap::get_object_age(oop obj) {
333   markWord w = obj->mark();
334   assert(!w.is_marked(), "must not be forwarded");
335   if (w.has_monitor()) {
336     w = w.monitor()->header();
337   } else if (w.is_being_inflated() || w.has_displaced_mark_helper()) {
338     // Informs caller that we aren't able to determine the age
339     return markWord::max_age + 1; // sentinel
340   }
341   assert(w.age() <= markWord::max_age, "Impossible!");
342   return w.age();
343 }
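
Callers are expected to treat the sentinel as "age unknown" and skip any tenuring decision for that object in this cycle; a hedged sketch (the threshold is an illustrative policy value, not from this patch):

    const uint tenuring_threshold = 7;      // illustrative policy value
    uint age = heap->get_object_age(obj);
    if (age <= markWord::max_age && age >= tenuring_threshold) {
      // candidate for promotion to the old generation
    }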
344 
345 inline bool ShenandoahHeap::is_in_active_generation(oop obj) const {
346   if (!mode()->is_generational()) {
347     // Everything is in the same single generation.
348     assert(is_in(obj), "Otherwise shouldn't return true below");
349     return true;
350   }
351 
352   ShenandoahGeneration* const gen = active_generation();
353 
354   if (gen == nullptr) {
355     // No collection is happening: we only expect this to be called
356     // while concurrent processing is active, but that could change
357     return false;
358   }
359 
360   assert(is_in(obj), "only check if is in active generation for objects (" PTR_FORMAT ") in heap", p2i(obj));
361   assert(gen->is_old() || gen->is_young() || gen->is_global(),
362          "Active generation must be old, young, or global");
363 
364   size_t index = heap_region_containing(obj)->index();
365 
366   // No flickering! The active generation must not have changed since we read it above.
367   assert(gen == active_generation(), "Race?");
368 
369   switch (region_affiliation(index)) {
370   case ShenandoahAffiliation::FREE:
371     // Free regions are in old, young, and global collections
372     return true;
373   case ShenandoahAffiliation::YOUNG_GENERATION:
374     // Young regions are in young and global collections, not in old collections
375     return !gen->is_old();
376   case ShenandoahAffiliation::OLD_GENERATION:
377     // Old regions are in old and global collections, not in young collections
378     return !gen->is_young();
379   default:
380     assert(false, "Bad affiliation (%d) for region " SIZE_FORMAT, region_affiliation(index), index);
381     return false;
382   }
383 }
384 
385 inline bool ShenandoahHeap::is_in_young(const void* p) const {
386   return is_in(p) && (_affiliations[heap_region_index_containing(p)] == ShenandoahAffiliation::YOUNG_GENERATION);
387 }
388 
389 inline bool ShenandoahHeap::is_in_old(const void* p) const {
390   return is_in(p) && (_affiliations[heap_region_index_containing(p)] == ShenandoahAffiliation::OLD_GENERATION);
391 }
392 
393 inline bool ShenandoahHeap::is_in_old_during_young_collection(oop obj) const {
394   return active_generation()->is_young() && is_in_old(obj);
395 }
396 
397 inline ShenandoahAffiliation ShenandoahHeap::region_affiliation(const ShenandoahHeapRegion *r) const {
398   return region_affiliation(r->index());
399 }
400 
401 inline void ShenandoahHeap::assert_lock_for_affiliation(ShenandoahAffiliation orig_affiliation,
402                                                         ShenandoahAffiliation new_affiliation) {
403   // A lock is required when changing from FREE to NON-FREE.  Though it may be possible to elide the lock when
404   // transitioning from in-use to FREE, the current implementation uses a lock for this transition.  A lock is
405   // not required to change from YOUNG to OLD (i.e., when promoting a humongous region).
406   //
407   //         new_affiliation is:     FREE   YOUNG   OLD
408   //  orig_affiliation is:  FREE      X       L      L
409   //                       YOUNG      L       X
410   //                         OLD      L       X      X
411   //  X means state transition won't happen (so don't care)
412   //  L means lock should be held
413   //  Blank means no lock required because affiliation visibility will not be required until subsequent safepoint
414   //
415   // Note: during Full GC all transitions between states are possible; Full GC is expected to run at a safepoint.
416 
417   if (orig_affiliation == ShenandoahAffiliation::FREE) {
418     shenandoah_assert_heaplocked_or_safepoint();
419   }
420 }
421 
422 inline void ShenandoahHeap::set_affiliation(ShenandoahHeapRegion* r, ShenandoahAffiliation new_affiliation) {
423 #ifdef ASSERT
424   assert_lock_for_affiliation(region_affiliation(r), new_affiliation);
425 #endif
426   Atomic::store(_affiliations + r->index(), (uint8_t) new_affiliation);
427 }
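
Per the matrix above, a FREE -> YOUNG transition must hold the heap lock; a minimal sketch assuming the existing ShenandoahHeapLocker RAII helper:

    {
      ShenandoahHeapLocker locker(heap->lock());  // covers FREE -> non-FREE
      heap->set_affiliation(region, ShenandoahAffiliation::YOUNG_GENERATION);
    }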
428 
429 inline ShenandoahAffiliation ShenandoahHeap::region_affiliation(size_t index) const {
430   return (ShenandoahAffiliation) Atomic::load(_affiliations + index);
431 }
432 
433 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
434   oop obj = cast_to_oop(entry);
435   return !_marking_context->is_marked_strong(obj);
436 }
437 
438 inline bool ShenandoahHeap::in_collection_set(oop p) const {
439   assert(collection_set() != nullptr, "Sanity");
440   return collection_set()->is_in(p);
441 }
442 
443 inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
444   assert(collection_set() != nullptr, "Sanity");
445   return collection_set()->is_in_loc(p);
446 }
447 
448 inline bool ShenandoahHeap::is_idle() const {
449   return _gc_state_changed ? _gc_state.is_clear() : ShenandoahThreadLocalData::gc_state(Thread::current()) == 0;
450 }
451 
452 inline bool ShenandoahHeap::has_forwarded_objects() const {
453   return is_gc_state(HAS_FORWARDED);
454 }
455 
456 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
457   return is_gc_state(MARKING);
458 }
459 
460 inline bool ShenandoahHeap::is_concurrent_young_mark_in_progress() const {
461   return is_gc_state(YOUNG_MARKING);
462 }
463 
464 inline bool ShenandoahHeap::is_concurrent_old_mark_in_progress() const {
465   return is_gc_state(OLD_MARKING);
466 }
467 
468 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
469   return is_gc_state(EVACUATION);
470 }
471 
472 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
473   return is_gc_state(UPDATE_REFS);
474 }
475 
476 inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
477   return is_gc_state(WEAK_ROOTS);
478 }
479 
480 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
481   return _degenerated_gc_in_progress.is_set();
482 }
483 
484 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
485   return _full_gc_in_progress.is_set();
486 }
487 
488 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
489   return _full_gc_move_in_progress.is_set();
490 }
491 
492 inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
493   return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
494 }
495 
496 inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
497   return _concurrent_strong_root_in_progress.is_set();
498 }
499 
500 template<class T>
501 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
502   marked_object_iterate(region, cl, region->top());
503 }
504 
505 template<class T>
506 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
507   assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
508 
509   ShenandoahMarkingContext* const ctx = marking_context();
510 
511   HeapWord* tams = ctx->top_at_mark_start(region);
512 
513   size_t skip_bitmap_delta = 1;
514   HeapWord* start = region->bottom();
515   HeapWord* end = MIN2(tams, region->end());
516 
517   // Step 1. Scan below the TAMS based on bitmap data.
518   HeapWord* limit_bitmap = MIN2(limit, tams);
519 
520   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
521   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
522   HeapWord* cb = ctx->get_next_marked_addr(start, end);
523 
524   intx dist = ShenandoahMarkScanPrefetch;
525   if (dist > 0) {
526     // Batched scan that prefetches the oop data, anticipating the access to
527     // either header, oop field, or forwarding pointer. Note that we cannot
528     // touch anything in the oop while it is still being prefetched, to give
529     // the prefetch enough time to work. This is why we try to scan the bitmap linearly,

612   void do_object(oop obj) {
613     obj->oop_iterate(_cl, _bounds);
614   }
615 };
616 
617 template<class T>
618 inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
619   if (region->is_humongous()) {
620     HeapWord* bottom = region->bottom();
621     if (top > bottom) {
622       region = region->humongous_start_region();
623       ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
624       marked_object_iterate(region, &objs);
625     }
626   } else {
627     ShenandoahObjectToOopClosure<T> objs(cl);
628     marked_object_iterate(region, &objs, top);
629   }
630 }
631 
632 inline ShenandoahHeapRegion* ShenandoahHeap::get_region(size_t region_idx) const {
633   if (region_idx < _num_regions) {
634     return _regions[region_idx];
635   } else {
636     return nullptr;
637   }
638 }
639 
640 inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
641   assert(_marking_context->is_complete(), "sanity");
642   return _marking_context;
643 }
644 
645 inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
646   return _marking_context;
647 }
648 
649 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP