1 /*
  2  * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
 27 
 28 #include "gc/shenandoah/shenandoahHeap.hpp"
 29 
#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
 55 
// Accessor for the global ShenandoahHeap instance.
inline ShenandoahHeap* ShenandoahHeap::heap() {
  return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
}
 59 
 60 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
 61   size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
 62   // get_region() provides the bounds-check and returns NULL on OOB.
 63   return _heap->get_region(new_index - 1);
 64 }
 65 
// True when the gc-state byte indicates the heap may contain forwarded objects.
inline bool ShenandoahHeap::has_forwarded_objects() const {
  return _gc_state.is_set(HAS_FORWARDED);
}

// Worker threads used for GC phases.
inline WorkerThreads* ShenandoahHeap::workers() const {
  return _workers;
}

// Worker threads used for safepoint operations.
inline WorkerThreads* ShenandoahHeap::safepoint_workers() {
  return _safepoint_workers;
}
 77 
 78 inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
 79   uintptr_t region_start = ((uintptr_t) addr);
 80   uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
 81   assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
 82   return index;
 83 }
 84 
// Returns the region containing 'addr'; 'addr' must lie within the heap.
inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  return result;
}
 91 
// Bracket an evacuation scope for thread 't'; delegates to the
// OOM-during-evacuation handler which coordinates evacuation failures.
inline void ShenandoahHeap::enter_evacuation(Thread* t) {
  _oom_evac_handler.enter_evacuation(t);
}

inline void ShenandoahHeap::leave_evacuation(Thread* t) {
  _oom_evac_handler.leave_evacuation(t);
}
 99 
// Single-threaded reference update: if *p points into the collection set,
// overwrite it with the forwardee using a plain (non-atomic) store. Callers
// must guarantee there are no concurrent updates of *p; otherwise use
// conc_update_with_forwarded() below.
template <class T>
inline void ShenandoahHeap::update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in collection
      // set that are not really forwarded. We can still go and try and update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Unconditionally store the update: no concurrent updates expected.
      RawAccess<IS_NOT_NULL>::oop_store(p, fwd);
    }
  }
}
118 
// Concurrent reference update: if *p points into the collection set, install
// the forwardee with a CAS. Safe to race with mutators and other GC workers;
// see the memory ordering discussion preceding atomic_update_oop() below.
template <class T>
inline void ShenandoahHeap::conc_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in collection
      // set that are not really forwarded. We can still go and try CAS-update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Sanity check: we should not be updating the cset regions themselves,
      // unless we are recovering from the evacuation failure.
      shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || cancelled_gc());

      // Either we succeed in updating the reference, or something else gets in our way.
      // We don't care if that is another concurrent GC update, or another mutator update.
      atomic_update_oop(fwd, p, obj);
    }
  }
}
142 
143 // Atomic updates of heap location. This is only expected to work with updating the same
144 // logical object with its forwardee. The reason why we need stronger-than-relaxed memory
145 // ordering has to do with coordination with GC barriers and mutator accesses.
146 //
147 // In essence, stronger CAS access is required to maintain the transitive chains that mutator
148 // accesses build by themselves. To illustrate this point, consider the following example.
149 //
150 // Suppose "o" is the object that has a field "x" and the reference to "o" is stored
151 // to field at "addr", which happens to be Java volatile field. Normally, the accesses to volatile
152 // field at "addr" would be matched with release/acquire barriers. This changes when GC moves
153 // the object under mutator feet.
154 //
155 // Thread 1 (Java)
156 //         // --- previous access starts here
157 //         ...
158 //   T1.1: store(&o.x, 1, mo_relaxed)
159 //   T1.2: store(&addr, o, mo_release) // volatile store
160 //
161 //         // --- new access starts here
162 //         // LRB: copy and install the new copy to fwdptr
163 //   T1.3: var copy = copy(o)
164 //   T1.4: cas(&fwd, t, copy, mo_release) // pointer-mediated publication
165 //         <access continues>
166 //
167 // Thread 2 (GC updater)
168 //   T2.1: var f = load(&fwd, mo_{consume|acquire}) // pointer-mediated acquisition
169 //   T2.2: cas(&addr, o, f, mo_release) // this method
170 //
171 // Thread 3 (Java)
172 //   T3.1: var o = load(&addr, mo_acquire) // volatile read
173 //   T3.2: if (o != null)
174 //   T3.3:   var r = load(&o.x, mo_relaxed)
175 //
176 // r is guaranteed to contain "1".
177 //
178 // Without GC involvement, there is synchronizes-with edge from T1.2 to T3.1,
179 // which guarantees this. With GC involvement, when LRB copies the object and
180 // another thread updates the reference to it, we need to have the transitive edge
181 // from T1.4 to T2.1 (that one is guaranteed by forwarding accesses), plus the edge
182 // from T2.2 to T3.1 (which is brought by this CAS).
183 //
// Note that we do not need to "acquire" in these methods, because we do not read the
// failure witness's contents on any path, and "release" is enough.
186 //
187 
// Release-ordered CAS of the heap slot at 'addr' from 'compare' to 'update'.
// The CAS result is deliberately ignored: callers do not depend on winning
// the race (see conc_update_with_forwarded()).
inline void ShenandoahHeap::atomic_update_oop(oop update, oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, update, memory_order_release);
}

// Same as above, for compressed-oop slots with a pre-encoded compare value.
inline void ShenandoahHeap::atomic_update_oop(oop update, narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop u = CompressedOops::encode(update);
  Atomic::cmpxchg(addr, compare, u, memory_order_release);
}

// Same as above, encoding both the compare and the update values.
inline void ShenandoahHeap::atomic_update_oop(oop update, narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop c = CompressedOops::encode(compare);
  narrowOop u = CompressedOops::encode(update);
  Atomic::cmpxchg(addr, c, u, memory_order_release);
}
205 
// Checked variants of the CAS updates above: return true iff the CAS
// succeeded, i.e. the returned witness equals 'compare'. Same "release"
// ordering rationale as the unchecked variants.
inline bool ShenandoahHeap::atomic_update_oop_check(oop update, oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  return (oop) Atomic::cmpxchg(addr, compare, update, memory_order_release) == compare;
}

inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop u = CompressedOops::encode(update);
  return (narrowOop) Atomic::cmpxchg(addr, compare, u, memory_order_release) == compare;
}

// Here the comparison is done on decoded oops, since 'compare' arrives
// uncompressed; the CAS itself still operates on the encoded values.
inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop c = CompressedOops::encode(compare);
  narrowOop u = CompressedOops::encode(update);
  return CompressedOops::decode(Atomic::cmpxchg(addr, c, u, memory_order_release)) == compare;
}
223 
// The memory ordering discussion above does not apply to methods that store NULLs:
// then, there are no transitive reads in the mutator (as we see NULLs), and we can use
// relaxed memory ordering there.
227 
// CAS the slot to NULL iff it still holds 'compare'. Relaxed ordering is
// sufficient when storing NULL (see comment above).
inline void ShenandoahHeap::atomic_clear_oop(oop* addr, oop compare) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, oop(), memory_order_relaxed);
}

// Compressed-oop variant; encodes 'compare' before the CAS.
inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, oop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop cmp = CompressedOops::encode(compare);
  Atomic::cmpxchg(addr, cmp, narrowOop(), memory_order_relaxed);
}

// Compressed-oop variant with a pre-encoded compare value.
inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  Atomic::cmpxchg(addr, compare, narrowOop(), memory_order_relaxed);
}
243 
// True if the current GC cycle has been cancelled.
inline bool ShenandoahHeap::cancelled_gc() const {
  return _cancelled_gc.get() == CANCELLED;
}
247 
// Check for GC cancellation, yielding to a pending safepoint when this worker
// participates in the suspendible thread set. Returns true iff GC is cancelled.
inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
  if (! (sts_active && ShenandoahSuspendibleWorkers)) {
    // Not running as a suspendible worker: a plain flag check suffices.
    return cancelled_gc();
  }

  // Briefly flip CANCELLABLE -> NOT_CANCELLED so a concurrent cancellation
  // request cannot land while we are yielding to the safepoint.
  jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
  if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }

    // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
    // to restore to CANCELLABLE.
    if (prev == CANCELLABLE) {
      _cancelled_gc.set(CANCELLABLE);
    }
    return false;
  } else {
    // CAS witnessed CANCELLED: cancellation has already happened.
    return true;
  }
}
269 
// Reset cancellation state back to CANCELLABLE. Logs how long the cancellation
// took if a request was timed, and optionally resets the OOM-during-evacuation
// protocol state.
inline void ShenandoahHeap::clear_cancelled_gc(bool clear_oom_handler) {
  _cancelled_gc.set(CANCELLABLE);
  if (_cancel_requested_time > 0) {
    double cancel_time = os::elapsedTime() - _cancel_requested_time;
    log_info(gc)("GC cancellation took %.3fs", cancel_time);
    _cancel_requested_time = 0;
  }

  if (clear_oom_handler) {
    _oom_evac_handler.clear();
  }
}
282 
283 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
284   assert(UseTLAB, "TLABs should be enabled");
285 
286   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
287   if (gclab == NULL) {
288     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
289            "Performance: thread should have GCLAB: %s", thread->name());
290     // No GCLABs in this thread, fallback to shared allocation
291     return NULL;
292   }
293   HeapWord* obj = gclab->allocate(size);
294   if (obj != NULL) {
295     return obj;
296   }
297   return allocate_from_gclab_slow(thread, size);
298 }
299 
300 inline HeapWord* ShenandoahHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
301   assert(UseTLAB, "TLABs should be enabled");
302 
303   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
304   if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
305     return NULL;
306   } else if (plab == NULL) {
307     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
308            "Performance: thread should have PLAB: %s", thread->name());
309     // No PLABs in this thread, fallback to shared allocation
310     return NULL;
311   }
312   HeapWord* obj = plab->allocate(size);
313   if (obj == NULL) {
314     obj = allocate_from_plab_slow(thread, size, is_promotion);
315   }
316   if (is_promotion) {
317     ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
318   } else {
319     ShenandoahThreadLocalData::add_to_plab_evacuated(thread, size * HeapWordSize);
320   }
321   return obj;
322 }
323 
// Evacuate 'p' out of its collection-set region and return the forwardee.
// In generational mode during a young collection, objects whose combined
// region+mark age reaches InitialTenuringThreshold are first tried in old
// gen (promotion); on promotion failure they fall back to young gen.
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
  if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  ShenandoahHeapRegion* r = heap_region_containing(p);
  assert(!r->is_humongous(), "never evacuate humongous objects");

  ShenandoahRegionAffiliation target_gen = r->affiliation();
  if (mode()->is_generational() && ShenandoahHeap::heap()->is_gc_generation_young() &&
      target_gen == YOUNG_GENERATION && ShenandoahPromoteTenuredObjects) {
    markWord mark = p->mark();
    if (mark.is_marked()) {
      // Already forwarded.
      return ShenandoahBarrierSet::resolve_forwarded(p);
    }
    if (mark.has_displaced_mark_helper()) {
      // We don't want to deal with MT here just to ensure we read the right mark word.
      // Skip the potential promotion attempt for this one.
    } else if (r->age() + mark.age() >= InitialTenuringThreshold) {
      oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
      if (result != NULL) {
        return result;
      }
      // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
    }
  }
  return try_evacuate_object(p, thread, r, target_gen);
}
358 
// try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
// to OLD_GENERATION.
//
// Returns the winning forwardee on success. Returns NULL only for a failed
// promotion attempt from young to old (the caller retries in young gen). On
// allocation failure in the target generation itself, it engages the
// OOM-during-evacuation protocol and returns the current forwardee of 'p'.
inline oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
                                               ShenandoahRegionAffiliation target_gen) {
  bool alloc_from_lab = true;
  HeapWord* copy = NULL;
  size_t size = p->size();
  bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
        copy = NULL;
  } else {
#endif
    // Step 1: allocate space for the copy, preferring the appropriate LAB
    // (GCLAB for young, PLAB for old) and falling back to shared allocation.
    if (UseTLAB) {
      switch (target_gen) {
        case YOUNG_GENERATION: {
           copy = allocate_from_gclab(thread, size);
           if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
             // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve.  Try resetting
             // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
             ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
             copy = allocate_from_gclab(thread, size);
             // If we still get nullptr, we'll try a shared allocation below.
           }
           break;
        }
        case OLD_GENERATION: {
           if (ShenandoahUsePLAB) {
             copy = allocate_from_plab(thread, size, is_promotion);
             if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread))) {
               // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve.  Try resetting
               // the desired PLAB size and retry PLAB allocation to avoid cascading of shared memory allocations.
               ShenandoahThreadLocalData::set_plab_size(thread, PLAB::min_size());
               copy = allocate_from_plab(thread, size, is_promotion);
               // If we still get nullptr, we'll try a shared allocation below.
             }
           }
           break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    }

    if (copy == NULL) {
      // If we failed to allocate in LAB, we'll try a shared allocation.
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
      copy = allocate_memory(req, is_promotion);
      alloc_from_lab = false;
    }
#ifdef ASSERT
  }
#endif

  // Step 2: no space at all. Report the failure and return the current
  // forwardee (which may still be 'p' itself).
  if (copy == NULL) {
    if (target_gen == OLD_GENERATION) {
      assert(mode()->is_generational(), "Should only be here in generational mode.");
      if (from_region->is_young()) {
        // Signal that promotion failed. Will evacuate this old object somewhere in young gen.
        handle_promotion_failure();
        return NULL;
      } else {
        // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
        // after the evacuation threads have finished.
        handle_old_evacuation_failure();
      }
    }

    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  oop copy_val = cast_to_oop(copy);

  if (mode()->is_generational() && target_gen == YOUNG_GENERATION && is_aging_cycle()) {
    ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
  }

  // Try to install the new forwarding pointer.
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    if (mode()->is_generational() && target_gen == OLD_GENERATION) {
      handle_old_evacuation(copy, size, from_region->is_young());
    }
    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  }  else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    if (alloc_from_lab) {
       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
       // object will overwrite this stale copy, or the filler object on LAB retirement will
       // do this.
       switch (target_gen) {
         case YOUNG_GENERATION: {
             ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
            break;
         }
         case OLD_GENERATION: {
            ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
            if (is_promotion) {
              ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
            } else {
              ShenandoahThreadLocalData::subtract_from_plab_evacuated(thread, size * HeapWordSize);
            }
            break;
         }
         default: {
           ShouldNotReachHere();
           break;
         }
       }
    } else {
      // For non-LAB allocations, we have no way to retract the allocation, and
      // have to explicitly overwrite the copy with the filler object. With that overwrite,
      // we have to keep the fwdptr initialized and pointing to our (stale) copy.
      fill_with_object(copy, size);
      shenandoah_assert_correct(NULL, copy_val);
      // For non-LAB allocations, the object has already been registered
    }
    shenandoah_assert_correct(NULL, result);
    return result;
  }
}
497 
498 void ShenandoahHeap::increase_object_age(oop obj, uint additional_age) {
499   markWord w = obj->has_displaced_mark() ? obj->displaced_mark() : obj->mark();
500   w = w.set_age(MIN2(markWord::max_age, w.age() + additional_age));
501   if (obj->has_displaced_mark()) {
502     obj->set_displaced_mark(w);
503   } else {
504     obj->set_mark(w);
505   }
506 }
507 
// Atomically clears the old-gen evacuation failure flag; returns true iff it was set.
inline bool ShenandoahHeap::clear_old_evacuation_failure() {
  return _old_gen_oom_evac.try_unset();
}

// NOTE(review): reports "old" only while the current GC generation is young —
// presumably the callers only need the young/old distinction during young
// collections; confirm intended semantics against call sites.
inline bool ShenandoahHeap::is_old(oop obj) const {
  return is_gc_generation_young() && is_in_old(obj);
}

// An entry requires marking if it is not yet strongly marked.
inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  oop obj = cast_to_oop(entry);
  return !_marking_context->is_marked_strong(obj);
}

// Object-level collection set membership.
inline bool ShenandoahHeap::in_collection_set(oop p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in(p);
}

// Location-level (arbitrary address) collection set membership.
inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in_loc(p);
}
530 
// Predicates over the global gc-state byte and auxiliary flags. "Stable"
// means no gc-state bits are set at all.
inline bool ShenandoahHeap::is_stable() const {
  return _gc_state.is_clear();
}

// Idle: none of the marking/evacuation/update-refs bits are set.
inline bool ShenandoahHeap::is_idle() const {
  return _gc_state.is_unset(YOUNG_MARKING | OLD_MARKING | EVACUATION | UPDATEREFS);
}

// Any concurrent marking (young or old) in progress.
inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return _gc_state.is_set(YOUNG_MARKING | OLD_MARKING);
}

inline bool ShenandoahHeap::is_concurrent_young_mark_in_progress() const {
  return _gc_state.is_set(YOUNG_MARKING);
}

inline bool ShenandoahHeap::is_concurrent_old_mark_in_progress() const {
  return _gc_state.is_set(OLD_MARKING);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _gc_state.is_set(EVACUATION);
}

// Generic mask query against the gc-state byte.
inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
  return _gc_state.is_set(mask);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}

// Stop-the-world collections: full GC or degenerated GC.
inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
  return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
}

inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
  return _concurrent_strong_root_in_progress.is_set();
}

inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
  return _gc_state.is_set(WEAK_ROOTS);
}

// True while this cycle also ages surviving objects (see evacuate_object()).
inline bool ShenandoahHeap::is_aging_cycle() const {
  return _is_aging_cycle.is_set();
}
590 
// Install a new promotion reserve, returning the previous value.
inline size_t ShenandoahHeap::set_promotion_reserve(size_t new_val) {
  size_t orig = _promotion_reserve;
  _promotion_reserve = new_val;
  return orig;
}

inline size_t ShenandoahHeap::get_promotion_reserve() const {
  return _promotion_reserve;
}
600 
601 // returns previous value
602 size_t ShenandoahHeap::capture_old_usage(size_t old_usage) {
603   size_t previous_value = _captured_old_usage;
604   _captured_old_usage = old_usage;
605   return previous_value;
606 }
607 
608 void ShenandoahHeap::set_previous_promotion(size_t promoted_bytes) {
609   _previous_promotion = promoted_bytes;
610 }
611 
612 size_t ShenandoahHeap::get_previous_promotion() const {
613   return _previous_promotion;
614 }
615 
// Install a new old-gen evacuation reserve, returning the previous value.
inline size_t ShenandoahHeap::set_old_evac_reserve(size_t new_val) {
  size_t orig = _old_evac_reserve;
  _old_evac_reserve = new_val;
  return orig;
}

inline size_t ShenandoahHeap::get_old_evac_reserve() const {
  return _old_evac_reserve;
}

inline void ShenandoahHeap::reset_old_evac_expended() {
  _old_evac_expended = 0;
}

// Add 'increment' to the expended old-evac counter; returns the new total.
// NOTE(review): plain '+=', not atomic — presumably only updated by a single
// thread or under external synchronization; confirm against callers.
inline size_t ShenandoahHeap::expend_old_evac(size_t increment) {
  _old_evac_expended += increment;
  return _old_evac_expended;
}

inline size_t ShenandoahHeap::get_old_evac_expended() const {
  return _old_evac_expended;
}

// Install a new young-gen evacuation reserve, returning the previous value.
inline size_t ShenandoahHeap::set_young_evac_reserve(size_t new_val) {
  size_t orig = _young_evac_reserve;
  _young_evac_reserve = new_val;
  return orig;
}

inline size_t ShenandoahHeap::get_young_evac_reserve() const {
  return _young_evac_reserve;
}

// Signed reserve: install a new allocation supplement, returning the previous value.
inline intptr_t ShenandoahHeap::set_alloc_supplement_reserve(intptr_t new_val) {
  intptr_t orig = _alloc_supplement_reserve;
  _alloc_supplement_reserve = new_val;
  return orig;
}

inline intptr_t ShenandoahHeap::get_alloc_supplement_reserve() const {
  return _alloc_supplement_reserve;
}
658 
// Iterate marked objects in the region up to the region's current top.
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}
663 
// Visit every live object in the region below 'limit' with 'cl'. Below TAMS,
// liveness comes from the mark bitmap; at and above TAMS every object is
// implicitly live and the walk proceeds by object size.
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = marking_context();

  HeapWord* tams = ctx->top_at_mark_start(region);

  size_t skip_bitmap_delta = 1;
  HeapWord* start = region->bottom();
  HeapWord* end = MIN2(tams, region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = ctx->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in oop, while it still being prefetched to get enough
    // time for prefetch to work. This is why we try to scan the bitmap linearly,
    // disregarding the object size. However, since we know forwarding pointer
    // precedes the object, we can skip over it. Once we cannot trust the bitmap,
    // there is no point for prefetching the oop contents, as oop->size() will
    // touch it prematurely.

    // No variable-length arrays in standard C++, have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      // Fill the batch: prefetch each candidate's mark word while recording it.
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, oopDesc::mark_offset_in_bytes());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = ctx->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      // Drain the batch: by now the prefetches have had time to complete.
      for (int c = 0; c < avail; c++) {
        assert (slots[c] < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = cast_to_oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    // Non-prefetching fallback: walk the bitmap one marked address at a time.
    while (cb < limit_bitmap) {
      assert (cb < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = cast_to_oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = ctx->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams;
  while (cs < limit) {
    assert (cs >= tams, "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = cast_to_oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    size_t size = obj->size();
    cl->do_object(obj);
    cs += size;
  }
}
751 
// Adapts an oop closure 'cl' to an ObjectClosure by iterating each visited
// object's reference fields.
template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

// Same adaptation, but restricts field iteration to the [bottom, top) memory
// region (used for partial walks of humongous objects).
template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
    _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};
775 
776 template<class T>
777 inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
778   if (region->is_humongous()) {
779     HeapWord* bottom = region->bottom();
780     if (top > bottom) {
781       region = region->humongous_start_region();
782       ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
783       marked_object_iterate(region, &objs);
784     }
785   } else {
786     ShenandoahObjectToOopClosure<T> objs(cl);
787     marked_object_iterate(region, &objs, top);
788   }
789 }
790 
791 inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
792   if (region_idx < _num_regions) {
793     return _regions[region_idx];
794   } else {
795     return NULL;
796   }
797 }
798 
// Marking context accessor that asserts marking is complete; for use in
// phases that rely on finished mark information.
inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
  assert (_marking_context->is_complete()," sanity");
  return _marking_context;
}

inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
  return _marking_context;
}
807 
// Remembered-set card maintenance. Note the asymmetry: clear_cards_for() and
// mark_card_as_dirty() are silently no-ops outside generational mode, while
// dirty_cards()/clear_cards() assert that they are only called in it.

// Mark the whole region's card range as empty (no old-to-young pointers).
inline void ShenandoahHeap::clear_cards_for(ShenandoahHeapRegion* region) {
  if (mode()->is_generational()) {
    _card_scan->mark_range_as_empty(region->bottom(), pointer_delta(region->end(), region->bottom()));
  }
}

// Mark all cards covering [start, end) as dirty.
inline void ShenandoahHeap::dirty_cards(HeapWord* start, HeapWord* end) {
  assert(mode()->is_generational(), "Should only be used for generational mode");
  size_t words = pointer_delta(end, start);
  _card_scan->mark_range_as_dirty(start, words);
}

// Mark all cards covering [start, end) as clean.
inline void ShenandoahHeap::clear_cards(HeapWord* start, HeapWord* end) {
  assert(mode()->is_generational(), "Should only be used for generational mode");
  size_t words = pointer_delta(end, start);
  _card_scan->mark_range_as_clean(start, words);
}

// Dirty the single card covering 'location'.
inline void ShenandoahHeap::mark_card_as_dirty(void* location) {
  if (mode()->is_generational()) {
    _card_scan->mark_card_as_dirty((HeapWord*)location);
  }
}
831 
832 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP