/*
 * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "gc/shenandoah/shenandoahHeap.hpp"

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

inline ShenandoahHeap* ShenandoahHeap::heap() {
  return named_heap<ShenandoahHeap>(CollectedHeap::Shenandoah);
}

inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
  // get_region() provides the bounds-check and returns NULL on OOB.
  return _heap->get_region(new_index - 1);
}

inline bool ShenandoahHeap::has_forwarded_objects() const {
  return _gc_state.is_set(HAS_FORWARDED);
}

inline WorkGang* ShenandoahHeap::workers() const {
  return _workers;
}

inline WorkGang* ShenandoahHeap::safepoint_workers() {
  return _safepoint_workers;
}

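// Map a heap address to the index of the region containing it. Since all regions
// are the same power-of-two size, this is a subtraction from the heap base followed
// by a shift by the region size.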
inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  return index;
}

inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  return result;
}

inline void ShenandoahHeap::enter_evacuation(Thread* t) {
  _oom_evac_handler.enter_evacuation(t);
}

inline void ShenandoahHeap::leave_evacuation(Thread* t) {
  _oom_evac_handler.leave_evacuation(t);
}

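// Reference updating helpers. update_with_forwarded() is the single-writer variant
// that does a plain store; conc_update_with_forwarded() is the concurrent variant
// that CAS-es the forwardee in and tolerates losing the race to another updater.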
template <class T>
inline void ShenandoahHeap::update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in the collection
      // set that are not really forwarded. We can still go and try to update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Unconditionally store the update: no concurrent updates expected.
      RawAccess<IS_NOT_NULL>::oop_store(p, fwd);
    }
  }
}

template <class T>
inline void ShenandoahHeap::conc_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (in_collection_set(obj)) {
      // Corner case: when evacuation fails, there are objects in the collection
      // set that are not really forwarded. We can still go and try to CAS-update them
      // (uselessly) to simplify the common path.
      shenandoah_assert_forwarded_except(p, obj, cancelled_gc());
      oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_not_in_cset_except(p, fwd, cancelled_gc());

      // Sanity check: we should not be updating the cset regions themselves,
      // unless we are recovering from the evacuation failure.
      shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || cancelled_gc());

      // Either we succeed in updating the reference, or something else gets in our way.
      // We don't care if that is another concurrent GC update, or another mutator update.
      // We only check that the non-NULL store still ends up with a non-forwarded reference.
      oop witness = cas_oop(fwd, p, obj);
      shenandoah_assert_not_forwarded_except(p, witness, (witness == NULL) || (witness == obj));
    }
  }
}

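// CAS helpers for oop locations. Each returns the witness value found at the
// address: the CAS succeeded iff the witness equals the expected value 'c'.
// The narrowOop overloads encode/decode compressed oops around the exchange.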
inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) {
  assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  return (oop) Atomic::cmpxchg(addr, c, n);
}

inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, narrowOop c) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop val = CompressedOops::encode(n);
  return CompressedOops::decode(Atomic::cmpxchg(addr, c, val));
}

inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) {
  assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
  narrowOop cmp = CompressedOops::encode(c);
  narrowOop val = CompressedOops::encode(n);
  return CompressedOops::decode(Atomic::cmpxchg(addr, cmp, val));
}

inline bool ShenandoahHeap::cancelled_gc() const {
  return _cancelled_gc.get() == CANCELLED;
}

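// Check for cancellation and, when running as a suspendible worker, yield to a
// pending safepoint request. While yielding, the flag is temporarily moved from
// CANCELLABLE to NOT_CANCELLED, presumably so that a cancellation request cannot
// race with the yield; whichever thread made that transition is responsible for
// restoring CANCELLABLE afterwards.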
inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
  if (! (sts_active && ShenandoahSuspendibleWorkers)) {
    return cancelled_gc();
  }

  jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
  if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }

    // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
    // to restore to CANCELLABLE.
    if (prev == CANCELLABLE) {
      _cancelled_gc.set(CANCELLABLE);
    }
    return false;
  } else {
    return true;
  }
}

inline void ShenandoahHeap::clear_cancelled_gc() {
  _cancelled_gc.set(CANCELLABLE);
  _oom_evac_handler.clear();
}

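// Fast path for evacuation allocations: bump-allocate from the thread's GCLAB.
// Threads without a GCLAB get NULL back and the caller falls back to a shared
// allocation; threads whose GCLAB is exhausted go through the slow path, which
// may retire and refill the GCLAB.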
inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab == NULL) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
           "Performance: thread should have GCLAB: %s", thread->name());
    // No GCLABs in this thread, fall back to shared allocation
    return NULL;
  }
  HeapWord* obj = gclab->allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_gclab_slow(thread, size);
}

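// Evacuate a single object out of the collection set: allocate a copy (from the
// GCLAB when possible, otherwise from shared space), copy the contents, then try
// to CAS-install the forwarding pointer. Exactly one racing evacuator wins; all
// others observe the winner's copy and must undo or fill their own allocation.
// A caller would typically do something like this (illustrative sketch only, not
// the actual barrier code):
//
//   if (heap->in_collection_set(obj) && heap->is_evacuation_in_progress()) {
//     obj = heap->evacuate_object(obj, Thread::current());
//   }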
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = NULL;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == NULL) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == NULL) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = cast_to_oop(copy);
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(NULL, copy_val);
    }
    shenandoah_assert_correct(NULL, result);
    return result;
  }
}

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  oop obj = cast_to_oop(entry);
  return !_marking_context->is_marked_strong(obj);
}

inline bool ShenandoahHeap::in_collection_set(oop p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in(p);
}

inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in_loc(p);
}

inline bool ShenandoahHeap::is_stable() const {
  return _gc_state.is_clear();
}

inline bool ShenandoahHeap::is_idle() const {
  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
}

inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return _gc_state.is_set(MARKING);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _gc_state.is_set(EVACUATION);
}

inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
  return _gc_state.is_set(mask);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}

inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
  return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
}

inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
  return _concurrent_strong_root_in_progress.is_set();
}

inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
  return _gc_state.is_set(WEAK_ROOTS);
}

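// Iterate over all marked objects in a region, optionally up to a limit. The walk
// happens in two steps: below TAMS the mark bitmap is authoritative and drives the
// scan (with optional prefetching); from TAMS up to the limit all objects are
// implicitly live, so a plain size-based walk is used.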
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(!region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = complete_marking_context();
  assert(ctx->is_complete(), "sanity");

  HeapWord* tams = ctx->top_at_mark_start(region);

  size_t skip_bitmap_delta = 1;
  HeapWord* start = region->bottom();
  HeapWord* end = MIN2(tams, region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = ctx->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either the header, an oop field, or the forwarding pointer. Note that we
    // cannot touch anything in the oop while it is still being prefetched, or
    // the prefetch would not have enough time to work. This is why we try to
    // scan the bitmap linearly, disregarding the object size. However, since we
    // know the forwarding pointer precedes the object, we can skip over it. Once
    // we cannot trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() would touch it prematurely.

    // No variable-length arrays in standard C++, so have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, oopDesc::mark_offset_in_bytes());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = ctx->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert (slots[c] < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = cast_to_oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert (cb < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = cast_to_oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = ctx->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams;
  while (cs < limit) {
    assert (cs >= tams, "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = cast_to_oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    int size = obj->size();
    cl->do_object(obj);
    cs += size;
  }
}

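// Adapters that present an oop closure as an ObjectClosure by iterating the oop
// fields of each visited object. The bounded variant restricts the field iteration
// to a given memory range, which is used below for humongous objects.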
template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
    _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

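// Apply an oop closure to the fields of marked objects in a region, up to 'top'.
// For humongous regions, the walk is redirected to the humongous start region
// (the only one carrying the object header and mark bit), while the field
// iteration stays bounded to the [bottom, top) range of the region passed in.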
template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}

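// Bounds-checked region accessor: returns NULL for an out-of-bounds index, which
// callers like ShenandoahRegionIterator::next() use to detect the end of iteration.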
inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
  if (region_idx < _num_regions) {
    return _regions[region_idx];
  } else {
    return NULL;
  }
}

inline void ShenandoahHeap::mark_complete_marking_context() {
  _marking_context->mark_complete();
}

inline void ShenandoahHeap::mark_incomplete_marking_context() {
  _marking_context->mark_incomplete();
}

inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
  assert(_marking_context->is_complete(), "sanity");
  return _marking_context;
}

inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
  return _marking_context;
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP