1 /*
   2  * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shenandoah/shenandoahAsserts.hpp"
  26 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  27 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  28 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  30 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  31 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  32 #include "memory/iterator.inline.hpp"
  33 #include "runtime/interfaceSupport.inline.hpp"
  34 #ifdef COMPILER1
  35 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  36 #endif
  37 #ifdef COMPILER2
  38 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  39 #endif
  40 
  41 class ShenandoahBarrierSetC1;
  42 class ShenandoahBarrierSetC2;
  43 
  44 template <bool STOREVAL_EVAC_BARRIER>
  45 class ShenandoahUpdateRefsForOopClosure: public BasicOopIterateClosure {
  46 private:
  47   ShenandoahHeap* _heap;
  48   ShenandoahBarrierSet* _bs;
  49 
  50   template <class T>
  51   inline void do_oop_work(T* p) {
  52     oop o;
  53     if (STOREVAL_EVAC_BARRIER) {
  54       o = _heap->evac_update_with_forwarded(p);
  55       if (!CompressedOops::is_null(o)) {
  56         _bs->enqueue(o);
  57       }
  58     } else {
  59       _heap->maybe_update_with_forwarded(p);
  60     }
  61   }
  62 public:
  63   ShenandoahUpdateRefsForOopClosure() : _heap(ShenandoahHeap::heap()), _bs(ShenandoahBarrierSet::barrier_set()) {
  64     assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  65   }
  66 
  67   virtual void do_oop(oop* p)       { do_oop_work(p); }
  68   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  69 };
  70 
// Construct the Shenandoah barrier set: install the Shenandoah-specific
// assembler/C1/C2 barrier support and register the barrier-set kind with
// BarrierSet's FakeRtti. No nmethod barrier is used (NULL).
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             NULL /* barrier_set_nmethod */,
             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
  _heap(heap),
  _satb_mark_queue_set()
{
}
  81 
  82 ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
  83   BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
  84   return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
  85 }
  86 
// Print a terse identification of this barrier set; no configuration
// details are emitted.
void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}
  90 
// FakeRtti-style type query: this barrier set matches only its own name.
bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  return bsn == BarrierSet::ShenandoahBarrierSet;
}
  94 
// Shenandoah imposes no card-granularity constraints, so every heap word
// counts as aligned for barrier purposes.
bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
  return true;
}
  98 
  99 template <class T, bool STOREVAL_EVAC_BARRIER>
 100 void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
 101   assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
 102   ShenandoahUpdateRefsForOopClosure<STOREVAL_EVAC_BARRIER> cl;
 103   T* dst = (T*) start;
 104   for (size_t i = 0; i < count; i++) {
 105     cl.do_oop(dst++);
 106   }
 107 }
 108 
 109 void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
 110   assert(_heap->is_update_refs_in_progress(), "should not be here otherwise");
 111   assert(count > 0, "Should have been filtered before");
 112 
 113   if (_heap->is_concurrent_traversal_in_progress()) {
 114     ShenandoahEvacOOMScope oom_evac_scope;
 115     if (UseCompressedOops) {
 116       write_ref_array_loop<narrowOop, /* evac = */ true>(start, count);
 117     } else {
 118       write_ref_array_loop<oop,       /* evac = */ true>(start, count);
 119     }
 120   } else {
 121     if (UseCompressedOops) {
 122       write_ref_array_loop<narrowOop, /* evac = */ false>(start, count);
 123     } else {
 124       write_ref_array_loop<oop,       /* evac = */ false>(start, count);
 125     }
 126   }
 127 }
 128 
 129 template <class T>
 130 void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
 131   shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_gc());
 132   assert(ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).is_active(), "Shouldn't be here otherwise");
 133   assert(ShenandoahSATBBarrier, "Shouldn't be here otherwise");
 134   assert(count > 0, "Should have been filtered before");
 135 
 136   Thread* thread = Thread::current();
 137   ShenandoahMarkingContext* ctx = _heap->marking_context();
 138   bool has_forwarded = _heap->has_forwarded_objects();
 139   T* elem_ptr = dst;
 140   for (size_t i = 0; i < count; i++, elem_ptr++) {
 141     T heap_oop = RawAccess<>::oop_load(elem_ptr);
 142     if (!CompressedOops::is_null(heap_oop)) {
 143       oop obj = CompressedOops::decode_not_null(heap_oop);
 144       if (has_forwarded) {
 145         obj = resolve_forwarded_not_null(obj);
 146       }
 147       if (!ctx->is_marked(obj)) {
 148         ShenandoahThreadLocalData::satb_mark_queue(thread).enqueue_known_active(obj);
 149       }
 150     }
 151   }
 152 }
 153 
 154 void ShenandoahBarrierSet::write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized) {
 155   if (! dest_uninitialized) {
 156     write_ref_array_pre_work(dst, count);
 157   }
 158 }
 159 
 160 void ShenandoahBarrierSet::write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized) {
 161   if (! dest_uninitialized) {
 162     write_ref_array_pre_work(dst, count);
 163   }
 164 }
 165 
 166 template <class T>
 167 inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
 168   shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
 169   if (_heap->is_concurrent_mark_in_progress()) {
 170     T heap_oop = RawAccess<>::oop_load(field);
 171     if (!CompressedOops::is_null(heap_oop)) {
 172       enqueue(CompressedOops::decode(heap_oop));
 173     }
 174   }
 175 }
 176 
// These are the more general virtual versions.
// Virtual dispatch entry for the uncompressed-oop field pre-barrier;
// delegates to the inlined template implementation above.
void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}
 181 
// Compressed-oop flavor of the field pre-barrier; same delegation.
void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}
 185 
// The untyped variant must never be reached under Shenandoah.
void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
  guarantee(false, "Not needed");
}
 189 
// Post-barrier for a single reference store. Shenandoah needs no post-store
// work; in debug builds, verify the slot and the stored value are consistent
// with the current phase (no store into cset locations; no forwarded or
// cset value stored while concurrent mark runs, unless GC was cancelled).
void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
  shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
  shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
}
 195 
 196 void ShenandoahBarrierSet::write_region(MemRegion mr) {
 197   if (!ShenandoahCloneBarrier) return;
 198   if (!_heap->is_update_refs_in_progress()) return;
 199 
 200   // This is called for cloning an object (see jvm.cpp) after the clone
 201   // has been made. We are not interested in any 'previous value' because
 202   // it would be NULL in any case. But we *are* interested in any oop*
 203   // that potentially need to be updated.
 204 
 205   oop obj = oop(mr.start());
 206   shenandoah_assert_correct(NULL, obj);
 207   if (_heap->is_concurrent_traversal_in_progress()) {
 208     ShenandoahEvacOOMScope oom_evac_scope;
 209     ShenandoahUpdateRefsForOopClosure</* evac = */ true> cl;
 210     obj->oop_iterate(&cl);
 211   } else {
 212     ShenandoahUpdateRefsForOopClosure</* evac = */ false> cl;
 213     obj->oop_iterate(&cl);
 214   }
 215 }
 216 
 217 oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
 218   if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
 219     return load_reference_barrier_impl(obj);
 220   } else {
 221     return obj;
 222   }
 223 }
 224 
 225 oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
 226   if (obj != NULL) {
 227     return load_reference_barrier_not_null(obj);
 228   } else {
 229     return obj;
 230   }
 231 }
 232 
// Mutator-side LRB entry, uncompressed slot; delegates to the template.
oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, oop* load_addr) {
  return load_reference_barrier_mutator_work(obj, load_addr);
}
 236 
// Mutator-side LRB entry, compressed slot; delegates to the template.
oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, narrowOop* load_addr) {
  return load_reference_barrier_mutator_work(obj, load_addr);
}
 240 
// Mutator-side LRB slow path. The slot is known to reference a cset object.
// If that object has not been evacuated yet, this thread evacuates it itself
// (plus up to ShenandoahEvacAssist marked neighbors, amortizing the runtime
// call and OOM-scope setup), then attempts to CAS the healed reference back
// into the load address. Returns the canonical (forwarded) reference.
template <class T>
oop ShenandoahBarrierSet::load_reference_barrier_mutator_work(oop obj, T* load_addr) {
  assert(ShenandoahLoadRefBarrier, "should be enabled");
  shenandoah_assert_in_cset(load_addr, obj);

  oop fwd = resolve_forwarded_not_null(obj);
  if (oopDesc::equals_raw(obj, fwd)) {
    // Object still forwards to itself: not evacuated yet, so we must do it.
    assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL),
           "evac should be in progress");

    ShenandoahEvacOOMScope oom_evac_scope;

    Thread* thread = Thread::current();
    oop res_oop = _heap->evacuate_object(obj, thread);

    // Since we are already here and paid the price of getting through runtime call adapters
    // and acquiring oom-scope, it makes sense to try and evacuate more adjacent objects,
    // thus amortizing the overhead. For sparsely live heaps, scan costs easily dominate
    // total assist costs, and can introduce a lot of evacuation latency. This is why we
    // only scan for _nearest_ N objects, regardless if they are eligible for evac or not.
    // The scan itself should also avoid touching the non-marked objects below TAMS, because
    // their metadata (notably, klasses) may be incorrect already.

    size_t max = ShenandoahEvacAssist;
    if (max > 0) {
      // Traversal is special: it uses incomplete marking context, because it coalesces evac with mark.
      // Other code uses complete marking context, because evac happens after the mark.
      ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
                                      _heap->marking_context() : _heap->complete_marking_context();

      ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
      assert(r->is_cset(), "sanity");

      // Start the assist scan right after the object we just evacuated.
      HeapWord* cur = (HeapWord*)obj + obj->size();

      size_t count = 0;
      // Stop at region top, the first unmarked object, or after `max` objects.
      while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
        oop cur_oop = oop(cur);
        // Only evacuate objects that are not forwarded yet.
        if (oopDesc::equals_raw(cur_oop, resolve_forwarded_not_null(cur_oop))) {
          _heap->evacuate_object(cur_oop, thread);
        }
        cur = cur + cur_oop->size();
      }
    }

    fwd = res_oop;
  }

  if (load_addr != NULL && fwd != obj) {
    // Since we are here and we know the load address, update the reference.
    ShenandoahHeap::cas_oop(fwd, load_addr, obj);
  }

  return fwd;
}
 296 
// Common LRB slow path: return the canonical (forwarded) copy of obj.
// If obj is in the collection set and not yet evacuated while an
// evacuation/traversal phase is running, this thread evacuates it itself.
oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
  assert(ShenandoahLoadRefBarrier, "should be enabled");
  if (!CompressedOops::is_null(obj)) {
    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
    oop fwd = resolve_forwarded_not_null(obj);
    if (evac_in_progress &&
        _heap->in_collection_set(obj) &&
        oopDesc::equals_raw(obj, fwd)) {
      Thread *t = Thread::current();
      if (t->is_GC_task_thread()) {
        // NOTE(review): GC task threads skip the evac-OOM scope here —
        // presumably it is established at the task level; verify.
        return _heap->evacuate_object(obj, t);
      } else {
        // Mutator threads must set up the OOM-during-evac scope explicitly.
        ShenandoahEvacOOMScope oom_evac_scope;
        return _heap->evacuate_object(obj, t);
      }
    } else {
      // Already evacuated, or not in cset: just return the canonical copy.
      return fwd;
    }
  } else {
    return obj;
  }
}
 319 
 320 void ShenandoahBarrierSet::storeval_barrier(oop obj) {
 321   if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj) && _heap->is_concurrent_traversal_in_progress()) {
 322     enqueue(obj);
 323   }
 324 }
 325 
 326 void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
 327   if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
 328     enqueue(obj);
 329   }
 330 }
 331 
// Push obj onto the current thread's SATB mark queue. Only reachable while
// the SATB machinery is active; during traversal the object must not be
// forwarded (asserted below).
void ShenandoahBarrierSet::enqueue(oop obj) {
  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
  assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");

  // Filter marked objects before hitting the SATB queues. The same predicate would
  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
  // filtering here helps to avoid wasteful SATB queueing work to begin with.
  if (!_heap->requires_marking<false>(obj)) return;

  ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).enqueue_known_active(obj);
}
 343 
// Thread lifecycle hook: allocate the Shenandoah thread-local data block
// for a newly created thread.
void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}
 348 
// Thread lifecycle hook: release the Shenandoah thread-local data block
// when a thread is destroyed.
void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}
 353 
// Thread attach hook: sync the thread's SATB queue activation with the
// global SATB state; Java threads additionally pick up a snapshot of the
// current gc-state and have their GCLAB initialized. Java threads must not
// attach at a safepoint (asserted below).
void ShenandoahBarrierSet::on_thread_attach(Thread *thread) {
  assert(!thread->is_Java_thread() || !SafepointSynchronize::is_at_safepoint(),
         "We should not be at a safepoint");
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  assert(!queue.is_active(), "SATB queue should not be active");
  assert( queue.is_empty(),  "SATB queue should be empty");
  // Mirror the global activation state onto the fresh thread-local queue.
  queue.set_active(_satb_mark_queue_set.is_active());
  if (thread->is_Java_thread()) {
    ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
}
 366 
 367 void ShenandoahBarrierSet::on_thread_detach(Thread *thread) {
 368   SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
 369   queue.flush();
 370   if (thread->is_Java_thread()) {
 371     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 372     if (gclab != NULL) {
 373       gclab->retire();
 374     }
 375   }
 376 }