1 /*
   2  * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shenandoah/shenandoahAsserts.hpp"
  26 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  27 #include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp"
  28 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"

  29 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"

  30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  31 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  32 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  33 #include "memory/iterator.inline.hpp"
  34 #include "runtime/interfaceSupport.inline.hpp"
  35 #ifdef COMPILER1
  36 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  37 #endif
  38 #ifdef COMPILER2
  39 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  40 #endif
  41 
  42 class ShenandoahBarrierSetC1;
  43 class ShenandoahBarrierSetC2;
  44 








// Wire up the Shenandoah-specific assembler/C1/C2 barrier support and the
// SATB mark queue machinery. No nmethod barrier is used (NULL below).
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             NULL /* barrier_set_nmethod */,
             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
  _heap(heap),
  _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize),
  _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator)
{
}
  56 
  57 ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
  58   BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
  59   return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
  60 }
  61 
  62 void ShenandoahBarrierSet::print_on(outputStream* st) const {
  63   st->print("ShenandoahBarrierSet");
  64 }
  65 
  66 bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  67   return bsn == BarrierSet::ShenandoahBarrierSet;
  68 }
  69 
  70 bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
  71   return true;
  72 }
  73 
  74 template <class T>
  75 inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
  76   shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
  77   if (_heap->is_concurrent_mark_in_progress()) {
  78     T heap_oop = RawAccess<>::oop_load(field);
  79     if (!CompressedOops::is_null(heap_oop)) {
  80       enqueue(CompressedOops::decode(heap_oop));
  81     }
  82   }
  83 }
  84 
  85 // These are the more general virtual versions.
  86 void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
  87   inline_write_ref_field_pre(field, new_val);
  88 }
  89 
  90 void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
  91   inline_write_ref_field_pre(field, new_val);
  92 }
  93 
  94 void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
  95   guarantee(false, "Not needed");
  96 }
  97 
  98 void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
  99   shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
 100   shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
 101   shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
 102 }
 103 
 104 oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
 105   if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
 106     return load_reference_barrier_impl(obj);
 107   } else {
 108     return obj;
 109   }
 110 }
 111 
 112 oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
 113   if (obj != NULL) {
 114     return load_reference_barrier_not_null(obj);
 115   } else {
 116     return obj;
 117   }
 118 }
 119 
 120 oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, oop* load_addr) {
 121   return load_reference_barrier_mutator_work(obj, load_addr);
 122 }
 123 
 124 oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, narrowOop* load_addr) {
 125   return load_reference_barrier_mutator_work(obj, load_addr);
 126 }
 127 
// Mutator slow path of the load-reference barrier: called when a load observed
// an object in the collection set. Evacuates the object if it has not been
// copied yet, opportunistically evacuates nearby objects, and heals the load
// address with the forwardee. Returns the forwardee.
template <class T>
oop ShenandoahBarrierSet::load_reference_barrier_mutator_work(oop obj, T* load_addr) {
  assert(ShenandoahLoadRefBarrier, "should be enabled");
  shenandoah_assert_in_cset(load_addr, obj);

  oop fwd = resolve_forwarded_not_null(obj);
  if (obj == fwd) {
    // Object is not copied yet: some evacuating phase (concurrent evac or
    // traversal) must be running for us to get here.
    assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL),
           "evac should be in progress");

    // Enter the evac-OOM protocol so allocation failure during the copy is
    // handled cooperatively with GC threads.
    ShenandoahEvacOOMScope oom_evac_scope;

    Thread* thread = Thread::current();
    oop res_oop = _heap->evacuate_object(obj, thread);

    // Since we are already here and paid the price of getting through runtime call adapters
    // and acquiring oom-scope, it makes sense to try and evacuate more adjacent objects,
    // thus amortizing the overhead. For sparsely live heaps, scan costs easily dominate
    // total assist costs, and can introduce a lot of evacuation latency. This is why we
    // only scan for _nearest_ N objects, regardless if they are eligible for evac or not.
    // The scan itself should also avoid touching the non-marked objects below TAMS, because
    // their metadata (notably, klasses) may be incorrect already.

    size_t max = ShenandoahEvacAssist;
    if (max > 0) {
      // Traversal is special: it uses incomplete marking context, because it coalesces evac with mark.
      // Other code uses complete marking context, because evac happens after the mark.
      ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
                                      _heap->marking_context() : _heap->complete_marking_context();

      ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
      assert(r->is_cset(), "sanity");

      // Start the scan immediately after the object we just evacuated.
      HeapWord* cur = (HeapWord*)obj + obj->size();

      size_t count = 0;
      // Walk at most 'max' consecutive marked objects, staying below region top.
      while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
        oop cur_oop = oop(cur);
        // Only evacuate objects that have not been copied yet (self-forwarded).
        if (cur_oop == resolve_forwarded_not_null(cur_oop)) {
          _heap->evacuate_object(cur_oop, thread);
        }
        cur = cur + cur_oop->size();
      }
    }

    fwd = res_oop;
  }

  if (load_addr != NULL && fwd != obj) {
    // Since we are here and we know the load address, update the reference.
    ShenandoahHeap::cas_oop(fwd, load_addr, obj);
  }

  return fwd;
}
 183 
 184 oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
 185   assert(ShenandoahLoadRefBarrier, "should be enabled");
 186   if (!CompressedOops::is_null(obj)) {
 187     bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
 188     oop fwd = resolve_forwarded_not_null(obj);
 189     if (evac_in_progress &&
 190         _heap->in_collection_set(obj) &&
 191         obj == fwd) {
 192       Thread *t = Thread::current();
 193       if (t->is_GC_task_thread()) {
 194         return _heap->evacuate_object(obj, t);
 195       } else {
 196         ShenandoahEvacOOMScope oom_evac_scope;
 197         return _heap->evacuate_object(obj, t);
 198       }
 199     } else {
 200       return fwd;
 201     }
 202   } else {
 203     return obj;
 204   }
 205 }
 206 
 207 void ShenandoahBarrierSet::storeval_barrier(oop obj) {
 208   if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj) && _heap->is_concurrent_traversal_in_progress()) {
 209     enqueue(obj);
 210   }
 211 }
 212 
 213 void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
 214   if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
 215     enqueue(obj);
 216   }
 217 }
 218 
 219 void ShenandoahBarrierSet::enqueue(oop obj) {
 220   shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
 221   assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");
 222 
 223   // Filter marked objects before hitting the SATB queues. The same predicate would
 224   // be used by SATBMQ::filter to eliminate already marked objects downstream, but
 225   // filtering here helps to avoid wasteful SATB queueing work to begin with.
 226   if (!_heap->requires_marking<false>(obj)) return;
 227 
 228   ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).enqueue_known_active(obj);
 229 }
 230 
 231 void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
 232   // Create thread local data
 233   ShenandoahThreadLocalData::create(thread);
 234 }
 235 
 236 void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
 237   // Destroy thread local data
 238   ShenandoahThreadLocalData::destroy(thread);
 239 }
 240 
 241 void ShenandoahBarrierSet::on_thread_attach(Thread *thread) {
 242   assert(!thread->is_Java_thread() || !SafepointSynchronize::is_at_safepoint(),
 243          "We should not be at a safepoint");
 244   SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
 245   assert(!queue.is_active(), "SATB queue should not be active");
 246   assert( queue.is_empty(),  "SATB queue should be empty");
 247   queue.set_active(_satb_mark_queue_set.is_active());
 248   if (thread->is_Java_thread()) {
 249     ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());
 250     ShenandoahThreadLocalData::initialize_gclab(thread);
 251   }
 252 }
 253 
 254 void ShenandoahBarrierSet::on_thread_detach(Thread *thread) {
 255   SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
 256   queue.flush();
 257   if (thread->is_Java_thread()) {
 258     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 259     if (gclab != NULL) {
 260       gclab->retire();
 261     }
 262   }
 263 }
 264 
 265 oop ShenandoahBarrierSet::load_reference_barrier_native(oop obj, oop* load_addr) {
 266   return load_reference_barrier_native_impl(obj, load_addr);
 267 }
 268 
 269 oop ShenandoahBarrierSet::load_reference_barrier_native(oop obj, narrowOop* load_addr) {
 270   // Assumption: narrow oop version should not be used anywhere.
 271   ShouldNotReachHere();
 272   return NULL;
 273 }
 274 
// Load-reference barrier for native (JNI/Unsafe) reference loads. Besides the
// regular healing, unmarked objects during evacuation are hidden from Java
// threads (returned as NULL) while non-Java threads still see them.
template <class T>
oop ShenandoahBarrierSet::load_reference_barrier_native_impl(oop obj, T* load_addr) {
  if (CompressedOops::is_null(obj)) {
    return NULL;
  }

  ShenandoahMarkingContext* const marking_context = _heap->marking_context();
  if (_heap->is_evacuation_in_progress() && !marking_context->is_marked(obj)) {
    // Object is not marked during evacuation: it may be about to die.
    Thread* thr = Thread::current();
    if (thr->is_Java_thread()) {
      // Java threads must not observe unreachable objects through native loads.
      return NULL;
    } else {
      // Non-Java threads (e.g. GC/VM threads) may still process the object.
      return obj;
    }
  }

  oop fwd = load_reference_barrier_not_null(obj);
  if (load_addr != NULL && fwd != obj) {
    // Since we are here and we know the load address, update the reference.
    ShenandoahHeap::cas_oop(fwd, load_addr, obj);
  }

  return fwd;
}
 299 
 300 void ShenandoahBarrierSet::clone_barrier_runtime(oop src) {
 301   if (_heap->has_forwarded_objects()) {
 302     clone_barrier(src);
 303   }
 304 }
 305 
--- EOF ---