1 /*
  2  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
  3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #include "gc/shared/barrierSetNMethod.hpp"
 27 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
 28 #include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp"
 29 #include "gc/shenandoah/shenandoahBarrierSetNMethod.hpp"
 30 #include "gc/shenandoah/shenandoahBarrierSetStackChunk.hpp"
 31 #include "gc/shenandoah/shenandoahCardTable.hpp"
 32 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
 33 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 34 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 35 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
 36 #ifdef COMPILER1
 37 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
 38 #endif
 39 #ifdef COMPILER2
 40 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
 41 #endif
 42 
 43 class ShenandoahBarrierSetC1;
 44 class ShenandoahBarrierSetC2;
 45 
// Constructs the Shenandoah barrier set, wiring up the platform assembler,
// C1/C2 compiler support, nmethod and stack-chunk barriers, and the SATB
// mark-queue machinery. The card table is only created when the card barrier
// is enabled (generational mode); otherwise _card_table stays null.
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap, MemRegion heap_region) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             new ShenandoahBarrierSetNMethod(heap),
             new ShenandoahBarrierSetStackChunk(),
             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
  _heap(heap),
  _card_table(nullptr),
  _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize),
  _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator)
{
  // Card-marking barriers are only used when ShenandoahCardBarrier is set;
  // the table covers the whole reserved heap region.
  if (ShenandoahCardBarrier) {
    _card_table = new ShenandoahCardTable(heap_region);
    _card_table->initialize();
  }
}
 63 
 64 ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
 65   BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
 66   return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
 67 }
 68 
// Prints a short identifying tag for this barrier set (used by heap printing
// and diagnostic commands).
void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}
 72 
 73 bool ShenandoahBarrierSet::need_load_reference_barrier(DecoratorSet decorators, BasicType type) {
 74   if (!ShenandoahLoadRefBarrier) return false;
 75   return is_reference_type(type);
 76 }
 77 
 78 bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, BasicType type) {
 79   if (!ShenandoahSATBBarrier) return false;
 80   if (!is_reference_type(type)) return false;
 81   bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
 82   bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
 83   bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
 84   return (on_weak_ref || unknown) && keep_alive;
 85 }
 86 
 87 bool ShenandoahBarrierSet::need_satb_barrier(DecoratorSet decorators, BasicType type) {
 88   if (!ShenandoahSATBBarrier) return false;
 89   if (!is_reference_type(type)) return false;
 90   bool as_normal = (decorators & AS_NORMAL) != 0;
 91   bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
 92   return as_normal && !dest_uninitialized;
 93 }
 94 
 95 bool ShenandoahBarrierSet::need_card_barrier(DecoratorSet decorators, BasicType type) {
 96   if (!ShenandoahCardBarrier) return false;
 97   if (!is_reference_type(type)) return false;
 98   bool in_heap = (decorators & IN_HEAP) != 0;
 99   return in_heap;
100 }
101 
// Called when a slow-path allocation returns to Java. With ReduceInitialCardMarks,
// compiled code elides card marks for stores into freshly allocated objects on
// the assumption that the object is young. If the object is no longer in the
// young generation by the time we get here (e.g. it was promoted at an
// intervening safepoint), conservatively dirty all cards covering it so any
// elided old->young pointers are still found by remembered-set scanning.
void ShenandoahBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
#if COMPILER2_OR_JVMCI
  if (ReduceInitialCardMarks && ShenandoahCardBarrier && !ShenandoahHeap::heap()->is_in_young(new_obj)) {
    log_debug(gc)("Newly allocated object (" PTR_FORMAT ") is not in the young generation", p2i(new_obj));
    // This can happen when an object is newly allocated, but we come to a safepoint before returning
    // the object. If the safepoint runs a degenerated cycle that is upgraded to a full GC, this object
    // will have survived two GC cycles. If the tenuring age is very low (1), this object may be promoted.
    // In this case, we have an allocated object, but it has received no stores yet. If card marking barriers
    // have been elided, we could end up with an object in old holding pointers to young that won't be in
    // the remembered set. The solution here is conservative, but this problem should be rare, and it will
    // correct itself on subsequent cycles when the remembered set is updated.
    ShenandoahGenerationalHeap::heap()->old_generation()->card_scan()->mark_range_as_dirty(
      cast_from_oop<HeapWord*>(new_obj), new_obj->size()
    );
  }
#endif // COMPILER2_OR_JVMCI
}
119 
// Hook invoked when a new thread object is created (before it attaches):
// allocates the Shenandoah-specific thread-local GC data.
void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}
124 
// Hook invoked when a thread object is destroyed: releases the
// Shenandoah-specific thread-local GC data created in on_thread_create().
void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}
129 
// Hook invoked when a thread attaches to the VM. Publishes the current GC
// barrier state into the thread-local data so JIT'ed and runtime code on this
// thread observes consistent barrier configuration: SATB queue activation,
// card table base (if card barriers are on), and the current gc_state. Java
// threads additionally get a GCLAB, the nmethod disarm value, and (optionally)
// a stack watermark.
void ShenandoahBarrierSet::on_thread_attach(Thread *thread) {
  assert(!thread->is_Java_thread() || !SafepointSynchronize::is_at_safepoint(),
         "We should not be at a safepoint");
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  assert(!queue.is_active(), "SATB queue should not be active");
  assert(queue.buffer() == nullptr, "SATB queue should not have a buffer");
  assert(queue.index() == 0, "SATB queue index should be zero");
  // Mirror the global SATB activation state into this thread's queue.
  queue.set_active(_satb_mark_queue_set.is_active());

  if (ShenandoahCardBarrier) {
    // Every thread always have a pointer to the _current_ _write_ version of the card table.
    // The JIT'ed code will use this address (+card entry offset) to mark the card as dirty.
    ShenandoahThreadLocalData::set_card_table(thread, _card_table->write_byte_map_base());
  }
  // Snapshot the heap's gc_state so barrier fast paths on this thread see it.
  ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());

  if (thread->is_Java_thread()) {
    ShenandoahThreadLocalData::initialize_gclab(thread);

    // Arm/disarm state for nmethod entry barriers on this thread.
    BarrierSetNMethod* bs_nm = barrier_set_nmethod();
    thread->set_nmethod_disarmed_guard_value(bs_nm->disarmed_guard_value());

    if (ShenandoahStackWatermarkBarrier) {
      JavaThread* const jt = JavaThread::cast(thread);
      StackWatermark* const watermark = new ShenandoahStackWatermark(jt);
      StackWatermarkSet::add_watermark(jt, watermark);
    }
  }
}
159 
// Hook invoked when a thread detaches from the VM. Flushes any pending SATB
// entries so marking does not lose them, retires the thread's allocation
// buffers (GCLAB, and PLAB in generational mode), and completes any pending
// stack-watermark processing so oops reachable from this stack are handled
// before the stack goes away.
void ShenandoahBarrierSet::on_thread_detach(Thread *thread) {
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  _satb_mark_queue_set.flush_queue(queue);
  if (thread->is_Java_thread()) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    if (gclab != nullptr) {
      gclab->retire();
    }

    PLAB* plab = ShenandoahThreadLocalData::plab(thread);
    if (plab != nullptr) {
      // This will assert if plab is not null in non-generational mode
      ShenandoahGenerationalHeap::heap()->retire_plab(plab);
    }

    // SATB protocol requires to keep alive reachable oops from roots at the beginning of GC
    if (ShenandoahStackWatermarkBarrier) {
      if (_heap->is_concurrent_mark_in_progress()) {
        // Concurrent marking: keep stack-reachable oops alive per SATB.
        ShenandoahKeepAliveClosure oops;
        StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc);
      } else if (_heap->is_concurrent_weak_root_in_progress() && _heap->is_evacuation_in_progress()) {
        // Evacuation: evacuate/update stack roots before the stack disappears.
        ShenandoahContextEvacuateUpdateRootsClosure oops;
        StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc);
      }
    }
  }
}
187 
188 void ShenandoahBarrierSet::clone_barrier_runtime(oop src) {
189   if (_heap->has_forwarded_objects()) {
190     clone_barrier(src);
191   }
192 }
193 
194 void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
195   assert(ShenandoahCardBarrier, "Should have been checked by caller");
196 
197   HeapWord* end = (HeapWord*)((char*) start + (count * heapOopSize));
198   // In the case of compressed oops, start and end may potentially be misaligned;
199   // so we need to conservatively align the first downward (this is not
200   // strictly necessary for current uses, but a case of good hygiene and,
201   // if you will, aesthetics) and the second upward (this is essential for
202   // current uses) to a HeapWord boundary, so we mark all cards overlapping
203   // this write.
204   HeapWord* aligned_start = align_down(start, HeapWordSize);
205   HeapWord* aligned_end   = align_up  (end,   HeapWordSize);
206   // If compressed oops were not being used, these should already be aligned
207   assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
208          "Expected heap word alignment of start and end");
209   _heap->old_generation()->card_scan()->mark_range_as_dirty(aligned_start, (aligned_end - aligned_start));
210 }
211