/*
 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahBarrierSetNMethod.hpp"
#include "gc/shenandoah/shenandoahBarrierSetStackChunk.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#ifdef COMPILER1
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

// Forward declarations are sufficient here: make_barrier_set_c1/c2 only need
// the names when the corresponding compiler (C1/C2) is not compiled in.
class ShenandoahBarrierSetC1;
class ShenandoahBarrierSetC2;

// Construct the Shenandoah barrier set: plugs the Shenandoah-specific
// assembler/C1/C2 barrier generators and nmethod/stack-chunk barriers into
// the shared BarrierSet machinery, and sets up the SATB mark queue set with
// its buffer allocator. The card table is only created when the card barrier
// is enabled (ShenandoahCardBarrier).
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap, MemRegion heap_region) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             new ShenandoahBarrierSetNMethod(heap),
             new ShenandoahBarrierSetStackChunk(),
             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
  _heap(heap),
  _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize),
  _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator)
{
  if (ShenandoahCardBarrier) {
    _card_table = new ShenandoahCardTable(heap_region);
    _card_table->initialize();
  }
}

// Return the process-wide barrier set assembler, downcast to the Shenandoah
// variant. Safe because when Shenandoah is the active GC, the installed
// assembler is a ShenandoahBarrierSetAssembler.
ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
  BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
  return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
}

// Print a short identifying tag for this barrier set (used in VM diagnostics).
void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}

// Decide whether a load of the given type needs a load-reference barrier.
// Note: 'decorators' is currently unused here; the LRB applies to all
// reference loads when ShenandoahLoadRefBarrier is enabled.
bool ShenandoahBarrierSet::need_load_reference_barrier(DecoratorSet decorators, BasicType type) {
  if (!ShenandoahLoadRefBarrier) return false;
  // Only needed for references
  return is_reference_type(type);
}

// Decide whether a load needs a SATB keep-alive barrier: only for reference
// loads through weak/phantom/unknown-strength fields, and only when the
// access does not opt out via AS_NO_KEEPALIVE.
bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, BasicType type) {
  if (!ShenandoahSATBBarrier) return false;
  // Only needed for references
  if (!is_reference_type(type)) return false;

  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
  return (on_weak_ref || unknown) && keep_alive;
}

void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}

void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}

// Initialize GC-related thread-local state when a thread attaches to the VM:
// activate its SATB queue to match the global queue set, and for Java threads
// also publish the current gc state, set up the GCLAB, arm/disarm the nmethod
// entry guard, and install a Shenandoah stack watermark if that barrier is on.
void ShenandoahBarrierSet::on_thread_attach(Thread *thread) {
  assert(!thread->is_Java_thread() || !SafepointSynchronize::is_at_safepoint(),
         "We should not be at a safepoint");
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  assert(!queue.is_active(), "SATB queue should not be active");
  assert(queue.buffer() == nullptr, "SATB queue should not have a buffer");
  assert(queue.index() == 0, "SATB queue index should be zero");
  // Match the global SATB activation state so this thread participates in
  // concurrent marking if one is in progress.
  queue.set_active(_satb_mark_queue_set.is_active());
  if (thread->is_Java_thread()) {
    ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());
    ShenandoahThreadLocalData::initialize_gclab(thread);

    BarrierSetNMethod* bs_nm = barrier_set_nmethod();
    if (bs_nm != nullptr) {
      // Seed the per-thread nmethod guard with the current disarmed value.
      thread->set_nmethod_disarmed_guard_value(bs_nm->disarmed_guard_value());
    }

    if (ShenandoahStackWatermarkBarrier) {
      JavaThread* const jt = JavaThread::cast(thread);
      StackWatermark* const watermark = new ShenandoahStackWatermark(jt);
      StackWatermarkSet::add_watermark(jt, watermark);
    }
  }
}

// Tear down GC-related thread-local state when a thread detaches: flush any
// pending SATB buffer, retire the thread's GCLAB/PLAB, and — if stack
// watermarks are in use — finish any outstanding lazy stack processing so no
// roots on this thread's stack are lost by the in-progress GC phase.
void ShenandoahBarrierSet::on_thread_detach(Thread *thread) {
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  _satb_mark_queue_set.flush_queue(queue);
  if (thread->is_Java_thread()) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    if (gclab != nullptr) {
      gclab->retire();
    }

    PLAB* plab = ShenandoahThreadLocalData::plab(thread);
    // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock.
    // This is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each
    // PLAB is aligned with the start of each card's memory range.
    // TODO: Assert this in retire_plab?
    if (plab != nullptr) {
      _heap->retire_plab(plab);
    }

    // SATB protocol requires to keep alive reachable oops from roots at the beginning of GC
    if (ShenandoahStackWatermarkBarrier) {
      if (_heap->is_concurrent_mark_in_progress()) {
        // Marking: visit remaining stack roots with a keep-alive closure.
        ShenandoahKeepAliveClosure oops;
        StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc);
      } else if (_heap->is_concurrent_weak_root_in_progress() && _heap->is_evacuation_in_progress()) {
        // Evacuation: visit remaining stack roots with an evacuate/update closure.
        ShenandoahContextEvacuateUpdateRootsClosure oops;
        StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc);
      }
    }
  }
}

// Runtime slow path for the clone barrier: only needed while forwarded
// objects may exist, or (with the IU barrier) while concurrent marking runs.
void ShenandoahBarrierSet::clone_barrier_runtime(oop src) {
  if (_heap->has_forwarded_objects() || (ShenandoahIUBarrier && _heap->is_concurrent_mark_in_progress())) {
    clone_barrier(src);
  }
}

// Post-write barrier for oop array stores: dirty all cards overlapping the
// written range [start, start + count * heapOopSize).
void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
  assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");

  HeapWord* end = (HeapWord*)((char*) start + (count * heapOopSize));
  // In the case of compressed oops, start and end may potentially be misaligned;
  // so we need to conservatively align the first downward (this is not
  // strictly necessary for current uses, but a case of good hygiene and,
  // if you will, aesthetics) and the second upward (this is essential for
  // current uses) to a HeapWord boundary, so we mark all cards overlapping
  // this write.
  HeapWord* aligned_start = align_down(start, HeapWordSize);
  HeapWord* aligned_end = align_up (end, HeapWordSize);
  // If compressed oops were not being used, these should already be aligned
  assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
         "Expected heap word alignment of start and end");
  _heap->card_scan()->mark_range_as_dirty(aligned_start, (aligned_end - aligned_start));
}