/*
 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "gc/shared/barrierSetNMethod.hpp" 28 #include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp" 29 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" 30 #include "gc/shenandoah/shenandoahBarrierSetNMethod.hpp" 31 #include "gc/shenandoah/shenandoahBarrierSetStackChunk.hpp" 32 #include "gc/shenandoah/shenandoahClosures.inline.hpp" 33 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 34 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp" 35 #include "gc/shenandoah/shenandoahStackWatermark.hpp" 36 #ifdef COMPILER1 37 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp" 38 #endif 39 #ifdef COMPILER2 40 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp" 41 #endif 42 43 class ShenandoahBarrierSetC1; 44 class ShenandoahBarrierSetC2; 45 46 ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap, MemRegion heap_region) : 47 BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(), 48 make_barrier_set_c1<ShenandoahBarrierSetC1>(), 49 make_barrier_set_c2<ShenandoahBarrierSetC2>(), 50 new ShenandoahBarrierSetNMethod(heap), 51 new ShenandoahBarrierSetStackChunk(), 52 BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)), 53 _heap(heap), 54 _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize), 55 _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator) 56 { 57 if (ShenandoahCardBarrier) { 58 _card_table = new ShenandoahCardTable(heap_region); 59 _card_table->initialize(); 60 } 61 } 62 63 ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() { 64 BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler(); 65 return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa); 66 } 67 68 void ShenandoahBarrierSet::print_on(outputStream* st) const { 69 st->print("ShenandoahBarrierSet"); 70 } 71 72 bool ShenandoahBarrierSet::need_load_reference_barrier(DecoratorSet decorators, BasicType type) { 73 if 
(!ShenandoahLoadRefBarrier) return false; 74 // Only needed for references 75 return is_reference_type(type); 76 } 77 78 bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, BasicType type) { 79 if (!ShenandoahSATBBarrier) return false; 80 // Only needed for references 81 if (!is_reference_type(type)) return false; 82 83 bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0; 84 bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0; 85 bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0; 86 return (on_weak_ref || unknown) && keep_alive; 87 } 88 89 void ShenandoahBarrierSet::on_thread_create(Thread* thread) { 90 // Create thread local data 91 ShenandoahThreadLocalData::create(thread); 92 } 93 94 void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) { 95 // Destroy thread local data 96 ShenandoahThreadLocalData::destroy(thread); 97 } 98 99 void ShenandoahBarrierSet::on_thread_attach(Thread *thread) { 100 assert(!thread->is_Java_thread() || !SafepointSynchronize::is_at_safepoint(), 101 "We should not be at a safepoint"); 102 SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread); 103 assert(!queue.is_active(), "SATB queue should not be active"); 104 assert(queue.buffer() == nullptr, "SATB queue should not have a buffer"); 105 assert(queue.index() == 0, "SATB queue index should be zero"); 106 queue.set_active(_satb_mark_queue_set.is_active()); 107 if (thread->is_Java_thread()) { 108 ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state()); 109 ShenandoahThreadLocalData::initialize_gclab(thread); 110 111 BarrierSetNMethod* bs_nm = barrier_set_nmethod(); 112 if (bs_nm != nullptr) { 113 thread->set_nmethod_disarmed_guard_value(bs_nm->disarmed_guard_value()); 114 } 115 116 if (ShenandoahStackWatermarkBarrier) { 117 JavaThread* const jt = JavaThread::cast(thread); 118 StackWatermark* const watermark = new ShenandoahStackWatermark(jt); 119 StackWatermarkSet::add_watermark(jt, watermark); 120 
} 121 } 122 } 123 124 void ShenandoahBarrierSet::on_thread_detach(Thread *thread) { 125 SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread); 126 _satb_mark_queue_set.flush_queue(queue); 127 if (thread->is_Java_thread()) { 128 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread); 129 if (gclab != nullptr) { 130 gclab->retire(); 131 } 132 133 if (ShenandoahCardBarrier) { 134 PLAB* plab = ShenandoahThreadLocalData::plab(thread); 135 // retire_plab may register the remnant filler object with the remembered set scanner without a lock. 136 // This is safe because it is assured that each PLAB is a whole-number multiple of card-mark memory size and each 137 // PLAB is aligned with the start of each card's memory range. 138 if (plab != nullptr) { 139 ShenandoahGenerationalHeap::heap()->retire_plab(plab); 140 } 141 } 142 143 // SATB protocol requires to keep alive reachable oops from roots at the beginning of GC 144 if (ShenandoahStackWatermarkBarrier) { 145 if (_heap->is_concurrent_mark_in_progress()) { 146 ShenandoahKeepAliveClosure oops; 147 StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc); 148 } else if (_heap->is_concurrent_weak_root_in_progress() && _heap->is_evacuation_in_progress()) { 149 ShenandoahContextEvacuateUpdateRootsClosure oops; 150 StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc); 151 } 152 } 153 } 154 } 155 156 void ShenandoahBarrierSet::clone_barrier_runtime(oop src) { 157 if (_heap->has_forwarded_objects() || (ShenandoahIUBarrier && _heap->is_concurrent_mark_in_progress())) { 158 clone_barrier(src); 159 } 160 } 161 162 void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) { 163 assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?"); 164 165 HeapWord* end = (HeapWord*)((char*) start + (count * heapOopSize)); 166 // In the case of compressed oops, start and end may potentially be misaligned; 167 // so 
we need to conservatively align the first downward (this is not 168 // strictly necessary for current uses, but a case of good hygiene and, 169 // if you will, aesthetics) and the second upward (this is essential for 170 // current uses) to a HeapWord boundary, so we mark all cards overlapping 171 // this write. 172 HeapWord* aligned_start = align_down(start, HeapWordSize); 173 HeapWord* aligned_end = align_up (end, HeapWordSize); 174 // If compressed oops were not being used, these should already be aligned 175 assert(UseCompressedOops || (aligned_start == start && aligned_end == end), 176 "Expected heap word alignment of start and end"); 177 _heap->old_generation()->card_scan()->mark_range_as_dirty(aligned_start, (aligned_end - aligned_start)); 178 } 179