/*
 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahBarrierSetNMethod.hpp"
#include "gc/shenandoah/shenandoahBarrierSetStackChunk.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#ifdef COMPILER1
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

class ShenandoahBarrierSetC1;
class ShenandoahBarrierSetC2;

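// Wire up the Shenandoah-specific barrier machinery: the assembler stubs, the
// C1/C2 compiler support (when those compilers are built), the nmethod entry
// barrier, the stack-chunk barrier, and the SATB mark queue set with its buffer
// allocator. The card table is only created when ShenandoahCardBarrier is enabled.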
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap, MemRegion heap_region) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             new ShenandoahBarrierSetNMethod(heap),
             new ShenandoahBarrierSetStackChunk(),
             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
  _heap(heap),
  _card_table(nullptr),
  _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize),
  _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator)
{
  if (ShenandoahCardBarrier) {
    _card_table = new ShenandoahCardTable(heap_region);
    _card_table->initialize();
  }
}

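// Convenience accessor: the global barrier set is known to be Shenandoah's here,
// so the generic BarrierSetAssembler can be cast down to the Shenandoah-specific one.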
ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
  BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
  return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
}

void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}

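// Load-reference barriers apply only to reference loads: they make sure a load
// observes the to-space copy of an object while evacuation is in progress.
// Primitive loads never need them.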
bool ShenandoahBarrierSet::need_load_reference_barrier(DecoratorSet decorators, BasicType type) {
  if (!ShenandoahLoadRefBarrier) return false;
  return is_reference_type(type);
}

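// Keep-alive barriers cover reference loads through weak, phantom, or
// unknown-strength fields (e.g. Reference.get()): the loaded referent must be
// enqueued for SATB marking so the concurrent marker does not miss it.
// AS_NO_KEEPALIVE suppresses this for callers that explicitly do not want to
// keep the referent alive.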
bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, BasicType type) {
  if (!ShenandoahSATBBarrier) return false;
  if (!is_reference_type(type)) return false;
  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
  return (on_weak_ref || unknown) && keep_alive;
}

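// SATB (pre-write) barriers record the previous value of a reference field so
// concurrent marking sees a consistent snapshot-at-the-beginning. Stores into
// uninitialized destinations have no previous value to record, so they are exempt.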
bool ShenandoahBarrierSet::need_satb_barrier(DecoratorSet decorators, BasicType type) {
  if (!ShenandoahSATBBarrier) return false;
  if (!is_reference_type(type)) return false;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
  return as_normal && !dest_uninitialized;
}

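// Card (post-write) barriers track old-to-young pointers for the generational
// remembered set; they only apply to reference stores into the Java heap.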
bool ShenandoahBarrierSet::need_card_barrier(DecoratorSet decorators, BasicType type) {
  if (!ShenandoahCardBarrier) return false;
  if (!is_reference_type(type)) return false;
  bool in_heap = (decorators & IN_HEAP) != 0;
  return in_heap;
}

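// With ReduceInitialCardMarks, compiled code may elide card marks for freshly
// allocated objects on the assumption that they are young. The check below
// dirties the object's cards when that assumption does not hold; see the
// comment inside for how this can happen.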
void ShenandoahBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
#if COMPILER2_OR_JVMCI
  if (ReduceInitialCardMarks && ShenandoahCardBarrier && !ShenandoahHeap::heap()->is_in_young(new_obj)) {
    log_debug(gc)("Newly allocated object (" PTR_FORMAT ") is not in the young generation", p2i(new_obj));
    // This can happen when an object is newly allocated, but we come to a safepoint before returning
    // the object. If the safepoint runs a degenerated cycle that is upgraded to a full GC, this object
    // will have survived two GC cycles. If the tenuring age is very low (1), this object may be promoted.
    // In this case, we have an allocated object, but it has received no stores yet. If card marking barriers
    // have been elided, we could end up with an object in old holding pointers to young that won't be in
    // the remembered set. The solution here is conservative, but this problem should be rare, and it will
    // correct itself on subsequent cycles when the remembered set is updated.
    ShenandoahGenerationalHeap::heap()->old_generation()->card_scan()->mark_range_as_dirty(
        cast_from_oop<HeapWord*>(new_obj), new_obj->size());
  }
#endif // COMPILER2_OR_JVMCI
}

void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}

void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}

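// A newly attached thread must pick up the current GC state: its SATB queue is
// activated iff the global queue set is active, the current write card table base
// is installed (when card barriers are in use), the packed gc-state byte used by
// the fast-path barriers is copied, and Java threads additionally get a GCLAB,
// the nmethod entry-barrier guard value, and a stack watermark for concurrent
// stack processing.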
void ShenandoahBarrierSet::on_thread_attach(Thread* thread) {
  assert(!thread->is_Java_thread() || !SafepointSynchronize::is_at_safepoint(),
         "We should not be at a safepoint");
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  assert(!queue.is_active(), "SATB queue should not be active");
  assert(queue.buffer() == nullptr, "SATB queue should not have a buffer");
  assert(queue.index() == 0, "SATB queue index should be zero");
  queue.set_active(_satb_mark_queue_set.is_active());

  if (ShenandoahCardBarrier) {
    // Every thread always has a pointer to the _current_ _write_ version of the card table.
    // The JIT'ed code uses this address (plus the card entry offset) to mark the card as dirty.
    ShenandoahThreadLocalData::set_card_table(thread, _card_table->write_byte_map_base());
  }
  ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());

  if (thread->is_Java_thread()) {
    ShenandoahThreadLocalData::initialize_gclab(thread);

    BarrierSetNMethod* bs_nm = barrier_set_nmethod();
    thread->set_nmethod_disarmed_guard_value(bs_nm->disarmed_guard_value());

    if (ShenandoahStackWatermarkBarrier) {
      JavaThread* const jt = JavaThread::cast(thread);
      StackWatermark* const watermark = new ShenandoahStackWatermark(jt);
      StackWatermarkSet::add_watermark(jt, watermark);
    }
  }
}

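// On detach, flush everything the thread still holds that the GC relies on:
// pending SATB buffer entries, its GCLAB/PLAB allocation buffers, and any
// unfinished lazy stack processing, so that oops referenced from its frames are
// kept alive or updated, depending on the phase.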
void ShenandoahBarrierSet::on_thread_detach(Thread* thread) {
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  _satb_mark_queue_set.flush_queue(queue);
  if (thread->is_Java_thread()) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    if (gclab != nullptr) {
      gclab->retire();
    }

    PLAB* plab = ShenandoahThreadLocalData::plab(thread);
    if (plab != nullptr) {
      // This will assert if the plab is not null in non-generational mode
      ShenandoahGenerationalHeap::heap()->retire_plab(plab);
    }

    // The SATB protocol requires keeping alive the oops that were reachable from
    // roots at the beginning of the concurrent GC cycle.
    if (ShenandoahStackWatermarkBarrier) {
      if (_heap->is_concurrent_mark_in_progress()) {
        ShenandoahKeepAliveClosure oops;
        StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc);
      } else if (_heap->is_concurrent_weak_root_in_progress() && _heap->is_evacuation_in_progress()) {
        ShenandoahContextEvacuateUpdateRootsClosure oops;
        StackWatermarkSet::finish_processing(JavaThread::cast(thread), &oops, StackWatermarkKind::gc);
      }
    }
  }
}

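// Post-write barrier for bulk reference-array writes (e.g. arraycopy): dirty every
// card overlapping the written range. With compressed oops, heapOopSize is smaller
// than HeapWordSize, so 'start'/'end' may not be HeapWord-aligned; aligning start
// down and end up keeps the marked range a superset of the actual write.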
void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  HeapWord* end = (HeapWord*)((char*) start + (count * heapOopSize));
  // In the case of compressed oops, start and end may potentially be misaligned;
  // so we need to conservatively align the first downward (this is not
  // strictly necessary for current uses, but a case of good hygiene and,
  // if you will, aesthetics) and the second upward (this is essential for
  // current uses) to a HeapWord boundary, so we mark all cards overlapping
  // this write.
  HeapWord* aligned_start = align_down(start, HeapWordSize);
  HeapWord* aligned_end   = align_up(end, HeapWordSize);
  // If compressed oops were not being used, these should already be aligned
  assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
         "Expected heap word alignment of start and end");
  _heap->old_generation()->card_scan()->mark_range_as_dirty(aligned_start, (aligned_end - aligned_start));
}