1 /*
2 * Copyright (c) 2019, 2020, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP
26 #define SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP
27
28 #include "gc/shenandoah/shenandoahClosures.hpp"
29
30 #include "gc/shared/barrierSetNMethod.hpp"
31 #include "gc/shenandoah/shenandoahAsserts.hpp"
32 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
33 #include "gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp"
34 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
35 #include "gc/shenandoah/shenandoahMark.inline.hpp"
36 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
37 #include "gc/shenandoah/shenandoahNMethod.inline.hpp"
38 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
39 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
40 #include "memory/iterator.inline.hpp"
41 #include "oops/compressedOops.inline.hpp"
42 #include "runtime/atomicAccess.hpp"
43 #include "runtime/javaThread.hpp"
44
45 //
46 // ========= Super
47 //
48
// Base closure for Shenandoah oop iteration. Visits metadata (Klass) in
// addition to oops, and caches the heap singleton for use by subclasses.
ShenandoahSuperClosure::ShenandoahSuperClosure() :
  MetadataVisitingOopIterateClosure(), _heap(ShenandoahHeap::heap()) {}
51
// Same as the default constructor, but registers a reference processor
// with the base closure for discovery of j.l.r.Reference objects.
ShenandoahSuperClosure::ShenandoahSuperClosure(ShenandoahReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp), _heap(ShenandoahHeap::heap()) {}
54
void ShenandoahSuperClosure::do_nmethod(nmethod* nm) {
  // Running the entry barrier processes the nmethod's embedded oops,
  // the same work a mutator would trigger on first entry.
  nm->run_nmethod_entry_barrier();
}
58
59 //
60 // ========= Marking
61 //
// Handshake closure that flushes each Java thread's partially-filled SATB
// buffer into the given queue set, making the entries visible to marking.
ShenandoahFlushSATBHandshakeClosure::ShenandoahFlushSATBHandshakeClosure(SATBMarkQueueSet& qset) :
  HandshakeClosure("Shenandoah Flush SATB"),
  _qset(qset) {}
65
void ShenandoahFlushSATBHandshakeClosure::do_thread(Thread* thread) {
  // Move the thread's partial SATB buffer into the completed-buffer set.
  _qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
}
69
// Shared state for the marking closures: the task queue newly-marked objects
// are pushed onto, a second queue passed through to mark_through_ref
// (old-generation work — see ShenandoahMark), the current marking context,
// and the weak-traversal flag (defaults to strong marking).
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q,
                                                               ShenandoahReferenceProcessor* rp,
                                                               ShenandoahObjToScanQueue* old_q) :
  ShenandoahSuperClosure(rp),
  _queue(q),
  _old_queue(old_q),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _weak(false) {}
78
// Marks the object referenced from slot p (if any) for the given generation,
// pushing it onto the appropriate scan queue.
template<class T, ShenandoahGenerationType GENERATION>
inline void ShenandoahMarkRefsSuperClosure::work(T* p) {
  ShenandoahMark::mark_through_ref<T, GENERATION>(p, _queue, _old_queue, _mark_context, _weak);
}
83
// Liveness predicate for phases where objects may still be forwarded:
// forwarding is resolved before the mark bitmap is consulted.
ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _mark_context(ShenandoahHeap::heap()->marking_context()) {}
86
87 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
88 if (CompressedOops::is_null(obj)) {
89 return false;
90 }
91 obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
92 shenandoah_assert_not_forwarded_if(nullptr, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
93 return _mark_context->is_marked_or_old(obj);
94 }
95
// Liveness predicate for phases where no forwarded objects exist
// (asserted in do_object_b).
ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _mark_context(ShenandoahHeap::heap()->marking_context()) {}
98
99 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
100 if (CompressedOops::is_null(obj)) {
101 return false;
102 }
103 shenandoah_assert_not_forwarded(nullptr, obj);
104 return _mark_context->is_marked_or_old(obj);
105 }
106
107 BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() {
108 return ShenandoahHeap::heap()->has_forwarded_objects() ?
109 reinterpret_cast<BoolObjectClosure*>(&_fwd_alive_cl) :
110 reinterpret_cast<BoolObjectClosure*>(&_alive_cl);
111 }
112
// Keep-alive closure: enqueues each non-null referent through the barrier
// set so concurrent marking treats it as live.
ShenandoahKeepAliveClosure::ShenandoahKeepAliveClosure() :
  _bs(ShenandoahBarrierSet::barrier_set()) {}
115
116 template <typename T>
117 void ShenandoahKeepAliveClosure::do_oop_work(T* p) {
118 assert(ShenandoahHeap::heap()->is_concurrent_mark_in_progress(), "Only for concurrent marking phase");
119 assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress() || !ShenandoahHeap::heap()->has_forwarded_objects(), "Not expected");
120
121 T o = RawAccess<>::oop_load(p);
122 if (!CompressedOops::is_null(o)) {
123 oop obj = CompressedOops::decode_not_null(o);
124 _bs->enqueue(obj);
125 }
126 }
127
128
129 //
130 // ========= Evacuating + Roots
131 //
132
133 template <bool CONCURRENT, bool STABLE_THREAD>
134 void ShenandoahEvacuateUpdateRootClosureBase<CONCURRENT, STABLE_THREAD>::do_oop(oop* p) {
135 if (CONCURRENT) {
136 ShenandoahEvacOOMScope scope;
137 do_oop_work(p);
138 } else {
139 do_oop_work(p);
140 }
141 }
142
143 template <bool CONCURRENT, bool STABLE_THREAD>
144 void ShenandoahEvacuateUpdateRootClosureBase<CONCURRENT, STABLE_THREAD>::do_oop(narrowOop* p) {
145 if (CONCURRENT) {
146 ShenandoahEvacOOMScope scope;
147 do_oop_work(p);
148 } else {
149 do_oop_work(p);
150 }
151 }
152
// Evacuates (if needed) and updates a single root slot:
//  - load the oop at p; null means nothing to do;
//  - if the object is in the collection set, resolve its forwardee and,
//    when it has not been copied yet, evacuate it on this thread;
//  - store the (possibly new) location back. Concurrent updaters must
//    CAS against the originally loaded value 'o' so they never clobber
//    a racing update of the same slot.
template <bool CONCURRENT, bool STABLE_THREAD>
template <class T>
void ShenandoahEvacuateUpdateRootClosureBase<CONCURRENT, STABLE_THREAD>::do_oop_work(T* p) {
  assert(_heap->is_concurrent_weak_root_in_progress() ||
         _heap->is_concurrent_strong_root_in_progress(),
         "Only do this in root processing phase");

  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (_heap->in_collection_set(obj)) {
      assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
      shenandoah_assert_marked(p, obj);
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        // Not yet copied: evacuate. With STABLE_THREAD the closure uses the
        // cached _thread, which must equal the current thread (asserted).
        Thread* thr = STABLE_THREAD ? _thread : Thread::current();
        assert(thr == Thread::current(), "Wrong thread");

        resolved = _heap->evacuate_object(obj, thr);
      }
      if (CONCURRENT) {
        ShenandoahHeap::atomic_update_oop(resolved, p, o);
      } else {
        RawAccess<IS_NOT_NULL | MO_UNORDERED>::oop_store(p, resolved);
      }
    }
  }
}
181
// Clears dead weak oops and hands live ones to the keep-alive closure.
// The non-concurrent variant requires a safepoint, since it uses plain
// (non-atomic) stores on the slots.
template <bool CONCURRENT, typename IsAlive, typename KeepAlive>
ShenandoahCleanUpdateWeakOopsClosure<CONCURRENT, IsAlive, KeepAlive>::ShenandoahCleanUpdateWeakOopsClosure(IsAlive* is_alive, KeepAlive* keep_alive) :
  _is_alive(is_alive), _keep_alive(keep_alive) {
  if (!CONCURRENT) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  }
}
189
template <bool CONCURRENT, typename IsAlive, typename KeepAlive>
void ShenandoahCleanUpdateWeakOopsClosure<CONCURRENT, IsAlive, KeepAlive>::do_oop(oop* p) {
  oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (_is_alive->do_object_b(obj)) {
      // Live: let the keep-alive closure process the slot.
      _keep_alive->do_oop(p);
    } else {
      // Dead: clear the slot. Concurrent clearing must CAS against the
      // loaded value to avoid stomping a racing update of the slot.
      if (CONCURRENT) {
        ShenandoahHeap::atomic_clear_oop(p, obj);
      } else {
        RawAccess<IS_NOT_NULL>::oop_store(p, oop());
      }
    }
  }
}
205
// This closure is never applied to narrow-oop slots.
template <bool CONCURRENT, typename IsAlive, typename KeepAlive>
void ShenandoahCleanUpdateWeakOopsClosure<CONCURRENT, IsAlive, KeepAlive>::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}
210
// Applies 'cl' to an nmethod's oops (fixing relocations as it goes) and
// then disarms the nmethod's entry barrier; see do_nmethod below.
ShenandoahNMethodAndDisarmClosure::ShenandoahNMethodAndDisarmClosure(OopClosure* cl) :
  NMethodToOopClosure(cl, true /* fix_relocations */) {}
213
void ShenandoahNMethodAndDisarmClosure::do_nmethod(nmethod* nm) {
  assert(nm != nullptr, "Sanity");
  assert(!ShenandoahNMethod::gc_data(nm)->is_unregistered(), "Should not be here");
  // Process the nmethod's oops first, then disarm so that later entries
  // do not redo the barrier work.
  NMethodToOopClosure::do_nmethod(nm);
  ShenandoahNMethod::disarm_nmethod(nm);
}
220
221
222 //
223 // ========= Update References
224 //
225
// Marking closure that also updates references in place. Only valid for
// STW collections (asserted), where mutators cannot race with the
// non-atomic reference updates.
template <ShenandoahGenerationType GENERATION>
ShenandoahMarkUpdateRefsClosure<GENERATION>::ShenandoahMarkUpdateRefsClosure(ShenandoahObjToScanQueue* q,
                                                                             ShenandoahReferenceProcessor* rp,
                                                                             ShenandoahObjToScanQueue* old_q) :
  ShenandoahMarkRefsSuperClosure(q, rp, old_q) {
  assert(_heap->is_stw_gc_in_progress(), "Can only be used for STW GC");
}
233
template<ShenandoahGenerationType GENERATION>
template<class T>
inline void ShenandoahMarkUpdateRefsClosure<GENERATION>::work(T* p) {
  // Update the location with the forwardee (non-concurrent store; this
  // closure is STW-only, see the constructor assert)
  _heap->non_conc_update_with_forwarded(p);

  // ...then do the usual thing: mark through the now-updated reference
  ShenandoahMarkRefsSuperClosure::work<T, GENERATION>(p);
}
243
// Updates the slot at p with its forwardee using a plain (non-atomic)
// store; for use when mutators cannot race on the slot.
template<class T>
inline void ShenandoahNonConcUpdateRefsClosure::work(T* p) {
  _heap->non_conc_update_with_forwarded(p);
}
248
// Updates the slot at p with its forwardee; the concurrent variant is
// safe against racing mutator updates.
template<class T>
inline void ShenandoahConcUpdateRefsClosure::work(T* p) {
  _heap->conc_update_with_forwarded(p);
}
253
// Flushes the given thread's SATB buffer into _satb_qset.
inline void ShenandoahFlushSATB::do_thread(Thread* thread) {
  // Transfer any partial buffer to the qset for completed buffer processing.
  _satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
}
258
259 //
260 // ========= Utilities
261 //
262
263 #ifdef ASSERT
// Debug-only verification that the oop stored at slot p (if any) is not
// forwarded.
template <class T>
void ShenandoahAssertNotForwardedClosure::do_oop_work(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    shenandoah_assert_not_forwarded(p, obj);
  }
}
272
// Dispatch both oop widths to the common checking code above.
void ShenandoahAssertNotForwardedClosure::do_oop(narrowOop* p) { do_oop_work(p); }
void ShenandoahAssertNotForwardedClosure::do_oop(oop* p)       { do_oop_work(p); }
275 #endif
276
277 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP