1 /*
2 * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "classfile/classLoaderData.hpp"
27 #include "code/nmethod.hpp"
28 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
30 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
31 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
32 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
33 #include "memory/iterator.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "runtime/javaThread.hpp"
36 #include "runtime/stackWatermarkSet.inline.hpp"
37 #include "runtime/threads.hpp"
38
// Snapshots the set of Java threads (via the default-constructed _threads
// member — presumably a thread-list snapshot; see the class declaration) and
// derives the claiming stride from the thread count, worker count, and the
// _chunks_per_worker constant. The stride is clamped to at least 1 so that
// claim() always advances even when there are more workers than threads.
ShenandoahJavaThreadsIterator::ShenandoahJavaThreadsIterator(ShenandoahPhaseTimings::Phase phase, uint n_workers) :
  _threads(),
  _length(_threads.length()),
  _stride(MAX2(1u, _length / n_workers / _chunks_per_worker)),
  _claimed(0),
  _phase(phase) {
}
46
47 uint ShenandoahJavaThreadsIterator::claim() {
48 return _claimed.fetch_then_add(_stride, memory_order_relaxed);
49 }
50
51 void ShenandoahJavaThreadsIterator::threads_do(ThreadClosure* cl, uint worker_id) {
52 ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::ThreadRoots, worker_id);
53 for (uint i = claim(); i < _length; i = claim()) {
54 for (uint t = i; t < MIN2(_length, i + _stride); t++) {
55 cl->do_thread(thread_at(t));
56 }
57 }
58 }
59
// is_par tells the Threads::possibly_parallel_* iteration calls below whether
// multiple workers will walk the thread list concurrently. The
// _threads_claim_token_scope member presumably establishes the thread-claim
// token for this parallel iteration — confirm against its declaration.
ShenandoahThreadRoots::ShenandoahThreadRoots(ShenandoahPhaseTimings::Phase phase, bool is_par) :
  _phase(phase),
  _is_par(is_par),
  _threads_claim_token_scope() {}
64
// Applies the oop closure (and nmethod closure, for code found on stacks) to
// all thread roots, possibly in parallel with other workers. Time is
// attributed to this phase's ThreadRoots bucket for worker_id.
void ShenandoahThreadRoots::oops_do(OopClosure* oops_cl, NMethodClosure* code_cl, uint worker_id) {
  ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::ThreadRoots, worker_id);
  ResourceMark rm;
  Threads::possibly_parallel_oops_do(_is_par, oops_cl, code_cl);
}
70
// Applies the thread closure to every thread, possibly in parallel with other
// workers, charging the time to this phase's ThreadRoots bucket.
void ShenandoahThreadRoots::threads_do(ThreadClosure* tc, uint worker_id) {
  ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::ThreadRoots, worker_id);
  ResourceMark rm;
  Threads::possibly_parallel_threads_do(_is_par, tc);
}
76
// Records the timing phase; iteration state lives in the _coderoots_iterator
// member used by nmethods_do() below.
ShenandoahCodeCacheRoots::ShenandoahCodeCacheRoots(ShenandoahPhaseTimings::Phase phase) : _phase(phase) {
}
79
// Applies the closure to code cache nmethods via the shared iterator, so
// multiple workers can call this concurrently and split the work. Time is
// attributed to this phase's CodeCacheRoots bucket for worker_id.
void ShenandoahCodeCacheRoots::nmethods_do(NMethodClosure* nmethod_cl, uint worker_id) {
  ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
  _coderoots_iterator.possibly_parallel_nmethods_do(nmethod_cl);
}
84
// Common base for all root processors below: caches the heap and opens the
// worker-phase scope for the given timing phase.
ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahPhaseTimings::Phase phase) :
  _heap(ShenandoahHeap::heap()),
  _worker_phase(phase) {
}
89
// Root scanner used at a stop-the-world pause. Thread roots are iterated in
// parallel only when more than one worker is active; whether classes are
// being unloaded is latched from the heap up front.
ShenandoahSTWRootScanner::ShenandoahSTWRootScanner(ShenandoahPhaseTimings::Phase phase) :
   ShenandoahRootProcessor(phase),
   _thread_roots(phase, ShenandoahHeap::heap()->workers()->active_workers() > 1),
   _code_roots(phase),
   _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
   _vm_roots(phase),
   _unload_classes(ShenandoahHeap::heap()->unload_classes()) {
}
98
// Thread closure used during concurrent mark root scanning: for each Java
// thread it finishes pending stack-watermark processing, applying the given
// oop closure to the thread's stack roots (see do_thread below).
class ShenandoahConcurrentMarkThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;   // applied to each thread's stack roots

public:
  ShenandoahConcurrentMarkThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};
107
// Captures the oop closure to apply to each visited thread's stack.
ShenandoahConcurrentMarkThreadClosure::ShenandoahConcurrentMarkThreadClosure(OopClosure* oops) :
  _oops(oops) {
}
111
112 void ShenandoahConcurrentMarkThreadClosure::do_thread(Thread* thread) {
113 assert(thread->is_Java_thread(), "Must be");
114 JavaThread* const jt = JavaThread::cast(thread);
115
116 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
117 }
118
// Root scanner for concurrent marking. When classes are NOT being unloaded,
// code cache roots must be scanned here, so a snapshot of the Shenandoah
// code-roots table is taken under CodeCache_lock; it is returned in the
// destructor. TLAB statistics gathered by the stack watermarks are
// aggregated and published up front (see update_tlab_stats()).
ShenandoahConcurrentRootScanner::ShenandoahConcurrentRootScanner(uint n_workers,
                                                                 ShenandoahPhaseTimings::Phase phase) :
  ShenandoahRootProcessor(phase),
  _java_threads(phase, n_workers),
   _vm_roots(phase),
   _cld_roots(phase, n_workers, false /*heap iteration*/),
   _codecache_snapshot(nullptr),
   _phase(phase) {
  if (!ShenandoahHeap::heap()->unload_classes()) {
    // Snapshot the code roots table for iteration; the snapshot is kept
    // until finish_iteration() in the destructor.
    MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _codecache_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
  }
  update_tlab_stats();
  assert(!ShenandoahHeap::heap()->has_forwarded_objects(), "Not expecting forwarded pointers during concurrent marking");
}
134
// Returns the code-roots snapshot taken in the constructor (only present
// when classes are not being unloaded) and notifies any waiters on
// CodeCache_lock that the iteration has finished.
ShenandoahConcurrentRootScanner::~ShenandoahConcurrentRootScanner() {
  if (!ShenandoahHeap::heap()->unload_classes()) {
    MonitorLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    ShenandoahCodeRoots::table()->finish_iteration(_codecache_snapshot);
    locker.notify_all();
  }
}
142
143 void ShenandoahConcurrentRootScanner::roots_do(OopClosure* oops, uint worker_id) {
144 ShenandoahHeap* const heap = ShenandoahHeap::heap();
145 CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
146
147 // Process light-weight/limited parallel roots then
148 _vm_roots.oops_do(oops, worker_id);
149
150 if (heap->unload_classes()) {
151 _cld_roots.always_strong_cld_do(&clds_cl, worker_id);
152 } else {
153 _cld_roots.cld_do(&clds_cl, worker_id);
154
155 {
156 ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
157 NMethodToOopClosure nmethods(oops, !NMethodToOopClosure::FixRelocations);
158 _codecache_snapshot->parallel_nmethods_do(&nmethods);
159 }
160 }
161
162 // Process heavy-weight/fully parallel roots the last
163 ShenandoahConcurrentMarkThreadClosure thr_cl(oops);
164 _java_threads.threads_do(&thr_cl, worker_id);
165 }
166
167 void ShenandoahConcurrentRootScanner::update_tlab_stats() {
168 if (UseTLAB) {
169 ThreadLocalAllocStats total;
170 for (uint i = 0; i < _java_threads.length(); i ++) {
171 Thread* thr = _java_threads.thread_at(i);
172 if (thr->is_Java_thread()) {
173 ShenandoahStackWatermark* wm = StackWatermarkSet::get<ShenandoahStackWatermark>(JavaThread::cast(thr), StackWatermarkKind::gc);
174 total.update(wm->stats());
175 }
176 }
177 total.publish();
178 }
179 }
180
// Root processor that updates root references; thread roots are iterated in
// parallel only when more than one worker is active.
ShenandoahRootUpdater::ShenandoahRootUpdater(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
  ShenandoahRootProcessor(phase),
  _vm_roots(phase),
  _cld_roots(phase, n_workers, false /*heap iteration*/),
  _thread_roots(phase, n_workers > 1),
  _weak_roots(phase),
  _code_roots(phase) {
}
189
// Root processor that adjusts root pointers after full-GC compaction; it is
// only ever constructed while a full GC is in progress (asserted below).
// Thread roots are iterated in parallel only with more than one worker.
ShenandoahRootAdjuster::ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
  ShenandoahRootProcessor(phase),
  _vm_roots(phase),
  _cld_roots(phase, n_workers, false /*heap iteration*/),
  _thread_roots(phase, n_workers > 1),
  _weak_roots(phase),
  _code_roots(phase) {
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress(), "Full GC only");
}
199
200 void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
201 NMethodToOopClosure code_blob_cl(oops, NMethodToOopClosure::FixRelocations);
202 ShenandoahNMethodAndDisarmClosure nmethods_and_disarm_Cl(oops);
203 NMethodToOopClosure* adjust_code_closure = ShenandoahCodeRoots::use_nmethod_barriers_for_mark() ?
204 static_cast<NMethodToOopClosure*>(&nmethods_and_disarm_Cl) :
205 static_cast<NMethodToOopClosure*>(&code_blob_cl);
206 CLDToOopClosure adjust_cld_closure(oops, ClassLoaderData::_claim_strong);
207
208 // Process light-weight/limited parallel roots then
209 _vm_roots.oops_do(oops, worker_id);
210 _weak_roots.oops_do<OopClosure>(oops, worker_id);
211 _cld_roots.cld_do(&adjust_cld_closure, worker_id);
212
213 // Process heavy-weight/fully parallel roots the last
214 _code_roots.nmethods_do(adjust_code_closure, worker_id);
215 _thread_roots.oops_do(oops, nullptr, worker_id);
216 }
217
// Root scanner for (single-threaded) heap iteration: thread roots are
// explicitly non-parallel, and CLD roots are set up in heap-iteration mode.
ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner(uint n_workers) :
  ShenandoahRootProcessor(ShenandoahPhaseTimings::heap_iteration_roots),
  _thread_roots(ShenandoahPhaseTimings::heap_iteration_roots, false /*is par*/),
  _vm_roots(ShenandoahPhaseTimings::heap_iteration_roots),
  _cld_roots(ShenandoahPhaseTimings::heap_iteration_roots, n_workers, true /*heap iteration*/),
  _weak_roots(ShenandoahPhaseTimings::heap_iteration_roots),
  _code_roots(ShenandoahPhaseTimings::heap_iteration_roots) {
}
226
227 class ShenandoahMarkNMethodClosure : public NMethodClosure {
228 private:
229 OopClosure* const _oops;
230 BarrierSetNMethod* const _bs_nm;
231
232 public:
233 ShenandoahMarkNMethodClosure(OopClosure* oops) :
234 _oops(oops),
235 _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
236
237 virtual void do_nmethod(nmethod* nm) {
238 assert(nm != nullptr, "Sanity");
239 if (_bs_nm != nullptr) {
240 // Make sure it only sees to-space objects
241 _bs_nm->nmethod_entry_barrier(nm);
242 }
243 ShenandoahNMethod* const snm = ShenandoahNMethod::gc_data(nm);
244 assert(snm != nullptr, "Sanity");
245 snm->oops_do(_oops, false /*fix_relocations*/);
246 }
247 };
248
249 void ShenandoahHeapIterationRootScanner::roots_do(OopClosure* oops) {
250 // Must use _claim_other to avoid interfering with concurrent CLDG iteration
251 CLDToOopClosure clds(oops, ClassLoaderData::_claim_other);
252 ShenandoahMarkNMethodClosure code(oops);
253 ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, nullptr);
254
255 ResourceMark rm;
256
257 // Process light-weight/limited parallel roots then
258 _vm_roots.oops_do(oops, 0);
259 _weak_roots.oops_do<OopClosure>(oops, 0);
260 _cld_roots.cld_do(&clds, 0);
261
262 // Process heavy-weight/fully parallel roots the last
263 _code_roots.nmethods_do(&code, 0);
264 _thread_roots.threads_do(&tc_cl, 0);
265 }