1 /*
2 * Copyright (c) 2015, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 */
23
24 #include "classfile/classLoaderData.hpp"
25 #include "classfile/classLoaderDataGraph.hpp"
26 #include "classfile/javaClasses.inline.hpp"
27 #include "code/nmethod.hpp"
28 #include "gc/shared/continuationGCSupport.inline.hpp"
29 #include "gc/shared/gc_globals.hpp"
30 #include "gc/shared/suspendibleThreadSet.hpp"
31 #include "gc/shared/workerThread.hpp"
32 #include "gc/z/zAbort.inline.hpp"
33 #include "gc/z/zAddress.inline.hpp"
34 #include "gc/z/zBarrier.inline.hpp"
35 #include "gc/z/zBarrierSetNMethod.hpp"
36 #include "gc/z/zGeneration.inline.hpp"
37 #include "gc/z/zGenerationId.hpp"
38 #include "gc/z/zHeap.inline.hpp"
39 #include "gc/z/zLock.inline.hpp"
40 #include "gc/z/zMark.inline.hpp"
41 #include "gc/z/zMarkCache.inline.hpp"
42 #include "gc/z/zMarkContext.inline.hpp"
43 #include "gc/z/zMarkStack.inline.hpp"
44 #include "gc/z/zMarkTerminate.inline.hpp"
45 #include "gc/z/zNMethod.hpp"
46 #include "gc/z/zPage.hpp"
47 #include "gc/z/zPageTable.inline.hpp"
48 #include "gc/z/zRootsIterator.hpp"
49 #include "gc/z/zStackWatermark.hpp"
50 #include "gc/z/zStat.hpp"
51 #include "gc/z/zTask.hpp"
52 #include "gc/z/zThreadLocalAllocBuffer.hpp"
53 #include "gc/z/zUncoloredRoot.inline.hpp"
54 #include "gc/z/zUtils.inline.hpp"
55 #include "gc/z/zWorkers.hpp"
56 #include "logging/log.hpp"
57 #include "memory/iterator.inline.hpp"
58 #include "oops/objArrayOop.inline.hpp"
59 #include "oops/oop.inline.hpp"
60 #include "runtime/continuation.hpp"
61 #include "runtime/handshake.hpp"
62 #include "runtime/icache.hpp"
63 #include "runtime/javaThread.hpp"
64 #include "runtime/prefetch.inline.hpp"
65 #include "runtime/safepointMechanism.hpp"
66 #include "runtime/stackWatermark.hpp"
67 #include "runtime/stackWatermarkSet.inline.hpp"
68 #include "runtime/threads.hpp"
69 #include "runtime/vmThread.hpp"
70 #include "utilities/align.hpp"
71 #include "utilities/globalDefinitions.hpp"
72 #include "utilities/powerOfTwo.hpp"
73 #include "utilities/ticks.hpp"
74
// Timing sub-phases for root scanning, split per generation and by root
// category: "colored" roots are visited with oop closures, "uncolored"
// roots with the thread/nmethod closures (see the root task classes below).
static const ZStatSubPhase ZSubPhaseConcurrentMarkRootUncoloredYoung("Concurrent Mark Root Uncolored", ZGenerationId::young);
static const ZStatSubPhase ZSubPhaseConcurrentMarkRootColoredYoung("Concurrent Mark Root Colored", ZGenerationId::young);
static const ZStatSubPhase ZSubPhaseConcurrentMarkRootUncoloredOld("Concurrent Mark Root Uncolored", ZGenerationId::old);
static const ZStatSubPhase ZSubPhaseConcurrentMarkRootColoredOld("Concurrent Mark Root Colored", ZGenerationId::old);
79
// ZMark drives marking for a single generation. The constructor only wires
// up the generation and page table and default-constructs the marking data
// structures; the per-cycle counters are (re)set in start()/prepare_work().
ZMark::ZMark(ZGeneration* generation, ZPageTable* page_table)
  : _generation(generation),
    _page_table(page_table),
    _marking_smr(),
    _stripes(),
    _terminate(),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}
93
94 size_t ZMark::calculate_nstripes(uint nworkers) const {
95 // Calculate the number of stripes from the number of workers we use,
96 // where the number of stripes must be a power of two and we want to
97 // have at least one worker per stripe.
98 const size_t nstripes = round_down_power_of_2(nworkers);
99 return MIN2(nstripes, ZMarkStripesMax);
100 }
101
// Called at mark start: resets per-cycle statistics, records the number of
// workers, derives the stripe count, and logs the worker/stripe mapping.
void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = workers()->active_workers();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  _generation->stat_mark()->at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print(" Worker %u(%u) -> Stripe %zu(%zu)",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}
137
// Returns the worker thread set of the generation being marked
ZWorkers* ZMark::workers() const {
  return _generation->workers();
}
141
// Called when a ZMarkTask is constructed, i.e. each time a (possibly
// restarted) round of mark work begins. Re-reads the active worker count,
// since it may have changed since start(), and resets per-round state.
void ZMark::prepare_work() {
  // Set number of workers to use
  _nworkers = workers()->active_workers();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush.store_relaxed(0u);
  _work_nterminateflush.store_relaxed(0u);
}
158
// Called when a ZMarkTask is destroyed; folds this round's flush counters
// into the totals reported at mark end.
void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush.load_relaxed();
  _nterminateflush += _work_nterminateflush.load_relaxed();
}
164
// Runs mark work to completion (drain/steal/flush/terminate loop)
void ZMark::follow_work_complete() {
  follow_work(false /* partial */);
}

// Runs mark work until the local stripe is drained and nothing can be
// stolen; returns follow_work's success/abort indication.
bool ZMark::follow_work_partial() {
  return follow_work(true /* partial */);
}
172
// Returns true if the object at addr is an object array
bool ZMark::is_array(zaddress addr) const {
  return to_oop(addr)->is_objArray();
}
176
// Partial array mark stack entries store a compressed heap offset rather
// than a full pointer. Partial array addresses are aligned to
// ZMarkPartialArrayMinSize, so the known-zero low bits are shifted out to
// make the offset fit in the entry's bit field.
static uintptr_t encode_partial_array_offset(zpointer* addr) {
  return untype(ZAddress::offset(to_zaddress((uintptr_t)addr))) >> ZMarkPartialArrayMinSizeShift;
}

// Inverse of encode_partial_array_offset: shift the compressed offset back
// up and rebase it into a heap address.
static zpointer* decode_partial_array_offset(uintptr_t offset) {
  return (zpointer*)ZOffset::address(to_zoffset(offset << ZMarkPartialArrayMinSizeShift));
}
184
// Pushes a partial-array entry (an address/length slice of an object array)
// onto the mark stack of the stripe owning that address, so the elements
// can be processed later, possibly by another worker.
void ZMark::push_partial_array(zpointer* addr, size_t length, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(Thread::current(), _generation->id());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr((uintptr_t)addr);
  const uintptr_t offset = encode_partial_array_offset(addr);
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (%zu), stripe: %zu",
                                 p2i(addr), length, _stripes.stripe_id(stripe));

  stacks->push(&_stripes, stripe, &_terminate, entry, false /* publish */);
}
197
198 static void mark_barrier_on_oop_array(volatile zpointer* p, size_t length, bool finalizable, bool young) {
199 for (volatile const zpointer* const end = p + length; p < end; p++) {
200 if (young) {
201 ZBarrier::mark_barrier_on_young_oop_field(p);
202 } else {
203 ZBarrier::mark_barrier_on_old_oop_field(p, finalizable);
204 }
205 }
206 }
207
// Follows all elements of a small array slice directly, without splitting
void ZMark::follow_array_elements_small(zpointer* addr, size_t length, bool finalizable) {
  assert(length <= ZMarkPartialArrayMinLength, "Too large, should be split");

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (%zu)", p2i(addr), length);

  mark_barrier_on_oop_array(addr, length, finalizable, _generation->is_young());
}
215
// Follows a large array slice by splitting it: an aligned "middle" region
// (and any unaligned trailing part) is pushed as partial-array work for
// later/parallel processing, while the unaligned leading part is followed
// immediately so this call always makes some direct progress.
void ZMark::follow_array_elements_large(zpointer* addr, size_t length, bool finalizable) {
  assert(length <= (size_t)arrayOopDesc::max_array_length(T_OBJECT), "Too large");
  assert(length > ZMarkPartialArrayMinLength, "Too small, should not be split");

  zpointer* const start = addr;
  zpointer* const end = start + length;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  zpointer* const middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t middle_length = align_down(end - middle_start, ZMarkPartialArrayMinLength);
  zpointer* const middle_end = middle_start + middle_length;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT" (%zu), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (%zu)",
                                 p2i(start), p2i(end), length, p2i(middle_start), p2i(middle_end), middle_length);

  // Push unaligned trailing part
  if (end > middle_end) {
    zpointer* const trailing_addr = middle_end;
    const size_t trailing_length = end - middle_end;
    push_partial_array(trailing_addr, trailing_length, finalizable);
  }

  // Push aligned middle part(s), repeatedly halving the remaining region so
  // the resulting work items are reasonably sized and can be shared.
  zpointer* partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_length = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinLength);
    partial_addr -= partial_length;
    push_partial_array(partial_addr, partial_length, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  zpointer* const leading_addr = start;
  const size_t leading_length = middle_start - start;
  follow_array_elements_small(leading_addr, leading_length, finalizable);
}
256
257 void ZMark::follow_array_elements(zpointer* addr, size_t length, bool finalizable) {
258 if (length <= ZMarkPartialArrayMinLength) {
259 follow_array_elements_small(addr, length, finalizable);
260 } else {
261 follow_array_elements_large(addr, length, finalizable);
262 }
263 }
264
// Processes a partial-array mark stack entry: decodes the compressed
// address/length and continues following that slice of the array.
void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  zpointer* const addr = decode_partial_array_offset(entry.partial_array_offset());
  const size_t length = entry.partial_array_length();

  follow_array_elements(addr, length, finalizable);
}
271
// Closure used when following an object's fields during marking. Applies a
// mark barrier to each oop field; the 'generation' template parameter
// selects which generation's barrier is used, and 'finalizable' selects
// finalizable (as opposed to strong) reachability for old marking.
template <bool finalizable, ZGenerationIdOptional generation>
class ZMarkBarrierFollowOopClosure : public OopIterateClosure {
private:
  // CLD claim bit matching the strength of this traversal
  static int claim_value() {
    return finalizable ? ClassLoaderData::_claim_finalizable
                       : ClassLoaderData::_claim_strong;
  }

  // Reference discovery is only performed for strong (non-finalizable)
  // marking; finalizable traversals pass no discoverer.
  static ReferenceDiscoverer* discoverer() {
    if (!finalizable) {
      return ZGeneration::old()->reference_discoverer();
    } else {
      return nullptr;
    }
  }

  static bool visit_metadata() {
    // Only visit metadata if we're marking through the old generation
    return ZGeneration::old()->is_phase_mark();
  }

  // Sampled once at construction so the answer is stable for the
  // lifetime of this closure
  const bool _visit_metadata;

public:
  ZMarkBarrierFollowOopClosure()
    : OopIterateClosure(discoverer()),
      _visit_metadata(visit_metadata()) {}

  virtual void do_oop(oop* p) {
    // Dispatch (at compile time) to the barrier for the generation
    // this closure marks through
    switch (generation) {
    case ZGenerationIdOptional::young:
      ZBarrier::mark_barrier_on_young_oop_field((volatile zpointer*)p);
      break;
    case ZGenerationIdOptional::old:
      ZBarrier::mark_barrier_on_old_oop_field((volatile zpointer*)p, finalizable);
      break;
    case ZGenerationIdOptional::none:
      ZBarrier::mark_barrier_on_oop_field((volatile zpointer*)p, finalizable);
      break;
    }
  }

  virtual void do_oop(narrowOop* p) {
    // ZGC does not use narrow oops
    ShouldNotReachHere();
  }

  virtual bool do_metadata() final {
    // Only help out with metadata visiting
    return _visit_metadata;
  }

  virtual void do_nmethod(nmethod* nm) {
    assert(do_metadata(), "Don't call otherwise");
    assert(!finalizable, "Can't handle finalizable marking of nmethods");
    nm->run_nmethod_entry_barrier();
  }

  virtual void do_method(Method* m) {
    // Mark interpreted frames for class redefinition
    m->record_gc_epoch();
  }

  virtual void do_klass(Klass* klass) {
    // Visit the oops of the klass's holder CLD, using the 'none'
    // generation variant of this closure for those fields
    ClassLoaderData* cld = klass->class_loader_data();
    ZMarkBarrierFollowOopClosure<finalizable, ZGenerationIdOptional::none> cl;
    cld->oops_do(&cl, claim_value());
  }

  virtual void do_cld(ClassLoaderData* cld) {
    ZMarkBarrierFollowOopClosure<finalizable, ZGenerationIdOptional::none> cl;
    cld->oops_do(&cl, claim_value());
  }
};
345
// Follows an object array: first visits the array's klass metadata (when
// applicable), then follows the element oops.
void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  if (_generation->is_old()) {
    if (finalizable) {
      ZMarkBarrierFollowOopClosure<true /* finalizable */, ZGenerationIdOptional::old> cl;
      cl.do_klass(obj->klass());
    } else {
      ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::old> cl;
      cl.do_klass(obj->klass());
    }
  } else {
    // Young marking only visits metadata when old marking is in its mark
    // phase (see do_metadata/visit_metadata)
    ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::none> cl;
    if (cl.do_metadata()) {
      cl.do_klass(obj->klass());
    }
  }

  // Should be convertible to colorless oop
  check_is_valid_zaddress(obj);

  zpointer* const addr = (zpointer*)obj->base();
  const size_t length = (size_t)obj->length();

  follow_array_elements(addr, length, finalizable);
}
370
// Follows a non-array object by iterating its oop fields with the barrier
// closure matching the marking generation and strength.
void ZMark::follow_object(oop obj, bool finalizable) {
  if (_generation->is_old()) {
    assert(ZHeap::heap()->is_old(to_zaddress(obj)), "Should only follow objects from old gen");
    if (obj->is_stackChunk()) {
      // No support for tracing through stack chunks as finalizably reachable
      ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::old> cl;
      ZIterator::oop_iterate(obj, &cl);
    } else if (finalizable) {
      ZMarkBarrierFollowOopClosure<true /* finalizable */, ZGenerationIdOptional::old> cl;
      ZIterator::oop_iterate(obj, &cl);
    } else {
      ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::old> cl;
      ZIterator::oop_iterate(obj, &cl);
    }
  } else {
    // Young gen must help out with old marking
    ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::young> cl;
    ZIterator::oop_iterate(obj, &cl);
  }
}
391
// Processes one mark stack entry: either continues a partial array, or
// marks an object, accounts its live bytes, and follows its fields,
// depending on the flags encoded in the entry.
void ZMark::mark_and_follow(ZMarkContext* context, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address and additional flags
  const zaddress addr = ZOffset::address(to_zoffset(entry.object_address()));
  const bool mark = entry.mark();
  bool inc_live = entry.inc_live();
  const bool follow = entry.follow();

  ZPage* const page = _page_table->get(addr);
  assert(page->is_relocatable(), "Invalid page state");

  // Mark; mark_object may update inc_live (passed by reference)
  if (mark && !page->mark_object(addr, finalizable, inc_live)) {
    // Already marked
    return;
  }

  // Increment live
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    context->cache()->inc_live(page, aligned_size);
  }

  // Follow
  if (follow) {
    if (is_array(addr)) {
      follow_array_object(objArrayOop(to_oop(addr)), finalizable);
    } else {
      follow_object(to_oop(addr), finalizable);
    }
  }
}
436
// Periodically called from the drain loop to adapt to changes in worker
// and stripe counts, flush local stacks when the worker switches stripe or
// when termination is near, and yield to the suspendible thread set.
// This function returns true if we need to stop working to resize threads or
// abort marking
bool ZMark::rebalance_work(ZMarkContext* context) {
  const size_t assumed_nstripes = context->nstripes();
  const size_t nstripes = _stripes.nstripes();

  if (assumed_nstripes != nstripes) {
    // The number of stripes has changed; reflect that change locally
    context->set_nstripes(nstripes);
  } else if (nstripes < calculate_nstripes(_nworkers) && _stripes.is_crowded()) {
    // We are running on a reduced number of threads to minimize the amount of work
    // hidden in local stacks when the stripes are less well balanced. When this situation
    // starts getting crowded, we bump the number of stripes again.
    const size_t new_nstripes = nstripes << 1;
    if (_stripes.try_set_nstripes(nstripes, new_nstripes)) {
      context->set_nstripes(new_nstripes);
    }
  }

  ZMarkStripe* stripe = _stripes.stripe_for_worker(_nworkers, WorkerThread::worker_id());
  if (context->stripe() != stripe) {
    // Need to switch stripe
    context->set_stripe(stripe);
    flush(Thread::current());
  } else if (!_terminate.saturated()) {
    // Work imbalance detected; striped marking is likely going to be in the way
    flush(Thread::current());
  }

  SuspendibleThreadSet::yield();

  return ZAbort::should_abort() || _generation->should_worker_resize();
}
470
// Pops and processes entries from this worker's stripe until it is empty.
// Returns false if work was interrupted by an abort/resize request
// (signalled via rebalance_work, checked every 32 entries).
bool ZMark::drain(ZMarkContext* context) {
  ZMarkThreadLocalStacks* const stacks = context->stacks();
  ZMarkStackEntry entry;
  size_t processed = 0;

  context->set_stripe(_stripes.stripe_for_worker(_nworkers, WorkerThread::worker_id()));
  context->set_nstripes(_stripes.nstripes());

  // Drain stripe stacks
  while (stacks->pop(&_marking_smr, &_stripes, context->stripe(), &entry)) {
    mark_and_follow(context, entry);

    if ((processed++ & 31) == 0 && rebalance_work(context)) {
      return false;
    }
  }

  return true;
}
490
// Tries to steal one of this thread's own local stacks that belongs to
// another stripe, and install it on the current stripe. Returns true on
// success.
bool ZMark::try_steal_local(ZMarkContext* context) {
  ZMarkStripe* const stripe = context->stripe();
  ZMarkThreadLocalStacks* const stacks = context->stacks();

  // Try to steal a local stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = stacks->steal(&_stripes, victim_stripe);
    if (stack != nullptr) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}
510
// Tries to steal a published stack from another stripe's shared list and
// install it on the current stripe. Returns true on success.
bool ZMark::try_steal_global(ZMarkContext* context) {
  ZMarkStripe* const stripe = context->stripe();
  ZMarkThreadLocalStacks* const stacks = context->stacks();

  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack(&_marking_smr);
    if (stack != nullptr) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}
530
531 bool ZMark::try_steal(ZMarkContext* context) {
532 return try_steal_local(context) || try_steal_global(context);
533 }
534
// Handshake/thread closure that flushes a thread's local mark state via
// ZMark::flush(Thread*) and remembers whether any thread had something to
// flush (which means more marking work was produced).
class ZMarkFlushStacksHandshakeClosure : public HandshakeClosure {
private:
  ZMark* const _mark;
  bool _flushed; // True if any visited thread flushed non-empty stacks

public:
  ZMarkFlushStacksHandshakeClosure(ZMark* mark)
    : HandshakeClosure("ZMarkFlushStacks"),
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush(thread)) {
      _flushed = true;
      if (SafepointSynchronize::is_at_safepoint()) {
        // A flush at this point means the thread kept mark termination
        // from succeeding; log which thread it was
        log_debug(gc, marking)("Thread broke mark termination %s", thread->name());
      }
    }
  }

  bool flushed() const {
    return _flushed;
  }
};
559
// VM operation used to run the flush closure on the VM thread itself.
// Does not require a safepoint (evaluate_at_safepoint() is false).
class VM_ZMarkFlushOperation : public VM_Operation {
private:
  ThreadClosure* _cl;

public:
  VM_ZMarkFlushOperation(ThreadClosure* cl)
    : _cl(cl) {}

  virtual bool evaluate_at_safepoint() const {
    return false;
  }

  virtual void doit() {
    // Flush VM thread
    Thread* const thread = Thread::current();
    _cl->do_thread(thread);
  }

  virtual VMOp_Type type() const {
    return VMOp_ZMarkFlushOperation;
  }

  virtual bool is_gc_operation() const {
    return true;
  }
};
586
// Flushes mark stacks of all Java threads (via handshake) and of the VM
// thread (via a VM operation). Returns true if more marking work exists,
// either because something was flushed or the stripes are non-empty.
bool ZMark::flush() {
  ZMarkFlushStacksHandshakeClosure cl(this);
  VM_ZMarkFlushOperation vm_cl(&cl);
  Handshake::execute(&cl);
  VMThread::execute(&vm_cl);

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}
596
// Flush performed when marking has terminated, to check whether termination
// is final. Returns true if more work turned up (flushed stacks or an oop
// resurrected during the flush), in which case marking must continue.
bool ZMark::try_terminate_flush() {
  _work_nterminateflush.add_then_fetch(1u);
  // Clear the resurrection flag before flushing so we can detect
  // resurrection that happens during the flush
  _terminate.set_resurrected(false);

  if (ZVerifyMarking) {
    verify_worker_stacks_empty();
  }

  return flush() || _terminate.resurrected();
}
607
// Proactively flushes thread-local mark stacks before attempting
// termination. Only worker 0 does this, and only up to
// ZMarkProactiveFlushMax times per round. Returns true if the flush
// produced more work.
bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (WorkerThread::worker_id() != 0) {
    return false;
  }

  if (_work_nproactiveflush.load_relaxed() == ZMarkProactiveFlushMax) {
    // Limit reached or we're trying to terminate
    return false;
  }

  _work_nproactiveflush.add_then_fetch(1u);

  // Leave the suspendible thread set while handshaking all threads
  SuspendibleThreadSetLeaver sts_leaver;
  return flush();
}
624
// Attempts to terminate marking; returns true if all workers agree
// there is no more work.
bool ZMark::try_terminate(ZMarkContext* context) {
  return _terminate.try_terminate(&_stripes, context->nstripes());
}

// Signals that this worker is leaving mark work early (abort/resize)
void ZMark::leave() {
  _terminate.leave();
}
632
// Main per-worker mark loop: drain the local stripe, steal work from other
// stripes, flush proactively, and finally try to terminate.
// Returning true means marking finished successfully after marking as far as it could.
// Returning false means that marking finished unsuccessfully due to abort or resizing.
bool ZMark::follow_work(bool partial) {
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, WorkerThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(Thread::current(), _generation->id());
  ZMarkContext context(ZMarkStripesMax, stripe, stacks);

  for (;;) {
    if (!drain(&context)) {
      // Interrupted by abort or worker resize
      leave();
      return false;
    }

    if (try_steal(&context)) {
      // Stole work
      continue;
    }

    if (partial) {
      // Partial mode stops as soon as no work is immediately available
      return true;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate(&context)) {
      // Terminate
      return true;
    }
  }
}
666
// Applies the (old, strong) mark barrier to colored root oop fields
class ZMarkOopClosure : public OopClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_oop_field((zpointer*)p, false /* finalizable */);
  }

  virtual void do_oop(narrowOop* p) {
    // ZGC does not use narrow oops
    ShouldNotReachHere();
  }
};
677
// Applies the young-generation mark barrier to colored root oop fields
class ZMarkYoungOopClosure : public OopClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_young_good_barrier_on_oop_field((zpointer*)p);
  }

  virtual void do_oop(narrowOop* p) {
    // ZGC does not use narrow oops
    ShouldNotReachHere();
  }
};
688
// Per-thread root work: finishes stack watermark processing (which applies
// the mark root function to the thread's stack) and updates TLAB stats.
// TLAB statistics are reset when the closure is created and published when
// it is destroyed.
class ZMarkThreadClosure : public ThreadClosure {
private:
  static ZUncoloredRoot::RootFunction root_function() {
    return ZUncoloredRoot::mark;
  }

public:
  ZMarkThreadClosure() {
    ZThreadLocalAllocBuffer::reset_statistics();
  }
  ~ZMarkThreadClosure() {
    ZThreadLocalAllocBuffer::publish_statistics();
  }

  virtual void do_thread(Thread* thread) {
    JavaThread* const jt = JavaThread::cast(thread);

    StackWatermarkSet::finish_processing(jt, (void*)root_function(), StackWatermarkKind::gc);
    ZThreadLocalAllocBuffer::update_stats(jt);
  }
};
710
// Visits nmethod roots for old marking: patches nmethod barriers, heals the
// embedded oops, and disarms the nmethod entry barrier.
class ZMarkNMethodClosure : public NMethodClosure {
private:
  ZBarrierSetNMethod* const _bs_nm;

public:
  ZMarkNMethodClosure()
    : _bs_nm(static_cast<ZBarrierSetNMethod*>(BarrierSet::barrier_set()->barrier_set_nmethod())) {}

  virtual void do_nmethod(nmethod* nm) {
    // Serialize with concurrent nmethod processing
    ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
    if (_bs_nm->is_armed(nm)) {
      {
        ICacheInvalidationContext icic;
        // Heal barriers
        ZNMethod::nmethod_patch_barriers(nm, &icic);

        // Heal oops
        ZUncoloredRootMarkOopClosure cl(ZNMethod::color(nm));
        ZNMethod::nmethod_oops_do_inner(nm, &cl, &icic);
      }

      // CodeCache unloading support
      nm->mark_as_maybe_on_stack();

      log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by old", p2i(nm));

      // Disarm
      _bs_nm->disarm(nm);
    }
  }
};
742
// Visits nmethod roots for young marking: heals the embedded oops and
// disarms only the young part of the nmethod entry barrier, preserving any
// concurrent old marking cycle's armed state.
class ZMarkYoungNMethodClosure : public NMethodClosure {
private:
  ZBarrierSetNMethod* const _bs_nm;

public:
  ZMarkYoungNMethodClosure()
    : _bs_nm(static_cast<ZBarrierSetNMethod*>(BarrierSet::barrier_set()->barrier_set_nmethod())) {}

  virtual void do_nmethod(nmethod* nm) {
    ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
    if (nm->is_unloading()) {
      // Skip nmethods being concurrently unloaded
      return;
    }

    if (_bs_nm->is_armed(nm)) {
      const uintptr_t prev_color = ZNMethod::color(nm);

      // Disarm only the young marking, not any potential old marking cycle

      // Preserve the old-marked bits from the previous color
      const uintptr_t old_marked_mask = ZPointerMarkedMask ^ (ZPointerMarkedYoung0 | ZPointerMarkedYoung1);
      const uintptr_t old_marked = prev_color & old_marked_mask;

      const zpointer new_disarm_value_ptr = ZAddress::color(zaddress::null, ZPointerLoadGoodMask | ZPointerMarkedYoung | old_marked | ZPointerRemembered);

      // Check if disarming for young mark, completely disarms the nmethod entry barrier
      const bool complete_disarm = ZPointer::is_store_good(new_disarm_value_ptr);

      {
        ICacheInvalidationContext icic;
        if (complete_disarm) {
          // We are about to completely disarm the nmethod, must take responsibility to patch all barriers before disarming
          ZNMethod::nmethod_patch_barriers(nm, &icic);
        }

        // Heal oops
        ZUncoloredRootMarkYoungOopClosure cl(prev_color);
        ZNMethod::nmethod_oops_do_inner(nm, &cl, &icic);
      }

      _bs_nm->guard_with(nm, (int)untype(new_disarm_value_ptr));

      if (complete_disarm) {
        log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by young (complete) [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, untype(new_disarm_value_ptr));
        assert(!_bs_nm->is_armed(nm), "Must not be considered armed anymore");
      } else {
        log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by young (incomplete) [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, untype(new_disarm_value_ptr));
        assert(_bs_nm->is_armed(nm), "Must be considered armed");
      }
    }
  }
};
794
// Old-generation root marking claims CLDs with the strong claim bit
typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_strong> ZMarkOldCLDClosure;
796
// Worker task that scans old-generation roots: colored roots (oops, CLDs)
// with the old mark barrier closures, and uncolored roots (threads,
// nmethods) with the thread/nmethod closures. Flushes worker stacks at the
// end so nothing lingers in the root-scanning workers' locals.
class ZMarkOldRootsTask : public ZTask {
private:
  ZRootsIteratorStrongColored _roots_colored;
  ZRootsIteratorStrongUncolored _roots_uncolored;

  ZMarkOopClosure _cl_colored;
  ZMarkOldCLDClosure _cld_cl;

  ZMarkThreadClosure _thread_cl;
  ZMarkNMethodClosure _nm_cl;

public:
  ZMarkOldRootsTask()
    : ZTask("ZMarkOldRootsTask"),
      _roots_colored(ZGenerationIdOptional::old),
      _roots_uncolored(ZGenerationIdOptional::old),
      _cl_colored(),
      _cld_cl(&_cl_colored),
      _thread_cl(),
      _nm_cl() {}

  virtual void work() {
    {
      ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootColoredOld);
      _roots_colored.apply(&_cl_colored,
                           &_cld_cl);
    }

    {
      ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootUncoloredOld);
      _roots_uncolored.apply(&_thread_cl,
                             &_nm_cl);
    }

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    ZHeap::heap()->mark_flush(Thread::current());
  }
};
838
// CLD closure for young root marking. Claims nothing (_claim_none) and
// skips CLDs that are concurrently being unloaded.
class ZMarkYoungCLDClosure : public ClaimingCLDToOopClosure<ClassLoaderData::_claim_none> {
public:
  virtual void do_cld(ClassLoaderData* cld) {
    if (!cld->is_alive()) {
      // Skip marking through concurrently unloading CLDs
      return;
    }
    ClaimingCLDToOopClosure<ClassLoaderData::_claim_none>::do_cld(cld);
  }

  ZMarkYoungCLDClosure(OopClosure* cl)
    : ClaimingCLDToOopClosure<ClassLoaderData::_claim_none>(cl) {}
};
852
// Worker task that scans young-generation roots: colored roots (oops, CLDs)
// with the young mark barrier closures, and uncolored roots (threads,
// nmethods) with the thread/young-nmethod closures. Flushes worker stacks
// at the end so nothing lingers in the root-scanning workers' locals.
class ZMarkYoungRootsTask : public ZTask {
private:
  ZRootsIteratorAllColored _roots_colored;
  ZRootsIteratorAllUncolored _roots_uncolored;

  ZMarkYoungOopClosure _cl_colored;
  ZMarkYoungCLDClosure _cld_cl;

  ZMarkThreadClosure _thread_cl;
  ZMarkYoungNMethodClosure _nm_cl;

public:
  ZMarkYoungRootsTask()
    : ZTask("ZMarkYoungRootsTask"),
      _roots_colored(ZGenerationIdOptional::young),
      _roots_uncolored(ZGenerationIdOptional::young),
      _cl_colored(),
      _cld_cl(&_cl_colored),
      _thread_cl(),
      _nm_cl() {}

  virtual void work() {
    {
      ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootColoredYoung);
      _roots_colored.apply(&_cl_colored,
                           &_cld_cl);
    }

    {
      ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootUncoloredYoung);
      _roots_uncolored.apply(&_thread_cl,
                             &_nm_cl);
    }

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    ZHeap::heap()->mark_flush(Thread::current());
  }
};
894
// Restartable worker task for the concurrent mark phase. prepare_work()
// runs in the constructor and finish_work() in the destructor, bracketing
// one round of mark work; the task can be re-run after a worker resize.
class ZMarkTask : public ZRestartableTask {
private:
  ZMark* const _mark;

public:
  ZMarkTask(ZMark* mark)
    : ZRestartableTask("ZMarkTask"),
      _mark(mark) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;
    _mark->follow_work_complete();
    // We might have found pointers into the other generation, and then we want to
    // publish such marking stacks to prevent that generation from getting a mark continue.
    // We also flush in case of a resize where a new worker thread continues the marking
    // work, causing a mark continue for the collected generation.
    ZHeap::heap()->mark_flush(Thread::current());
  }

  virtual void resize_workers(uint nworkers) {
    _mark->resize_workers(nworkers);
  }
};
924
// Called when the worker count changes mid-mark: updates the worker count,
// recomputes the stripe count, and resets the termination protocol.
void ZMark::resize_workers(uint nworkers) {
  _nworkers = nworkers;
  const size_t nstripes = calculate_nstripes(nworkers);
  _stripes.set_nstripes(nstripes);
  _terminate.reset(nworkers);
}
931
// Runs the young-generation root scanning task on the workers
void ZMark::mark_young_roots() {
  SuspendibleThreadSetJoiner sts_joiner;
  ZMarkYoungRootsTask task;
  workers()->run(&task);
}

// Runs the old-generation root scanning task on the workers
void ZMark::mark_old_roots() {
  SuspendibleThreadSetJoiner sts_joiner;
  ZMarkOldRootsTask task;
  workers()->run(&task);
}
943
// Runs mark tasks until marking is aborted or a terminate-flush finds no
// more work. Each iteration runs one (restartable) ZMarkTask round.
void ZMark::mark_follow() {
  for (;;) {
    ZMarkTask task(this);
    workers()->run(&task);
    if (ZAbort::should_abort() || !try_terminate_flush()) {
      break;
    }
  }
}
953
// Final completion check at mark end: fails if an oop was resurrected
// after termination, or if flushing the non-Java threads turns up pending
// work. Returns true if marking is truly complete.
bool ZMark::try_end() {
  if (_terminate.resurrected()) {
    // An oop was resurrected after concurrent termination.
    return false;
  }

  // Try end marking
  ZMarkFlushStacksHandshakeClosure cl(this);
  Threads::non_java_threads_do(&cl);

  // Check if non-java threads have any pending marking
  if (cl.flushed() || !_stripes.is_empty()) {
    return false;
  }

  // Mark completed
  return true;
}
972
// Attempts to end marking. Returns false (and counts a mark continue) if
// more work was found; otherwise verifies stacks, reports statistics, and
// returns true.
bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  _generation->stat_mark()->at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}
992
// Releases mark stack memory retained by the marking SMR mechanism
void ZMark::free() {
  // Free any unused mark stack space
  _marking_smr.free();
}
997
// Flushes a single thread's marking state: the store barrier buffer (Java
// threads only) and the thread-local mark stacks for this generation.
// Returns true if the stack flush published any work.
bool ZMark::flush(Thread* thread) {
  if (thread->is_Java_thread()) {
    ZThreadLocalData::store_barrier_buffer(thread)->flush();
  }
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(thread, _generation->id());
  return stacks->flush(&_stripes, &_terminate);
}
1005
// Verification closure: guarantees that a thread's local mark stacks for
// the given generation are empty.
class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;
  const ZGenerationId _generation_id;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes, ZGenerationId id)
    : _stripes(stripes),
      _generation_id(id) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(thread, _generation_id);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};
1021
// Verifies that every thread's mark stacks and all stripe stacks are empty
void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes, _generation->id());
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}

// Verifies that the GC worker threads' mark stacks are empty (used before
// a terminate flush, when only workers should hold mark state)
void ZMark::verify_worker_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes, _generation->id());
  workers()->threads_do(&cl);
}