1 /*
  2  * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  */
 23 
 24 #include "precompiled.hpp"
 25 #include "classfile/classLoaderData.hpp"
 26 #include "classfile/classLoaderDataGraph.hpp"
 27 #include "classfile/javaClasses.inline.hpp"
 28 #include "code/nmethod.hpp"
 29 #include "gc/shared/gc_globals.hpp"
 30 #include "gc/shared/stringdedup/stringDedup.hpp"
 31 #include "gc/shared/suspendibleThreadSet.hpp"
 32 #include "gc/z/zAbort.inline.hpp"
 33 #include "gc/z/zBarrier.inline.hpp"
 34 #include "gc/z/zHeap.inline.hpp"
 35 #include "gc/z/zLock.inline.hpp"
 36 #include "gc/z/zMark.inline.hpp"
 37 #include "gc/z/zMarkCache.inline.hpp"
 38 #include "gc/z/zMarkContext.inline.hpp"
 39 #include "gc/z/zMarkStack.inline.hpp"
 40 #include "gc/z/zMarkTerminate.inline.hpp"
 41 #include "gc/z/zNMethod.hpp"
 42 #include "gc/z/zOop.inline.hpp"
 43 #include "gc/z/zPage.hpp"
 44 #include "gc/z/zPageTable.inline.hpp"
 45 #include "gc/z/zRootsIterator.hpp"
 46 #include "gc/z/zStackWatermark.hpp"
 47 #include "gc/z/zStat.hpp"
 48 #include "gc/z/zTask.hpp"
 49 #include "gc/z/zThread.inline.hpp"
 50 #include "gc/z/zThreadLocalAllocBuffer.hpp"
 51 #include "gc/z/zUtils.inline.hpp"
 52 #include "gc/z/zWorkers.hpp"
 53 #include "logging/log.hpp"
 54 #include "memory/iterator.inline.hpp"
 55 #include "oops/objArrayOop.inline.hpp"
 56 #include "oops/oop.inline.hpp"
 57 #include "runtime/atomic.hpp"
 58 #include "runtime/handshake.hpp"
 59 #include "runtime/prefetch.inline.hpp"
 60 #include "runtime/safepointMechanism.hpp"
 61 #include "runtime/stackWatermark.hpp"
 62 #include "runtime/stackWatermarkSet.inline.hpp"
 63 #include "runtime/thread.hpp"
 64 #include "utilities/align.hpp"
 65 #include "utilities/globalDefinitions.hpp"
 66 #include "utilities/powerOfTwo.hpp"
 67 #include "utilities/ticks.hpp"
 68 
 69 static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
 70 static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
 71 static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
 72 static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");
 73 
// Construct the marking subsystem. The workers and page table are borrowed
// references (not owned). The stack allocator, stripe set and termination
// state are default-constructed; all flush/complete/continue statistics
// start at zero and are reset again at each mark start.
ZMark::ZMark(ZWorkers* workers, ZPageTable* page_table) :
    _workers(workers),
    _page_table(page_table),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}
 88 
// Returns true if the mark stack space allocator was successfully set up.
bool ZMark::is_initialized() const {
  return _allocator.is_initialized();
}
 92 
 93 size_t ZMark::calculate_nstripes(uint nworkers) const {
 94   // Calculate the number of stripes from the number of workers we use,
 95   // where the number of stripes must be a power of two and we want to
 96   // have at least one worker per stripe.
 97   const size_t nstripes = round_down_power_of_2(nworkers);
 98   return MIN2(nstripes, ZMarkStripesMax);
 99 }
100 
// Prepare state for a new marking cycle. Called during the mark start
// pause, before any concurrent marking work is performed.
void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Increment global sequence number to invalidate
  // marking information for all pages.
  ZGlobalSeqNum++;

  CodeCache::increment_marking_cycle();

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->active_workers();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}
142 
143 void ZMark::prepare_work() {
144   assert(_nworkers == _workers->active_workers(), "Invalid number of workers");
145 
146   // Set number of active workers
147   _terminate.reset(_nworkers);
148 
149   // Reset flush counters
150   _work_nproactiveflush = _work_nterminateflush = 0;
151   _work_terminateflush = true;
152 }
153 
// Fold this round's flush counters into the cycle totals. Called by the
// ZMarkTask destructor after all worker threads have finished.
void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}
159 
160 bool ZMark::is_array(uintptr_t addr) const {
161   return ZOop::from_address(addr)->is_objArray();
162 }
163 
// Push a partial-array work item for the element range [addr, addr + size)
// onto the current thread's stack for the stripe owning addr. The offset is
// compressed by ZMarkPartialArrayMinSizeShift to fit in the stack entry,
// which is why addr must be ZMarkPartialArrayMinSize-aligned.
void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
  // Encode the (compressed) offset and element count into a stack entry
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  // Local push only; publishing is deferred until the stacks are flushed
  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}
177 
178 void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
179   assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
180   const size_t length = size / oopSize;
181 
182   log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);
183 
184   ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
185 }
186 
// Split a large array element range into an aligned middle part (pushed as
// partial-array work items, recursively halved so other workers can steal
// them) plus unaligned leading/trailing remainders. The leading part is
// followed inline so this call always makes marking progress rather than
// only splitting.
void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t    middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT" (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s), repeatedly halving (rounded up to the
  // partial-array granule) from the end towards middle_start so the work
  // items form a balanced set of stealable chunks.
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}
226 
227 void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
228   if (size <= ZMarkPartialArrayMinSize) {
229     follow_small_array(addr, size, finalizable);
230   } else {
231     follow_large_array(addr, size, finalizable);
232   }
233 }
234 
// Decode a partial-array stack entry (the inverse of the encoding done in
// push_partial_array) back into an address/size range and follow it.
void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  // Reconstruct a good-colored address from the compressed offset
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}
241 
// Oop closure used when following an object's fields during marking.
// The finalizable template parameter selects between strong marking
// (claims CLDs strongly and participates in reference discovery) and
// finalizable marking (finalizable CLD claim, no reference discoverer).
template <bool finalizable>
class ZMarkBarrierOopClosure : public ClaimMetadataVisitingOopIterateClosure {
public:
  ZMarkBarrierOopClosure() :
      ClaimMetadataVisitingOopIterateClosure(finalizable
                                                 ? ClassLoaderData::_claim_finalizable
                                                 : ClassLoaderData::_claim_strong,
                                             finalizable
                                                 ? NULL
                                                 : ZHeap::heap()->reference_discoverer()) {}

  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_oop_field(p, finalizable);
  }

  virtual void do_oop(narrowOop* p) {
    // ZGC only supports uncompressed oops
    ShouldNotReachHere();
  }

  virtual void do_nmethod(nmethod* nm) {
    // Keep the nmethod's oops alive via its entry barrier
    nm->run_nmethod_entry_barrier();
  }
};
265 
// Follow an object array: claim/visit its klass metadata first, then
// follow the element range (which may be split into partial-array work).
void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  // Runtime bool is mapped to the compile-time template parameter here
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    cl.do_klass(obj->klass());
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    cl.do_klass(obj->klass());
  }

  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}
280 
// Follow a non-array object by iterating all its oop fields with the
// mark barrier closure (strong or finalizable, per the flag).
void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}
290 
291 static void try_deduplicate(ZMarkContext* context, oop obj) {
292   if (!StringDedup::is_enabled()) {
293     // Not enabled
294     return;
295   }
296 
297   if (!java_lang_String::is_instance(obj)) {
298     // Not a String object
299     return;
300   }
301 
302   if (java_lang_String::test_and_set_deduplication_requested(obj)) {
303     // Already requested deduplication
304     return;
305   }
306 
307   // Request deduplication
308   context->string_dedup_requests()->add(obj);
309 }
310 
// Process one mark stack entry: either a partial-array range, or an object
// to (optionally) mark, account as live, and follow.
void ZMark::mark_and_follow(ZMarkContext* context, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address and additional flags
  const uintptr_t addr = entry.object_address();
  const bool mark = entry.mark();
  // Not const: passed to mark_object below, which appears to update it —
  // NOTE(review): confirm inc_live is an out-parameter of mark_object
  bool inc_live = entry.inc_live();
  const bool follow = entry.follow();

  ZPage* const page = _page_table->get(addr);
  assert(page->is_relocatable(), "Invalid page state");

  // Mark
  if (mark && !page->mark_object(addr, finalizable, inc_live)) {
    // Already marked
    return;
  }

  // Increment live
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    context->cache()->inc_live(page, aligned_size);
  }

  // Follow
  if (follow) {
    if (is_array(addr)) {
      follow_array_object(objArrayOop(ZOop::from_address(addr)), finalizable);
    } else {
      const oop obj = ZOop::from_address(addr);
      follow_object(obj, finalizable);

      // Try deduplicate
      try_deduplicate(context, obj);
    }
  }
}
359 
// Drain the worker's stripe, processing entries until the stripe is empty
// or the timeout expires. Returns false if the timeout/abort triggered,
// true if the stripe was fully drained in time.
template <typename T>
bool ZMark::drain(ZMarkContext* context, T* timeout) {
  ZMarkStripe* const stripe = context->stripe();
  ZMarkThreadLocalStacks* const stacks = context->stacks();
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(context, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success — re-check so an expiry/abort signaled after the final pop
  // is still reported to the caller
  return !timeout->has_expired();
}
380 
381 bool ZMark::try_steal_local(ZMarkContext* context) {
382   ZMarkStripe* const stripe = context->stripe();
383   ZMarkThreadLocalStacks* const stacks = context->stacks();
384 
385   // Try to steal a local stack from another stripe
386   for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
387        victim_stripe != stripe;
388        victim_stripe = _stripes.stripe_next(victim_stripe)) {
389     ZMarkStack* const stack = stacks->steal(&_stripes, victim_stripe);
390     if (stack != NULL) {
391       // Success, install the stolen stack
392       stacks->install(&_stripes, stripe, stack);
393       return true;
394     }
395   }
396 
397   // Nothing to steal
398   return false;
399 }
400 
401 bool ZMark::try_steal_global(ZMarkContext* context) {
402   ZMarkStripe* const stripe = context->stripe();
403   ZMarkThreadLocalStacks* const stacks = context->stacks();
404 
405   // Try to steal a stack from another stripe
406   for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
407        victim_stripe != stripe;
408        victim_stripe = _stripes.stripe_next(victim_stripe)) {
409     ZMarkStack* const stack = victim_stripe->steal_stack();
410     if (stack != NULL) {
411       // Success, install the stolen stack
412       stacks->install(&_stripes, stripe, stack);
413       return true;
414     }
415   }
416 
417   // Nothing to steal
418   return false;
419 }
420 
421 bool ZMark::try_steal(ZMarkContext* context) {
422   return try_steal_local(context) || try_steal_global(context);
423 }
424 
// Briefly sleep (1 ms) to let other workers make progress, used while
// waiting in the termination protocol.
void ZMark::idle() const {
  os::naked_short_sleep(1);
}
428 
429 class ZMarkFlushAndFreeStacksClosure : public HandshakeClosure {
430 private:
431   ZMark* const _mark;
432   bool         _flushed;
433 
434 public:
435   ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
436       HandshakeClosure("ZMarkFlushAndFreeStacks"),
437       _mark(mark),
438       _flushed(false) {}
439 
440   void do_thread(Thread* thread) {
441     if (_mark->flush_and_free(thread)) {
442       _flushed = true;
443     }
444   }
445 
446   bool flushed() const {
447     return _flushed;
448   }
449 };
450 
451 bool ZMark::flush(bool at_safepoint) {
452   ZMarkFlushAndFreeStacksClosure cl(this);
453   if (at_safepoint) {
454     Threads::threads_do(&cl);
455   } else {
456     Handshake::execute(&cl);
457   }
458 
459   // Returns true if more work is available
460   return cl.flushed() || !_stripes.is_empty();
461 }
462 
// Perform a concurrent (non-safepoint) flush, bumping the given flush
// counter. Returns true if the flush produced more marking work.
bool ZMark::try_flush(volatile size_t* nflush) {
  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}
469 
// Proactively flush thread-local stacks to surface work before
// termination is attempted. Returns true if more work was found.
bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}
484 
// Two-stage termination protocol. Workers first gather in stage 0; the
// last worker to arrive performs a flush to look for leftover work. Only
// when flushing stops producing work do all workers gather in stage 1
// and terminate. Returns true when this worker should stop marking.
bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(&_work_terminateflush, false);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

  // Stage 1: spin/idle until either all workers agree to terminate,
  // or some worker finds more work and backs out.
  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}
531 
// Timeout policy used during concurrent mark: never expires on time,
// but reports expiry when a GC abort has been requested, so drain()
// bails out promptly.
class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    // No timeout, but check for signal to abort
    return ZAbort::should_abort();
  }
};
539 
// Worker loop for concurrent mark. Repeatedly drain the stripe, then try
// (in priority order) to steal work, to proactively flush out more work,
// and finally to terminate. Exits on termination or abort.
void ZMark::work_without_timeout(ZMarkContext* context) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    if (!drain(context, &no_timeout)) {
      // Abort
      break;
    }

    if (try_steal(context)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}
566 
// Timeout policy used when trying to complete marking inside a pause.
// To keep has_expired() cheap, the clock is only sampled every
// _check_interval calls; _check_count therefore also counts how many
// entries were processed (reported as "oops" in the log on destruction).
class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;          // absolute deadline, in counter ticks
  const uint64_t _check_interval;   // calls between clock samples
  uint64_t       _check_at;         // next call count at which to sample
  uint64_t       _check_count;      // total has_expired() calls so far
  bool           _expired;          // sticky once the deadline passes

public:
  ZMarkTimeout(uint64_t timeout_in_micros) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::micros_to_counter(timeout_in_micros)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

  bool has_expired() {
    // Only sample the clock every _check_interval calls
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};
604 
// Worker loop for the bounded mark-completion attempt during a pause.
// Like work_without_timeout, but with a real deadline and no proactive
// flushing or termination protocol — when draining and stealing are done
// (or time runs out), the worker simply stops.
void ZMark::work_with_timeout(ZMarkContext* context, uint64_t timeout_in_micros) {
  ZStatTimer timer(ZSubPhaseMarkTryComplete);
  ZMarkTimeout timeout(timeout_in_micros);

  for (;;) {
    if (!drain(context, &timeout)) {
      // Timed out
      break;
    }

    if (try_steal(context)) {
      // Stole work
      continue;
    }

    // Terminate
    break;
  }
}
624 
// Per-worker entry point. Builds the marking context for this worker's
// stripe, runs the appropriate loop (bounded or unbounded), and finally
// publishes and frees this worker's thread-local stacks.
void ZMark::work(uint64_t timeout_in_micros) {
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkContext context(_stripes.nstripes(), stripe, stacks);

  // A timeout of zero means "mark until terminated"
  if (timeout_in_micros == 0) {
    work_without_timeout(&context);
  } else {
    work_with_timeout(&context, timeout_in_micros);
  }

  // Flush and publish stacks
  stacks->flush(&_allocator, &_stripes);

  // Free remaining stacks
  stacks->free(&_allocator);
}
642 
// Oop closure used for root scanning: applies the strong (non-finalizable)
// mark barrier to each root oop slot.
class ZMarkOopClosure : public OopClosure {
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
  }

  virtual void do_oop(narrowOop* p) {
    // ZGC only supports uncompressed oops
    ShouldNotReachHere();
  }
};
652 
// Thread closure for root scanning: finishes stack watermark processing
// for each Java thread (applying the mark closure to its stack roots)
// and collects TLAB statistics. TLAB statistics are reset when the
// closure is created and published when it is destroyed.
class ZMarkThreadClosure : public ThreadClosure {
private:
  OopClosure* const _cl;

public:
  ZMarkThreadClosure(OopClosure* cl) :
      _cl(cl) {
    ZThreadLocalAllocBuffer::reset_statistics();
  }
  ~ZMarkThreadClosure() {
    ZThreadLocalAllocBuffer::publish_statistics();
  }
  virtual void do_thread(Thread* thread) {
    JavaThread* const jt = JavaThread::cast(thread);
    StackWatermarkSet::finish_processing(jt, _cl, StackWatermarkKind::gc);
    ZThreadLocalAllocBuffer::update_stats(jt);
  }
};
671 
// NMethod closure for root scanning: visits an nmethod's oops and disarms
// its entry barrier, under the per-nmethod lock so it doesn't race with
// the nmethod entry barrier itself.
class ZMarkNMethodClosure : public NMethodClosure {
private:
  OopClosure* const _cl;

public:
  ZMarkNMethodClosure(OopClosure* cl) :
      _cl(cl) {}

  virtual void do_nmethod(nmethod* nm) {
    ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
    if (!nm->is_alive()) {
      // Dead nmethods have no oops to keep alive
      return;
    }

    // Only process armed nmethods; an unarmed one has already been
    // visited (or disarmed) this cycle
    if (ZNMethod::is_armed(nm)) {
      ZNMethod::nmethod_oops_do_inner(nm, _cl);
      nm->mark_as_maybe_on_continuation();
      ZNMethod::disarm(nm);
    }
  }
};
693 
694 typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_strong> ZMarkCLDClosure;
695 
// Worker task that scans all strong roots (threads, CLDs, nmethods) and
// applies the mark closure to them. The ClassLoaderDataGraph_lock is held
// for the entire lifetime of the task (taken in the constructor, released
// in the destructor) to keep the CLD graph stable while it is iterated.
class ZMarkRootsTask : public ZTask {
private:
  ZMark* const               _mark;
  SuspendibleThreadSetJoiner _sts_joiner;
  ZRootsIterator             _roots;

  ZMarkOopClosure            _cl;
  ZMarkCLDClosure            _cld_cl;
  ZMarkThreadClosure         _thread_cl;
  ZMarkNMethodClosure        _nm_cl;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _sts_joiner(),
      _roots(ClassLoaderData::_claim_strong),
      _cl(),
      _cld_cl(&_cl),
      _thread_cl(&_cl),
      _nm_cl(&_cl) {
    ClassLoaderDataGraph_lock->lock();
  }

  ~ZMarkRootsTask() {
    ClassLoaderDataGraph_lock->unlock();
  }

  virtual void work() {
    _roots.apply(&_cl,
                 &_cld_cl,
                 &_thread_cl,
                 &_nm_cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};
737 
// Worker task that performs the actual marking work. Per-round state is
// prepared in the constructor (before workers run) and the round's
// counters are folded into cycle totals in the destructor (after all
// workers have finished). A timeout of 0 means "mark until terminated".
class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_micros;

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_micros = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_micros(timeout_in_micros) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_micros);
  }
};
759 
760 void ZMark::mark(bool initial) {
761   if (initial) {
762     ZMarkRootsTask task(this);
763     _workers->run(&task);
764   }
765 
766   ZMarkTask task(this);
767   _workers->run(&task);
768 }
769 
// Attempt to finish marking inside the pause by doing a bounded amount
// of mark work. Returns true if marking completed (all stripes empty).
bool ZMark::try_complete() {
  _ntrycomplete++;

  // Use nconcurrent number of worker threads to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}
781 
782 bool ZMark::try_end() {
783   // Flush all mark stacks
784   if (!flush(true /* at_safepoint */)) {
785     // Mark completed
786     return true;
787   }
788 
789   // Try complete marking by doing a limited
790   // amount of mark work in this phase.
791   return try_complete();
792 }
793 
// End marking. Returns false if marking could not be completed and must
// continue concurrently (the caller re-enters concurrent mark); returns
// true once marking is fully done, after verification and statistics.
bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  CodeCache::increment_marking_cycle();

  // Mark completed
  return true;
}
815 
// Release unused mark stack memory back after marking, and record the
// remaining mark stack space usage in the statistics.
void ZMark::free() {
  // Free any unused mark stack space
  _allocator.free();

  // Update statistics
  ZStatMark::set_at_mark_free(_allocator.size());
}
823 
824 void ZMark::flush_and_free() {
825   Thread* const thread = Thread::current();
826   flush_and_free(thread);
827 }
828 
// Publish the given thread's local mark stacks to the stripes and free
// its remaining stack memory. Returns true if any stack was flushed
// (i.e. the thread held unpublished marking work).
bool ZMark::flush_and_free(Thread* thread) {
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
  const bool flushed = stacks->flush(&_allocator, &_stripes);
  stacks->free(&_allocator);
  return flushed;
}
835 
// Verification closure: guarantees that a thread holds no leftover
// thread-local mark stacks for any stripe.
class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
      _stripes(stripes) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};
849 
// Verify that no marking work remains anywhere: neither in any thread's
// local stacks nor in the shared stripes. Used under ZVerifyMarking.
void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}