1 /*
  2  * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  */
 23 
 24 #include "precompiled.hpp"
 25 #include "classfile/classLoaderData.hpp"
 26 #include "classfile/classLoaderDataGraph.hpp"
 27 #include "classfile/javaClasses.inline.hpp"
 28 #include "code/nmethod.hpp"
 29 #include "gc/shared/gc_globals.hpp"
 30 #include "gc/shared/stringdedup/stringDedup.hpp"
 31 #include "gc/shared/suspendibleThreadSet.hpp"
 32 #include "gc/z/zAbort.inline.hpp"
 33 #include "gc/z/zBarrier.inline.hpp"
 34 #include "gc/z/zHeap.inline.hpp"
 35 #include "gc/z/zLock.inline.hpp"
 36 #include "gc/z/zMark.inline.hpp"
 37 #include "gc/z/zMarkCache.inline.hpp"
 38 #include "gc/z/zMarkContext.inline.hpp"
 39 #include "gc/z/zMarkStack.inline.hpp"
 40 #include "gc/z/zMarkTerminate.inline.hpp"
 41 #include "gc/z/zNMethod.hpp"
 42 #include "gc/z/zOop.inline.hpp"
 43 #include "gc/z/zPage.hpp"
 44 #include "gc/z/zPageTable.inline.hpp"
 45 #include "gc/z/zRootsIterator.hpp"
 46 #include "gc/z/zStackWatermark.hpp"
 47 #include "gc/z/zStat.hpp"
 48 #include "gc/z/zTask.hpp"
 49 #include "gc/z/zThread.inline.hpp"
 50 #include "gc/z/zThreadLocalAllocBuffer.hpp"
 51 #include "gc/z/zUtils.inline.hpp"
 52 #include "gc/z/zWorkers.hpp"
 53 #include "logging/log.hpp"
 54 #include "memory/iterator.inline.hpp"
 55 #include "oops/objArrayOop.inline.hpp"
 56 #include "oops/oop.inline.hpp"
 57 #include "runtime/atomic.hpp"
 58 #include "runtime/handshake.hpp"
 59 #include "runtime/prefetch.inline.hpp"
 60 #include "runtime/safepointMechanism.hpp"
 61 #include "runtime/stackWatermark.hpp"
 62 #include "runtime/stackWatermarkSet.inline.hpp"
 63 #include "runtime/thread.hpp"
 64 #include "utilities/align.hpp"
 65 #include "utilities/globalDefinitions.hpp"
 66 #include "utilities/powerOfTwo.hpp"
 67 #include "utilities/ticks.hpp"
 68 
// Timing sub-phases reported through the ZStat framework
static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");
 73 
// Construct the marking state. The stack allocator, stripe set and
// termination protocol are default-constructed here; per-cycle state
// is (re)initialized in start() and prepare_work().
ZMark::ZMark(ZWorkers* workers, ZPageTable* page_table) :
    _workers(workers),
    _page_table(page_table),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}
 88 
 89 bool ZMark::is_initialized() const {
 90   return _allocator.is_initialized();
 91 }
 92 
 93 size_t ZMark::calculate_nstripes(uint nworkers) const {
 94   // Calculate the number of stripes from the number of workers we use,
 95   // where the number of stripes must be a power of two and we want to
 96   // have at least one worker per stripe.
 97   const size_t nstripes = round_down_power_of_2(nworkers);
 98   return MIN2(nstripes, ZMarkStripesMax);
 99 }
100 
// Per-cycle setup, called at mark start (inside the mark start pause)
void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Increment global sequence number to invalidate
  // marking information for all pages.
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->active_workers();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}
140 
141 void ZMark::prepare_work() {
142   assert(_nworkers == _workers->active_workers(), "Invalid number of workers");
143 
144   // Set number of active workers
145   _terminate.reset(_nworkers);
146 
147   // Reset flush counters
148   _work_nproactiveflush = _work_nterminateflush = 0;
149   _work_terminateflush = true;
150 }
151 
152 void ZMark::finish_work() {
153   // Accumulate proactive/terminate flush counters
154   _nproactiveflush += _work_nproactiveflush;
155   _nterminateflush += _work_nterminateflush;
156 }
157 
158 bool ZMark::is_array(uintptr_t addr) const {
159   return ZOop::from_address(addr)->is_objArray();
160 }
161 
// Push a partial-array work item onto the current thread's mark stacks.
// The entry encodes the chunk as an offset (in ZMarkPartialArrayMinSize
// units) plus a length (in oops), rather than an object address.
void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  // Pushed without publishing, keeping the work local to this thread
  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}
175 
176 void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
177   assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
178   const size_t length = size / oopSize;
179 
180   log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);
181 
182   ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
183 }
184 
// Process a large array chunk by splitting it: the aligned middle part
// is repeatedly halved and deferred as partial-array work items, the
// unaligned trailing part is deferred as-is, and the unaligned leading
// part is followed immediately so every call makes real progress.
void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t    middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT" (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s), splitting the middle in half each
  // iteration so the work items can be processed in parallel.
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}
224 
225 void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
226   if (size <= ZMarkPartialArrayMinSize) {
227     follow_small_array(addr, size, finalizable);
228   } else {
229     follow_large_array(addr, size, finalizable);
230   }
231 }
232 
233 void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
234   const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
235   const size_t size = entry.partial_array_length() * oopSize;
236 
237   follow_array(addr, size, finalizable);
238 }
239 
// Oop closure applied when following the fields of a marked object.
// The finalizable template parameter selects the CLD claim tag and
// disables reference discovery for finalizable marking.
template <bool finalizable>
class ZMarkBarrierOopClosure : public ClaimMetadataVisitingOopIterateClosure {
public:
  ZMarkBarrierOopClosure() :
      ClaimMetadataVisitingOopIterateClosure(finalizable
                                                 ? ClassLoaderData::_claim_finalizable
                                                 : ClassLoaderData::_claim_strong,
                                             finalizable
                                                 ? NULL
                                                 : ZHeap::heap()->reference_discoverer()) {}

  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_oop_field(p, finalizable);
  }

  virtual void do_oop(narrowOop* p) {
    // ZGC does not use compressed oops
    ShouldNotReachHere();
  }
};
259 
// Follow an object array: visit its klass metadata with the proper
// claim strength, then follow the element range (which may be split
// into partial-array work items).
void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    cl.do_klass(obj->klass());
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    cl.do_klass(obj->klass());
  }

  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}
274 
275 void ZMark::follow_object(oop obj, bool finalizable) {
276   if (finalizable) {
277     ZMarkBarrierOopClosure<true /* finalizable */> cl;
278     obj->oop_iterate(&cl);
279   } else {
280     ZMarkBarrierOopClosure<false /* finalizable */> cl;
281     obj->oop_iterate(&cl);
282   }
283 }
284 
285 static void try_deduplicate(ZMarkContext* context, oop obj) {
286   if (!StringDedup::is_enabled()) {
287     // Not enabled
288     return;
289   }
290 
291   if (!java_lang_String::is_instance(obj)) {
292     // Not a String object
293     return;
294   }
295 
296   if (java_lang_String::test_and_set_deduplication_requested(obj)) {
297     // Already requested deduplication
298     return;
299   }
300 
301   // Request deduplication
302   context->string_dedup_requests()->add(obj);
303 }
304 
// Process one mark stack entry: either a deferred partial-array chunk,
// or an object to (conditionally) mark, account as live, and follow.
void ZMark::mark_and_follow(ZMarkContext* context, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address and additional flags
  const uintptr_t addr = entry.object_address();
  const bool mark = entry.mark();
  // Deliberately non-const: passed by non-const reference to
  // mark_object() below, which presumably may update it — TODO confirm
  // against ZPage::mark_object()
  bool inc_live = entry.inc_live();
  const bool follow = entry.follow();

  ZPage* const page = _page_table->get(addr);
  assert(page->is_relocatable(), "Invalid page state");

  // Mark
  if (mark && !page->mark_object(addr, finalizable, inc_live)) {
    // Already marked
    return;
  }

  // Increment live
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    context->cache()->inc_live(page, aligned_size);
  }

  // Follow
  if (follow) {
    if (is_array(addr)) {
      follow_array_object(objArrayOop(ZOop::from_address(addr)), finalizable);
    } else {
      const oop obj = ZOop::from_address(addr);
      follow_object(obj, finalizable);

      // Try deduplicate
      try_deduplicate(context, obj);
    }
  }
}
353 
// Drain all entries from this worker's stripe (and its thread-local
// stacks). Returns false if the timeout expired — or, with
// ZMarkNoTimeout, if an abort was signaled — before draining finished.
template <typename T>
bool ZMark::drain(ZMarkContext* context, T* timeout) {
  ZMarkStripe* const stripe = context->stripe();
  ZMarkThreadLocalStacks* const stacks = context->stacks();
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(context, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success — but re-check the timeout so expiry/abort is reported
  // even when the stripe was already empty on entry
  return !timeout->has_expired();
}
374 
375 bool ZMark::try_steal_local(ZMarkContext* context) {
376   ZMarkStripe* const stripe = context->stripe();
377   ZMarkThreadLocalStacks* const stacks = context->stacks();
378 
379   // Try to steal a local stack from another stripe
380   for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
381        victim_stripe != stripe;
382        victim_stripe = _stripes.stripe_next(victim_stripe)) {
383     ZMarkStack* const stack = stacks->steal(&_stripes, victim_stripe);
384     if (stack != NULL) {
385       // Success, install the stolen stack
386       stacks->install(&_stripes, stripe, stack);
387       return true;
388     }
389   }
390 
391   // Nothing to steal
392   return false;
393 }
394 
395 bool ZMark::try_steal_global(ZMarkContext* context) {
396   ZMarkStripe* const stripe = context->stripe();
397   ZMarkThreadLocalStacks* const stacks = context->stacks();
398 
399   // Try to steal a stack from another stripe
400   for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
401        victim_stripe != stripe;
402        victim_stripe = _stripes.stripe_next(victim_stripe)) {
403     ZMarkStack* const stack = victim_stripe->steal_stack();
404     if (stack != NULL) {
405       // Success, install the stolen stack
406       stacks->install(&_stripes, stripe, stack);
407       return true;
408     }
409   }
410 
411   // Nothing to steal
412   return false;
413 }
414 
415 bool ZMark::try_steal(ZMarkContext* context) {
416   return try_steal_local(context) || try_steal_global(context);
417 }
418 
// Sleep briefly (1 ms) while waiting for other workers during termination
void ZMark::idle() const {
  os::naked_short_sleep(1);
}
422 
423 class ZMarkFlushAndFreeStacksClosure : public HandshakeClosure {
424 private:
425   ZMark* const _mark;
426   bool         _flushed;
427 
428 public:
429   ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
430       HandshakeClosure("ZMarkFlushAndFreeStacks"),
431       _mark(mark),
432       _flushed(false) {}
433 
434   void do_thread(Thread* thread) {
435     if (_mark->flush_and_free(thread)) {
436       _flushed = true;
437     }
438   }
439 
440   bool flushed() const {
441     return _flushed;
442   }
443 };
444 
445 bool ZMark::flush(bool at_safepoint) {
446   ZMarkFlushAndFreeStacksClosure cl(this);
447   if (at_safepoint) {
448     Threads::threads_do(&cl);
449   } else {
450     Handshake::execute(&cl);
451   }
452 
453   // Returns true if more work is available
454   return cl.flushed() || !_stripes.is_empty();
455 }
456 
// Perform a flush, bumping the given flush counter first so that other
// workers observe a flush in progress. Returns true if the flush made
// more work available.
bool ZMark::try_flush(volatile size_t* nflush) {
  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}
463 
464 bool ZMark::try_proactive_flush() {
465   // Only do proactive flushes from worker 0
466   if (ZThread::worker_id() != 0) {
467     return false;
468   }
469 
470   if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
471       Atomic::load(&_work_nterminateflush) != 0) {
472     // Limit reached or we're trying to terminate
473     return false;
474   }
475 
476   return try_flush(&_work_nproactiveflush);
477 }
478 
// Two-stage termination protocol. Stage 0 lets the last-arriving
// worker flush thread-local stacks and restart marking if that
// surfaces more work; stage 1 is the final rendezvous. Returns true
// when marking should terminate, false when the caller should make
// another marking attempt.
bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(&_work_terminateflush, false);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}
525 
// Timeout policy used during normal concurrent marking: never expires,
// but reports expiry when a GC abort has been requested so that drain()
// bails out promptly.
class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    // No timeout, but check for signal to abort
    return ZAbort::should_abort();
  }
};
533 
// Main concurrent-marking loop: drain our stripe, then try to steal
// work, then try a proactive flush, and finally attempt termination.
// Exits on abort (drain fails) or successful termination.
void ZMark::work_without_timeout(ZMarkContext* context) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    if (!drain(context, &no_timeout)) {
      // Abort
      break;
    }

    if (try_steal(context)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}
560 
// Timeout policy used when trying to complete marking inside a pause.
// To keep overhead low, the clock is only sampled once every
// _check_interval calls to has_expired().
class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;        // Absolute deadline, in counter units
  const uint64_t _check_interval; // Calls between clock samples
  uint64_t       _check_at;       // Call count at which to sample next
  uint64_t       _check_count;    // Total number of has_expired() calls
  bool           _expired;

public:
  ZMarkTimeout(uint64_t timeout_in_micros) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::micros_to_counter(timeout_in_micros)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  // Logs whether marking completed or timed out, and how much was done
  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

  // Sticky: once expired, stays expired
  bool has_expired() {
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};
598 
599 void ZMark::work_with_timeout(ZMarkContext* context, uint64_t timeout_in_micros) {
600   ZStatTimer timer(ZSubPhaseMarkTryComplete);
601   ZMarkTimeout timeout(timeout_in_micros);
602 
603   for (;;) {
604     if (!drain(context, &timeout)) {
605       // Timed out
606       break;
607     }
608 
609     if (try_steal(context)) {
610       // Stole work
611       continue;
612     }
613 
614     // Terminate
615     break;
616   }
617 }
618 
// Per-worker marking entry point. Sets up this worker's stripe and
// thread-local stacks, runs the marking loop, then publishes and
// frees any remaining stack state.
void ZMark::work(uint64_t timeout_in_micros) {
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkContext context(_stripes.nstripes(), stripe, stacks);

  // A zero timeout means no deadline
  if (timeout_in_micros == 0) {
    work_without_timeout(&context);
  } else {
    work_with_timeout(&context, timeout_in_micros);
  }

  // Flush and publish stacks
  stacks->flush(&_allocator, &_stripes);

  // Free remaining stacks
  stacks->free(&_allocator);
}
636 
// Oop closure used when marking roots; always marks strongly
class ZMarkOopClosure : public OopClosure {
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
  }

  virtual void do_oop(narrowOop* p) {
    // ZGC does not use compressed oops
    ShouldNotReachHere();
  }
};
646 
// Thread closure for root marking: finishes stack watermark processing
// for each Java thread (applying the oop closure to its stack) and
// collects TLAB statistics. Statistics are reset on construction and
// published on destruction.
class ZMarkThreadClosure : public ThreadClosure {
private:
  OopClosure* const _cl;

public:
  ZMarkThreadClosure(OopClosure* cl) :
      _cl(cl) {
    ZThreadLocalAllocBuffer::reset_statistics();
  }
  ~ZMarkThreadClosure() {
    ZThreadLocalAllocBuffer::publish_statistics();
  }
  virtual void do_thread(Thread* thread) {
    JavaThread* const jt = JavaThread::cast(thread);
    StackWatermarkSet::finish_processing(jt, _cl, StackWatermarkKind::gc);
    ZThreadLocalAllocBuffer::update_stats(jt);
  }
};
665 
666 class ZMarkNMethodClosure : public NMethodClosure {
667 private:
668   OopClosure* const _cl;
669 
670 public:
671   ZMarkNMethodClosure(OopClosure* cl) :
672       _cl(cl) {}
673 
674   virtual void do_nmethod(nmethod* nm) {
675     ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
676     if (!nm->is_alive()) {
677       return;
678     }
679 
680     if (ZNMethod::is_armed(nm)) {
681       ZNMethod::nmethod_oops_do_inner(nm, _cl);
682       ZNMethod::disarm(nm);
683     }
684   }
685 };
686 
// CLD closure applying the oop closure to class loader data, using the strong claim tag
typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_strong> ZMarkCLDClosure;
688 
// Task marking the strong roots (threads, CLDs, nmethods). The
// ClassLoaderDataGraph lock is held for the full lifetime of the task:
// acquired in the constructor, released in the destructor.
class ZMarkRootsTask : public ZTask {
private:
  ZMark* const               _mark;
  SuspendibleThreadSetJoiner _sts_joiner;
  ZRootsIterator             _roots;

  ZMarkOopClosure            _cl;
  ZMarkCLDClosure            _cld_cl;
  ZMarkThreadClosure         _thread_cl;
  ZMarkNMethodClosure        _nm_cl;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _sts_joiner(),
      _roots(ClassLoaderData::_claim_strong),
      _cl(),
      _cld_cl(&_cl),
      _thread_cl(&_cl),
      _nm_cl(&_cl) {
    ClassLoaderDataGraph_lock->lock();
  }

  ~ZMarkRootsTask() {
    ClassLoaderDataGraph_lock->unlock();
  }

  virtual void work() {
    _roots.apply(&_cl,
                 &_cld_cl,
                 &_thread_cl,
                 &_nm_cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};
730 
// Task running the marking loop on all workers. prepare_work() runs
// before any worker starts (constructor) and finish_work() after all
// workers are done (destructor). A zero timeout means no deadline.
class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_micros;

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_micros = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_micros(timeout_in_micros) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_micros);
  }
};
752 
// Run a marking round. On the initial round, first mark the strong
// roots, then run the (concurrent) mark task on the workers.
void ZMark::mark(bool initial) {
  if (initial) {
    ZMarkRootsTask task(this);
    _workers->run(&task);
  }

  ZMarkTask task(this);
  _workers->run(&task);
}
762 
// Try to finish marking with a time-bounded mark task. Returns true
// if marking completed within the timeout.
bool ZMark::try_complete() {
  _ntrycomplete++;

  // Run the mark task on the same workers to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}
774 
775 bool ZMark::try_end() {
776   // Flush all mark stacks
777   if (!flush(true /* at_safepoint */)) {
778     // Mark completed
779     return true;
780   }
781 
782   // Try complete marking by doing a limited
783   // amount of mark work in this phase.
784   return try_complete();
785 }
786 
// End marking. Returns false (and counts a continuation) if more
// marking work remains, in which case the caller must run another
// concurrent mark round.
bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}
806 
// Release mark stack memory no longer needed after marking
void ZMark::free() {
  // Free any unused mark stack space
  _allocator.free();

  // Update statistics with the space still held by the allocator
  ZStatMark::set_at_mark_free(_allocator.size());
}
814 
815 void ZMark::flush_and_free() {
816   Thread* const thread = Thread::current();
817   flush_and_free(thread);
818 }
819 
820 bool ZMark::flush_and_free(Thread* thread) {
821   ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
822   const bool flushed = stacks->flush(&_allocator, &_stripes);
823   stacks->free(&_allocator);
824   return flushed;
825 }
826 
827 class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
828 private:
829   const ZMarkStripeSet* const _stripes;
830 
831 public:
832   ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
833       _stripes(stripes) {}
834 
835   void do_thread(Thread* thread) {
836     ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
837     guarantee(stacks->is_empty(_stripes), "Should be empty");
838   }
839 };
840 
// Verify that neither any thread's local stacks nor the global stripes
// contain leftover mark stack entries
void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}