/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSetNMethod.hpp"
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zGenerationId.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkContext.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStackWatermark.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThreadLocalAllocBuffer.hpp"
#include "gc/z/zUncoloredRoot.inline.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zWorkers.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/continuation.hpp"
#include "runtime/handshake.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/ticks.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentMarkRootUncoloredYoung("Concurrent Mark Root Uncolored", ZGenerationId::young);
static const ZStatSubPhase ZSubPhaseConcurrentMarkRootColoredYoung("Concurrent Mark Root Colored", ZGenerationId::young);
static const ZStatSubPhase ZSubPhaseConcurrentMarkRootUncoloredOld("Concurrent Mark Root Uncolored", ZGenerationId::old);
static const ZStatSubPhase ZSubPhaseConcurrentMarkRootColoredOld("Concurrent Mark Root Colored", ZGenerationId::old);

ZMark::ZMark(ZGeneration* generation, ZPageTable* page_table)
  : _generation(generation),
    _page_table(page_table),
    _marking_smr(),
    _stripes(),
    _terminate(),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes from the number of workers we use,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
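  // For example, 6 workers round down to 4 stripes and 8 workers map
  // directly to 8 stripes; the result is then capped at ZMarkStripesMax.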
  const size_t nstripes = round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = workers()->active_workers();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  _generation->stat_mark()->at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe %zu(%zu)",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

ZWorkers* ZMark::workers() const {
  return _generation->workers();
}

void ZMark::prepare_work() {
  // Set number of workers to use
  _nworkers = workers()->active_workers();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

void ZMark::follow_work_complete() {
  follow_work(false /* partial */);
}

bool ZMark::follow_work_partial() {
  return follow_work(true /* partial */);
}

bool ZMark::is_array(zaddress addr) const {
  return to_oop(addr)->is_objArray();
}

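// A partial-array mark stack entry stores the array slice as a shifted heap
// offset rather than a raw pointer. push_partial_array() asserts that the
// address is ZMarkPartialArrayMinSize aligned, so the low bits shifted out
// here are always zero and the round-trip through
// decode_partial_array_offset() is lossless.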
static uintptr_t encode_partial_array_offset(zpointer* addr) {
  return untype(ZAddress::offset(to_zaddress((uintptr_t)addr))) >> ZMarkPartialArrayMinSizeShift;
}

static zpointer* decode_partial_array_offset(uintptr_t offset) {
  return (zpointer*)ZOffset::address(to_zoffset(offset << ZMarkPartialArrayMinSizeShift));
}

void ZMark::push_partial_array(zpointer* addr, size_t length, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(Thread::current(), _generation->id());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr((uintptr_t)addr);
  const uintptr_t offset = encode_partial_array_offset(addr);
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (%zu), stripe: %zu",
                                 p2i(addr), length, _stripes.stripe_id(stripe));

  stacks->push(&_stripes, stripe, &_terminate, entry, false /* publish */);
}

static void mark_barrier_on_oop_array(volatile zpointer* p, size_t length, bool finalizable, bool young) {
  for (volatile const zpointer* const end = p + length; p < end; p++) {
    if (young) {
      ZBarrier::mark_barrier_on_young_oop_field(p);
    } else {
      ZBarrier::mark_barrier_on_old_oop_field(p, finalizable);
    }
  }
}

void ZMark::follow_array_elements_small(zpointer* addr, size_t length, bool finalizable) {
  assert(length <= ZMarkPartialArrayMinLength, "Too large, should be split");

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (%zu)", p2i(addr), length);

  mark_barrier_on_oop_array(addr, length, finalizable, _generation->is_young());
}

void ZMark::follow_array_elements_large(zpointer* addr, size_t length, bool finalizable) {
  assert(length <= (size_t)arrayOopDesc::max_array_length(T_OBJECT), "Too large");
  assert(length > ZMarkPartialArrayMinLength, "Too small, should not be split");

  zpointer* const start = addr;
  zpointer* const end = start + length;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  zpointer* const middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t    middle_length = align_down(end - middle_start, ZMarkPartialArrayMinLength);
  zpointer* const middle_end = middle_start + middle_length;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT " (%zu), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (%zu)",
                                 p2i(start), p2i(end), length, p2i(middle_start), p2i(middle_end), middle_length);

  // Push unaligned trailing part
  if (end > middle_end) {
    zpointer* const trailing_addr = middle_end;
    const size_t trailing_length = end - middle_end;
    push_partial_array(trailing_addr, trailing_length, finalizable);
  }

  // Push aligned middle part(s)
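  // Each loop iteration halves the remaining middle range and pushes the
  // upper half onto a stripe, so the pushed chunks shrink geometrically and
  // other workers can steal large pieces of the array early on.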
  zpointer* partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_length = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinLength);
    partial_addr -= partial_length;
    push_partial_array(partial_addr, partial_length, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  zpointer* const leading_addr = start;
  const size_t leading_length = middle_start - start;
  follow_array_elements_small(leading_addr, leading_length, finalizable);
}

void ZMark::follow_array_elements(zpointer* addr, size_t length, bool finalizable) {
  if (length <= ZMarkPartialArrayMinLength) {
    follow_array_elements_small(addr, length, finalizable);
  } else {
    follow_array_elements_large(addr, length, finalizable);
  }
}

void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  zpointer* const addr = decode_partial_array_offset(entry.partial_array_offset());
  const size_t length = entry.partial_array_length();

  follow_array_elements(addr, length, finalizable);
}

template <bool finalizable, ZGenerationIdOptional generation>
class ZMarkBarrierFollowOopClosure : public OopIterateClosure {
private:
  static int claim_value() {
    return finalizable ? ClassLoaderData::_claim_finalizable
                       : ClassLoaderData::_claim_strong;
  }

  static ReferenceDiscoverer* discoverer() {
    if (!finalizable) {
      return ZGeneration::old()->reference_discoverer();
    } else {
      return nullptr;
    }
  }

  static bool visit_metadata() {
    // Only visit metadata if we're marking through the old generation
    return ZGeneration::old()->is_phase_mark();
  }

  const bool _visit_metadata;

public:
  ZMarkBarrierFollowOopClosure()
    : OopIterateClosure(discoverer()),
      _visit_metadata(visit_metadata()) {}

  virtual void do_oop(oop* p) {
    switch (generation) {
    case ZGenerationIdOptional::young:
      ZBarrier::mark_barrier_on_young_oop_field((volatile zpointer*)p);
      break;
    case ZGenerationIdOptional::old:
      ZBarrier::mark_barrier_on_old_oop_field((volatile zpointer*)p, finalizable);
      break;
    case ZGenerationIdOptional::none:
      ZBarrier::mark_barrier_on_oop_field((volatile zpointer*)p, finalizable);
      break;
    }
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }

  virtual bool do_metadata() final {
    // Only help out with metadata visiting
    return _visit_metadata;
  }

  virtual void do_nmethod(nmethod* nm) {
    assert(do_metadata(), "Don't call otherwise");
    assert(!finalizable, "Can't handle finalizable marking of nmethods");
    nm->run_nmethod_entry_barrier();
  }

  virtual void do_method(Method* m) {
    // Mark interpreted frames for class redefinition
    m->record_gc_epoch();
  }

  virtual void do_klass(Klass* klass) {
    ClassLoaderData* cld = klass->class_loader_data();
    ZMarkBarrierFollowOopClosure<finalizable, ZGenerationIdOptional::none> cl;
    cld->oops_do(&cl, claim_value());
  }

  virtual void do_cld(ClassLoaderData* cld) {
    ZMarkBarrierFollowOopClosure<finalizable, ZGenerationIdOptional::none> cl;
    cld->oops_do(&cl, claim_value());
  }
};

void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  if (_generation->is_old()) {
    if (finalizable) {
      ZMarkBarrierFollowOopClosure<true /* finalizable */, ZGenerationIdOptional::old> cl;
      cl.do_klass(obj->klass());
    } else {
      ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::old> cl;
      cl.do_klass(obj->klass());
    }
  } else {
    ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::none> cl;
    if (cl.do_metadata()) {
      cl.do_klass(obj->klass());
    }
  }

  // Should be convertible to colorless oop
  check_is_valid_zaddress(obj);

  zpointer* const addr = (zpointer*)obj->base();
  const size_t length = (size_t)obj->length();

  follow_array_elements(addr, length, finalizable);
}

void ZMark::follow_object(oop obj, bool finalizable) {
  if (_generation->is_old()) {
    assert(ZHeap::heap()->is_old(to_zaddress(obj)), "Should only follow objects from old gen");
    if (obj->is_stackChunk()) {
      // No support for tracing through stack chunks as finalizably reachable
      ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::old> cl;
      ZIterator::oop_iterate(obj, &cl);
    } else if (finalizable) {
      ZMarkBarrierFollowOopClosure<true /* finalizable */, ZGenerationIdOptional::old> cl;
      ZIterator::oop_iterate(obj, &cl);
    } else {
      ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::old> cl;
      ZIterator::oop_iterate(obj, &cl);
    }
  } else {
    // Young gen must help out with old marking
    ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::young> cl;
    ZIterator::oop_iterate(obj, &cl);
  }
}

void ZMark::mark_and_follow(ZMarkContext* context, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address and additional flags
  const zaddress addr = ZOffset::address(to_zoffset(entry.object_address()));
  const bool mark = entry.mark();
  bool inc_live = entry.inc_live();
  const bool follow = entry.follow();

  ZPage* const page = _page_table->get(addr);
  assert(page->is_relocatable(), "Invalid page state");

  // Mark
  if (mark && !page->mark_object(addr, finalizable, inc_live)) {
    // Already marked
    return;
  }

  // Increment live
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    context->cache()->inc_live(page, aligned_size);
  }

  // Follow
  if (follow) {
    if (is_array(addr)) {
      follow_array_object(objArrayOop(to_oop(addr)), finalizable);
    } else {
      follow_object(to_oop(addr), finalizable);
    }
  }
}

// Returns true if we need to stop working, either to resize the number of
// worker threads or to abort marking.
bool ZMark::rebalance_work(ZMarkContext* context) {
  const size_t assumed_nstripes = context->nstripes();
  const size_t nstripes = _stripes.nstripes();

  if (assumed_nstripes != nstripes) {
    // The number of stripes has changed; reflect that change locally
    context->set_nstripes(nstripes);
  } else if (nstripes < calculate_nstripes(_nworkers) && _stripes.is_crowded()) {
    // We are running with a reduced number of stripes to minimize the amount of work
    // hidden in local stacks when the stripes are less well balanced. When this situation
    // starts getting crowded, we bump the number of stripes again.
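    // Doubling keeps the stripe count a power of two. try_set_nstripes is
    // expected to succeed for only one of the racing workers; the others
    // pick up the new count via the assumed_nstripes check above.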
    const size_t new_nstripes = nstripes << 1;
    if (_stripes.try_set_nstripes(nstripes, new_nstripes)) {
      context->set_nstripes(new_nstripes);
    }
  }

  ZMarkStripe* stripe = _stripes.stripe_for_worker(_nworkers, WorkerThread::worker_id());
  if (context->stripe() != stripe) {
    // Need to switch stripe
    context->set_stripe(stripe);
    flush(Thread::current());
  } else if (!_terminate.saturated()) {
    // Work imbalance detected; striped marking is likely going to be in the way
    flush(Thread::current());
  }

  SuspendibleThreadSet::yield();

  return ZAbort::should_abort() || _generation->should_worker_resize();
}

bool ZMark::drain(ZMarkContext* context) {
  ZMarkThreadLocalStacks* const stacks = context->stacks();
  ZMarkStackEntry entry;
  size_t processed = 0;

  context->set_stripe(_stripes.stripe_for_worker(_nworkers, WorkerThread::worker_id()));
  context->set_nstripes(_stripes.nstripes());

  // Drain stripe stacks
  while (stacks->pop(&_marking_smr, &_stripes, context->stripe(), &entry)) {
    mark_and_follow(context, entry);

    if ((processed++ & 31) == 0 && rebalance_work(context)) {
      return false;
    }
  }

  return true;
}

bool ZMark::try_steal_local(ZMarkContext* context) {
  ZMarkStripe* const stripe = context->stripe();
  ZMarkThreadLocalStacks* const stacks = context->stacks();

  // Try to steal a local stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = stacks->steal(&_stripes, victim_stripe);
    if (stack != nullptr) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

bool ZMark::try_steal_global(ZMarkContext* context) {
  ZMarkStripe* const stripe = context->stripe();
  ZMarkThreadLocalStacks* const stacks = context->stacks();

  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack(&_marking_smr);
    if (stack != nullptr) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

bool ZMark::try_steal(ZMarkContext* context) {
  return try_steal_local(context) || try_steal_global(context);
}

class ZMarkFlushStacksHandshakeClosure : public HandshakeClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushStacksHandshakeClosure(ZMark* mark)
    : HandshakeClosure("ZMarkFlushStacks"),
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush(thread)) {
      _flushed = true;
      if (SafepointSynchronize::is_at_safepoint()) {
        log_debug(gc, marking)("Thread broke mark termination %s", thread->name());
      }
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

class VM_ZMarkFlushOperation : public VM_Operation {
private:
  ThreadClosure* _cl;

public:
  VM_ZMarkFlushOperation(ThreadClosure* cl)
    : _cl(cl) {}

  virtual bool evaluate_at_safepoint() const {
    return false;
  }

  virtual void doit() {
    // Flush VM thread
    Thread* const thread = Thread::current();
    _cl->do_thread(thread);
  }

  virtual VMOp_Type type() const {
    return VMOp_ZMarkFlushOperation;
  }

  virtual bool is_gc_operation() const {
    return true;
  }
};

bool ZMark::flush() {
  ZMarkFlushStacksHandshakeClosure cl(this);
  VM_ZMarkFlushOperation vm_cl(&cl);
  Handshake::execute(&cl);
  VMThread::execute(&vm_cl);

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}

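// Returns true if more marking work was found, either because flushing
// published non-empty stacks or because an object was resurrected while
// we were trying to terminate.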
bool ZMark::try_terminate_flush() {
  Atomic::inc(&_work_nterminateflush);
  _terminate.set_resurrected(false);

  if (ZVerifyMarking) {
    verify_worker_stacks_empty();
  }

  return flush() || _terminate.resurrected();
}

bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (WorkerThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax) {
    // Limit reached
    return false;
  }

  Atomic::inc(&_work_nproactiveflush);

  SuspendibleThreadSetLeaver sts_leaver;
  return flush();
}

bool ZMark::try_terminate(ZMarkContext* context) {
  return _terminate.try_terminate(&_stripes, context->nstripes());
}

void ZMark::leave() {
  _terminate.leave();
}

// Returning true means marking finished successfully after marking as far as it could.
// Returning false means that marking finished unsuccessfully due to abort or resizing.
bool ZMark::follow_work(bool partial) {
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, WorkerThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(Thread::current(), _generation->id());
  ZMarkContext context(ZMarkStripesMax, stripe, stacks);

  for (;;) {
    if (!drain(&context)) {
      leave();
      return false;
    }

    if (try_steal(&context)) {
      // Stole work
      continue;
    }

    if (partial) {
      return true;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate(&context)) {
      // Terminate
      return true;
    }
  }
}

class ZMarkOopClosure : public OopClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_oop_field((zpointer*)p, false /* finalizable */);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkYoungOopClosure : public OopClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_young_good_barrier_on_oop_field((zpointer*)p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkThreadClosure : public ThreadClosure {
private:
  static ZUncoloredRoot::RootFunction root_function() {
    return ZUncoloredRoot::mark;
  }

public:
  ZMarkThreadClosure() {
    ZThreadLocalAllocBuffer::reset_statistics();
  }
  ~ZMarkThreadClosure() {
    ZThreadLocalAllocBuffer::publish_statistics();
  }

  virtual void do_thread(Thread* thread) {
    JavaThread* const jt = JavaThread::cast(thread);

    StackWatermarkSet::finish_processing(jt, (void*)root_function(), StackWatermarkKind::gc);
    ZThreadLocalAllocBuffer::update_stats(jt);
  }
};

class ZMarkNMethodClosure : public NMethodClosure {
private:
  ZBarrierSetNMethod* const _bs_nm;

public:
  ZMarkNMethodClosure()
    : _bs_nm(static_cast<ZBarrierSetNMethod*>(BarrierSet::barrier_set()->barrier_set_nmethod())) {}

  virtual void do_nmethod(nmethod* nm) {
    ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
    if (_bs_nm->is_armed(nm)) {
      // Heal barriers
      ZNMethod::nmethod_patch_barriers(nm);

      // Heal oops
      ZUncoloredRootMarkOopClosure cl(ZNMethod::color(nm));
      ZNMethod::nmethod_oops_do_inner(nm, &cl);

      // CodeCache unloading support
      nm->mark_as_maybe_on_stack();

      log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by old", p2i(nm));

      // Disarm
      _bs_nm->disarm(nm);
    }
  }
};

class ZMarkYoungNMethodClosure : public NMethodClosure {
private:
  ZBarrierSetNMethod* const _bs_nm;

public:
  ZMarkYoungNMethodClosure()
    : _bs_nm(static_cast<ZBarrierSetNMethod*>(BarrierSet::barrier_set()->barrier_set_nmethod())) {}

  virtual void do_nmethod(nmethod* nm) {
    ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
    if (nm->is_unloading()) {
      return;
    }

    if (_bs_nm->is_armed(nm)) {
      const uintptr_t prev_color = ZNMethod::color(nm);

      // Heal oops
      ZUncoloredRootMarkYoungOopClosure cl(prev_color);
      ZNMethod::nmethod_oops_do_inner(nm, &cl);

      // Disarm only the young marking, not any potential old marking cycle

      const uintptr_t old_marked_mask = ZPointerMarkedMask ^ (ZPointerMarkedYoung0 | ZPointerMarkedYoung1);
      const uintptr_t old_marked = prev_color & old_marked_mask;
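      // old_marked_mask selects the old-generation mark bits by clearing the
      // young bits out of the full marked mask; preserving them in old_marked
      // keeps any concurrent old marking cycle armed in the new color.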

      const zpointer new_disarm_value_ptr = ZAddress::color(zaddress::null, ZPointerLoadGoodMask | ZPointerMarkedYoung | old_marked | ZPointerRemembered);

      // Check if disarming for the young mark completely disarms the nmethod entry barrier
      const bool complete_disarm = ZPointer::is_store_good(new_disarm_value_ptr);

      if (complete_disarm) {
        // We are about to completely disarm the nmethod, must take responsibility to patch all barriers before disarming
        ZNMethod::nmethod_patch_barriers(nm);
      }

      _bs_nm->guard_with(nm, (int)untype(new_disarm_value_ptr));

      if (complete_disarm) {
        log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by young (complete) [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, untype(new_disarm_value_ptr));
        assert(!_bs_nm->is_armed(nm), "Must not be considered armed anymore");
      } else {
        log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by young (incomplete) [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, untype(new_disarm_value_ptr));
        assert(_bs_nm->is_armed(nm), "Must be considered armed");
      }
    }
  }
};

typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_strong> ZMarkOldCLDClosure;

class ZMarkOldRootsTask : public ZTask {
private:
  ZMark* const                  _mark;
  ZRootsIteratorStrongColored   _roots_colored;
  ZRootsIteratorStrongUncolored _roots_uncolored;

  ZMarkOopClosure               _cl_colored;
  ZMarkOldCLDClosure            _cld_cl;

  ZMarkThreadClosure            _thread_cl;
  ZMarkNMethodClosure           _nm_cl;

public:
  ZMarkOldRootsTask(ZMark* mark)
    : ZTask("ZMarkOldRootsTask"),
      _mark(mark),
      _roots_colored(ZGenerationIdOptional::old),
      _roots_uncolored(ZGenerationIdOptional::old),
      _cl_colored(),
      _cld_cl(&_cl_colored),
      _thread_cl(),
      _nm_cl() {}

  virtual void work() {
    {
      ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootColoredOld);
      _roots_colored.apply(&_cl_colored,
                           &_cld_cl);
    }

    {
      ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootUncoloredOld);
      _roots_uncolored.apply(&_thread_cl,
                             &_nm_cl);
    }

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    ZHeap::heap()->mark_flush(Thread::current());
  }
};

class ZMarkYoungCLDClosure : public ClaimingCLDToOopClosure<ClassLoaderData::_claim_none> {
public:
  virtual void do_cld(ClassLoaderData* cld) {
    if (!cld->is_alive()) {
      // Skip marking through concurrently unloading CLDs
      return;
    }
    ClaimingCLDToOopClosure<ClassLoaderData::_claim_none>::do_cld(cld);
  }

  ZMarkYoungCLDClosure(OopClosure* cl)
    : ClaimingCLDToOopClosure<ClassLoaderData::_claim_none>(cl) {}
};

class ZMarkYoungRootsTask : public ZTask {
private:
  ZMark* const               _mark;
  ZRootsIteratorAllColored   _roots_colored;
  ZRootsIteratorAllUncolored _roots_uncolored;

  ZMarkYoungOopClosure       _cl_colored;
  ZMarkYoungCLDClosure       _cld_cl;

  ZMarkThreadClosure         _thread_cl;
  ZMarkYoungNMethodClosure   _nm_cl;

public:
  ZMarkYoungRootsTask(ZMark* mark)
    : ZTask("ZMarkYoungRootsTask"),
      _mark(mark),
      _roots_colored(ZGenerationIdOptional::young),
      _roots_uncolored(ZGenerationIdOptional::young),
      _cl_colored(),
      _cld_cl(&_cl_colored),
      _thread_cl(),
      _nm_cl() {}

  virtual void work() {
    {
      ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootColoredYoung);
      _roots_colored.apply(&_cl_colored,
                           &_cld_cl);
    }

    {
      ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootUncoloredYoung);
      _roots_uncolored.apply(&_thread_cl,
                             &_nm_cl);
    }

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    ZHeap::heap()->mark_flush(Thread::current());
  }
};

class ZMarkTask : public ZRestartableTask {
private:
  ZMark* const _mark;

public:
  ZMarkTask(ZMark* mark)
    : ZRestartableTask("ZMarkTask"),
      _mark(mark) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;
    _mark->follow_work_complete();
    // We might have found pointers into the other generation, and then we want to
    // publish such marking stacks to prevent that generation from getting a mark continue.
    // We also flush in case of a resize where a new worker thread continues the marking
    // work, causing a mark continue for the collected generation.
    ZHeap::heap()->mark_flush(Thread::current());
  }

  virtual void resize_workers(uint nworkers) {
    _mark->resize_workers(nworkers);
  }
};

void ZMark::resize_workers(uint nworkers) {
  _nworkers = nworkers;
  const size_t nstripes = calculate_nstripes(nworkers);
  _stripes.set_nstripes(nstripes);
  _terminate.reset(nworkers);
}

void ZMark::mark_young_roots() {
  SuspendibleThreadSetJoiner sts_joiner;
  ZMarkYoungRootsTask task(this);
  workers()->run(&task);
}

void ZMark::mark_old_roots() {
  SuspendibleThreadSetJoiner sts_joiner;
  ZMarkOldRootsTask task(this);
  workers()->run(&task);
}

void ZMark::mark_follow() {
  for (;;) {
    ZMarkTask task(this);
    workers()->run(&task);
    if (ZAbort::should_abort() || !try_terminate_flush()) {
      break;
    }
  }
}

bool ZMark::try_end() {
  if (_terminate.resurrected()) {
    // An oop was resurrected after concurrent termination.
    return false;
  }

  // Try end marking
  ZMarkFlushStacksHandshakeClosure cl(this);
  Threads::non_java_threads_do(&cl);

  // Check if non-java threads have any pending marking
  if (cl.flushed() || !_stripes.is_empty()) {
    return false;
  }

  // Mark completed
  return true;
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  _generation->stat_mark()->at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::free() {
  // Free any unused mark stack space
  _marking_smr.free();
}

bool ZMark::flush(Thread* thread) {
  if (thread->is_Java_thread()) {
    ZThreadLocalData::store_barrier_buffer(thread)->flush();
  }
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(thread, _generation->id());
  return stacks->flush(&_stripes, &_terminate);
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;
  const ZGenerationId _generation_id;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes, ZGenerationId id)
    : _stripes(stripes),
      _generation_id(id) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(thread, _generation_id);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes, _generation->id());
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}

void ZMark::verify_worker_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes, _generation->id());
  workers()->threads_do(&cl);
}