/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSetNMethod.hpp"
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zGenerationId.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkContext.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStackWatermark.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThreadLocalAllocBuffer.hpp"
#include "gc/z/zUncoloredRoot.inline.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zWorkers.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/continuation.hpp"
#include "runtime/handshake.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/ticks.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentMarkRootUncoloredYoung("Concurrent Mark Root Uncolored", ZGenerationId::young);
static const ZStatSubPhase ZSubPhaseConcurrentMarkRootColoredYoung("Concurrent Mark Root Colored", ZGenerationId::young);
static const ZStatSubPhase ZSubPhaseConcurrentMarkRootUncoloredOld("Concurrent Mark Root Uncolored", ZGenerationId::old);
static const ZStatSubPhase ZSubPhaseConcurrentMarkRootColoredOld("Concurrent Mark Root Colored", ZGenerationId::old);

ZMark::ZMark(ZGeneration* generation, ZPageTable* page_table)
  : _generation(generation),
    _page_table(page_table),
    _marking_smr(),
    _stripes(),
    _terminate(),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes from the number of workers we use,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
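  // For example, 6 workers round down to 4 stripes; the result is then
  // capped at ZMarkStripesMax.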
  const size_t nstripes = round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = workers()->active_workers();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  _generation->stat_mark()->at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe %zu(%zu)",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

ZWorkers* ZMark::workers() const {
  return _generation->workers();
}

void ZMark::prepare_work() {
  // Set number of workers to use
  _nworkers = workers()->active_workers();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

void ZMark::follow_work_complete() {
  follow_work(false /* partial */);
}

bool ZMark::follow_work_partial() {
  return follow_work(true /* partial */);
}

bool ZMark::is_array(zaddress addr) const {
  return to_oop(addr)->is_objArray();
}

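// A partial array slice is aligned to ZMarkPartialArrayMinSize, so the low
// bits of its heap offset are always zero and can be shifted away, letting
// the offset fit in a mark stack entry.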
static uintptr_t encode_partial_array_offset(zpointer* addr) {
  return untype(ZAddress::offset(to_zaddress((uintptr_t)addr))) >> ZMarkPartialArrayMinSizeShift;
}

static zpointer* decode_partial_array_offset(uintptr_t offset) {
  return (zpointer*)ZOffset::address(to_zoffset(offset << ZMarkPartialArrayMinSizeShift));
}

void ZMark::push_partial_array(zpointer* addr, size_t length, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(Thread::current(), _generation->id());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr((uintptr_t)addr);
  const uintptr_t offset = encode_partial_array_offset(addr);
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (%zu), stripe: %zu",
                                 p2i(addr), length, _stripes.stripe_id(stripe));

  stacks->push(&_stripes, stripe, &_terminate, entry, false /* publish */);
}

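// Apply the mark barrier to each element in the array slice. Young and old
// (possibly finalizable) marking use different barriers.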
static void mark_barrier_on_oop_array(volatile zpointer* p, size_t length, bool finalizable, bool young) {
  for (volatile const zpointer* const end = p + length; p < end; p++) {
    if (young) {
      ZBarrier::mark_barrier_on_young_oop_field(p);
    } else {
      ZBarrier::mark_barrier_on_old_oop_field(p, finalizable);
    }
  }
}

void ZMark::follow_array_elements_small(zpointer* addr, size_t length, bool finalizable) {
  assert(length <= ZMarkPartialArrayMinLength, "Too large, should be split");

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (%zu)", p2i(addr), length);

  mark_barrier_on_oop_array(addr, length, finalizable, _generation->is_young());
}

void ZMark::follow_array_elements_large(zpointer* addr, size_t length, bool finalizable) {
  assert(length <= (size_t)arrayOopDesc::max_array_length(T_OBJECT), "Too large");
  assert(length > ZMarkPartialArrayMinLength, "Too small, should not be split");

  zpointer* const start = addr;
  zpointer* const end = start + length;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  zpointer* const middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t    middle_length = align_down(end - middle_start, ZMarkPartialArrayMinLength);
  zpointer* const middle_end = middle_start + middle_length;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT" (%zu), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (%zu)",
                                 p2i(start), p2i(end), length, p2i(middle_start), p2i(middle_end), middle_length);

  // Push unaligned trailing part
  if (end > middle_end) {
    zpointer* const trailing_addr = middle_end;
    const size_t trailing_length = end - middle_end;
    push_partial_array(trailing_addr, trailing_length, finalizable);
  }

  // Push aligned middle part(s)
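  // Each iteration pushes the upper half of the remaining middle range, so
  // the middle is split into successively halved slices that other workers
  // can steal and split further.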
  zpointer* partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_length = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinLength);
    partial_addr -= partial_length;
    push_partial_array(partial_addr, partial_length, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  zpointer* const leading_addr = start;
  const size_t leading_length = middle_start - start;
  follow_array_elements_small(leading_addr, leading_length, finalizable);
}

void ZMark::follow_array_elements(zpointer* addr, size_t length, bool finalizable) {
  if (length <= ZMarkPartialArrayMinLength) {
    follow_array_elements_small(addr, length, finalizable);
  } else {
    follow_array_elements_large(addr, length, finalizable);
  }
}

void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  zpointer* const addr = decode_partial_array_offset(entry.partial_array_offset());
  const size_t length = entry.partial_array_length();

  follow_array_elements(addr, length, finalizable);
}

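// Closure used to follow the fields of a marked object. The generation
// template parameter statically selects which mark barrier to apply;
// ZGenerationIdOptional::none selects the generic mark barrier.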
template <bool finalizable, ZGenerationIdOptional generation>
class ZMarkBarrierFollowOopClosure : public OopIterateClosure {
private:
  static int claim_value() {
    return finalizable ? ClassLoaderData::_claim_finalizable
                       : ClassLoaderData::_claim_strong;
  }

  static ReferenceDiscoverer* discoverer() {
    if (!finalizable) {
      return ZGeneration::old()->reference_discoverer();
    } else {
      return nullptr;
    }
  }

  static bool visit_metadata() {
    // Only visit metadata if we're marking through the old generation
    return ZGeneration::old()->is_phase_mark();
  }

  const bool _visit_metadata;

public:
  ZMarkBarrierFollowOopClosure()
    : OopIterateClosure(discoverer()),
      _visit_metadata(visit_metadata()) {}

  virtual void do_oop(oop* p) {
    switch (generation) {
    case ZGenerationIdOptional::young:
      ZBarrier::mark_barrier_on_young_oop_field((volatile zpointer*)p);
      break;
    case ZGenerationIdOptional::old:
      ZBarrier::mark_barrier_on_old_oop_field((volatile zpointer*)p, finalizable);
      break;
    case ZGenerationIdOptional::none:
      ZBarrier::mark_barrier_on_oop_field((volatile zpointer*)p, finalizable);
      break;
    }
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }

  virtual bool do_metadata() final {
    // Only help out with metadata visiting
    return _visit_metadata;
  }

  virtual void do_nmethod(nmethod* nm) {
    assert(do_metadata(), "Don't call otherwise");
    assert(!finalizable, "Can't handle finalizable marking of nmethods");
    nm->run_nmethod_entry_barrier();
  }

  virtual void do_method(Method* m) {
    // Mark interpreted frames for class redefinition
    m->record_gc_epoch();
  }

  virtual void do_klass(Klass* klass) {
    ClassLoaderData* cld = klass->class_loader_data();
    ZMarkBarrierFollowOopClosure<finalizable, ZGenerationIdOptional::none> cl;
    cld->oops_do(&cl, claim_value());
  }

  virtual void do_cld(ClassLoaderData* cld) {
    ZMarkBarrierFollowOopClosure<finalizable, ZGenerationIdOptional::none> cl;
    cld->oops_do(&cl, claim_value());
  }
};

void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  if (_generation->is_old()) {
    if (finalizable) {
      ZMarkBarrierFollowOopClosure<true /* finalizable */, ZGenerationIdOptional::old> cl;
      cl.do_klass(obj->klass());
    } else {
      ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::old> cl;
      cl.do_klass(obj->klass());
    }
  } else {
    ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::none> cl;
    if (cl.do_metadata()) {
      cl.do_klass(obj->klass());
    }
  }

  // Should be convertible to colorless oop
  check_is_valid_zaddress(obj);

  zpointer* const addr = (zpointer*)obj->base();
  const size_t length = (size_t)obj->length();

  follow_array_elements(addr, length, finalizable);
}

void ZMark::follow_object(oop obj, bool finalizable) {
  if (_generation->is_old()) {
    assert(ZHeap::heap()->is_old(to_zaddress(obj)), "Should only follow objects from old gen");
    if (obj->is_stackChunk()) {
      // No support for tracing through stack chunks as finalizably reachable
      ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::old> cl;
      ZIterator::oop_iterate(obj, &cl);
    } else if (finalizable) {
      ZMarkBarrierFollowOopClosure<true /* finalizable */, ZGenerationIdOptional::old> cl;
      ZIterator::oop_iterate(obj, &cl);
    } else {
      ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::old> cl;
      ZIterator::oop_iterate(obj, &cl);
    }
  } else {
    // Young gen must help out with old marking
    ZMarkBarrierFollowOopClosure<false /* finalizable */, ZGenerationIdOptional::young> cl;
    ZIterator::oop_iterate(obj, &cl);
  }
}

void ZMark::mark_and_follow(ZMarkContext* context, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address and additional flags
  const zaddress addr = ZOffset::address(to_zoffset(entry.object_address()));
  const bool mark = entry.mark();
  bool inc_live = entry.inc_live();
  const bool follow = entry.follow();

  ZPage* const page = _page_table->get(addr);
  assert(page->is_relocatable(), "Invalid page state");

  // Mark
  if (mark && !page->mark_object(addr, finalizable, inc_live)) {
    // Already marked
    return;
  }

  // Increment live
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment padding can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    context->cache()->inc_live(page, aligned_size);
  }

  // Follow
  if (follow) {
    if (is_array(addr)) {
      follow_array_object(objArrayOop(to_oop(addr)), finalizable);
    } else {
      follow_object(to_oop(addr), finalizable);
    }
  }
}

// Returns true if we need to stop working, either to resize the number of
// worker threads or to abort marking
bool ZMark::rebalance_work(ZMarkContext* context) {
  const size_t assumed_nstripes = context->nstripes();
  const size_t nstripes = _stripes.nstripes();

  if (assumed_nstripes != nstripes) {
    // The number of stripes has changed; reflect that change locally
    context->set_nstripes(nstripes);
  } else if (nstripes < calculate_nstripes(_nworkers) && _stripes.is_crowded()) {
    // We are running with fewer stripes than the worker count warrants, to
    // minimize the amount of work hidden in local stacks when the stripes are
    // less well balanced. When the stripes start getting crowded, we bump the
    // number of stripes again.
    const size_t new_nstripes = nstripes << 1;
    if (_stripes.try_set_nstripes(nstripes, new_nstripes)) {
      context->set_nstripes(new_nstripes);
    }
  }

  ZMarkStripe* stripe = _stripes.stripe_for_worker(_nworkers, WorkerThread::worker_id());
  if (context->stripe() != stripe) {
    // Need to switch stripe
    context->set_stripe(stripe);
    flush(Thread::current());
  } else if (!_terminate.saturated()) {
    // Work imbalance detected; striped marking is likely going to be in the way
    flush(Thread::current());
  }

  SuspendibleThreadSet::yield();

  return ZAbort::should_abort() || _generation->should_worker_resize();
}

bool ZMark::drain(ZMarkContext* context) {
  ZMarkThreadLocalStacks* const stacks = context->stacks();
  ZMarkStackEntry entry;
  size_t processed = 0;

  context->set_stripe(_stripes.stripe_for_worker(_nworkers, WorkerThread::worker_id()));
  context->set_nstripes(_stripes.nstripes());

  // Drain stripe stacks
  while (stacks->pop(&_marking_smr, &_stripes, context->stripe(), &entry)) {
    mark_and_follow(context, entry);

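    // Periodically (first and every 32nd entry) check whether the work needs
    // rebalancing, or whether we need to stop to resize workers or abort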
    if ((processed++ & 31) == 0 && rebalance_work(context)) {
      return false;
    }
  }

  return true;
}

bool ZMark::try_steal_local(ZMarkContext* context) {
  ZMarkStripe* const stripe = context->stripe();
  ZMarkThreadLocalStacks* const stacks = context->stacks();

  // Try to steal a local stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = stacks->steal(&_stripes, victim_stripe);
    if (stack != nullptr) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

bool ZMark::try_steal_global(ZMarkContext* context) {
  ZMarkStripe* const stripe = context->stripe();
  ZMarkThreadLocalStacks* const stacks = context->stacks();

  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack(&_marking_smr);
    if (stack != nullptr) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

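// Steal from our own local stacks belonging to other stripes first, then
// from the shared stripe stacks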
bool ZMark::try_steal(ZMarkContext* context) {
  return try_steal_local(context) || try_steal_global(context);
}

class ZMarkFlushStacksHandshakeClosure : public HandshakeClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushStacksHandshakeClosure(ZMark* mark)
    : HandshakeClosure("ZMarkFlushStacks"),
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush(thread)) {
      _flushed = true;
      if (SafepointSynchronize::is_at_safepoint()) {
        log_debug(gc, marking)("Thread broke mark termination %s", thread->name());
      }
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

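// VM operation used to flush the VM thread's own mark stacks. It is not
// evaluated at a safepoint, so the closure runs in the VM thread without
// stopping the Java threads.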
class VM_ZMarkFlushOperation : public VM_Operation {
private:
  ThreadClosure* _cl;

public:
  VM_ZMarkFlushOperation(ThreadClosure* cl)
    : _cl(cl) {}

  virtual bool evaluate_at_safepoint() const {
    return false;
  }

  virtual void doit() {
    // Flush VM thread
    Thread* const thread = Thread::current();
    _cl->do_thread(thread);
  }

  virtual VMOp_Type type() const {
    return VMOp_ZMarkFlushOperation;
  }

  virtual bool is_gc_operation() const {
    return true;
  }
};

bool ZMark::flush() {
  ZMarkFlushStacksHandshakeClosure cl(this);
  VM_ZMarkFlushOperation vm_cl(&cl);
  Handshake::execute(&cl);
  VMThread::execute(&vm_cl);

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}

bool ZMark::try_terminate_flush() {
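  // Clear the resurrection flag before flushing; if an object is resurrected
  // while we flush, the caller treats it as more work and marking continues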
  AtomicAccess::inc(&_work_nterminateflush);
  _terminate.set_resurrected(false);

  if (ZVerifyMarking) {
    verify_worker_stacks_empty();
  }

  return flush() || _terminate.resurrected();
}

bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (WorkerThread::worker_id() != 0) {
    return false;
  }

  if (AtomicAccess::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax) {
    // Proactive flush limit reached
    return false;
  }

  AtomicAccess::inc(&_work_nproactiveflush);

  SuspendibleThreadSetLeaver sts_leaver;
  return flush();
}

bool ZMark::try_terminate(ZMarkContext* context) {
  return _terminate.try_terminate(&_stripes, context->nstripes());
}

void ZMark::leave() {
  _terminate.leave();
}

// Returns true if marking finished successfully, having followed all the work
// available to this worker. Returns false if marking finished unsuccessfully
// due to an abort or a worker resize.
bool ZMark::follow_work(bool partial) {
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, WorkerThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(Thread::current(), _generation->id());
  ZMarkContext context(ZMarkStripesMax, stripe, stacks);

  for (;;) {
    if (!drain(&context)) {
      leave();
      return false;
    }

    if (try_steal(&context)) {
      // Stole work
      continue;
    }

    if (partial) {
      return true;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate(&context)) {
      // Terminate
      return true;
    }
  }
}

class ZMarkOopClosure : public OopClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_oop_field((zpointer*)p, false /* finalizable */);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkYoungOopClosure : public OopClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_young_good_barrier_on_oop_field((zpointer*)p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkThreadClosure : public ThreadClosure {
private:
  static ZUncoloredRoot::RootFunction root_function() {
    return ZUncoloredRoot::mark;
  }

public:
  ZMarkThreadClosure() {
    ZThreadLocalAllocBuffer::reset_statistics();
  }
  ~ZMarkThreadClosure() {
    ZThreadLocalAllocBuffer::publish_statistics();
  }

  virtual void do_thread(Thread* thread) {
    JavaThread* const jt = JavaThread::cast(thread);

    StackWatermarkSet::finish_processing(jt, (void*)root_function(), StackWatermarkKind::gc);
    ZThreadLocalAllocBuffer::update_stats(jt);
  }
};

class ZMarkNMethodClosure : public NMethodClosure {
private:
  ZBarrierSetNMethod* const _bs_nm;

public:
  ZMarkNMethodClosure()
    : _bs_nm(static_cast<ZBarrierSetNMethod*>(BarrierSet::barrier_set()->barrier_set_nmethod())) {}

  virtual void do_nmethod(nmethod* nm) {
    ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
    if (_bs_nm->is_armed(nm)) {
      // Heal barriers
      ZNMethod::nmethod_patch_barriers(nm);

      // Heal oops
      ZUncoloredRootMarkOopClosure cl(ZNMethod::color(nm));
      ZNMethod::nmethod_oops_do_inner(nm, &cl);

      // CodeCache unloading support
      nm->mark_as_maybe_on_stack();

      log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by old", p2i(nm));

      // Disarm
      _bs_nm->disarm(nm);
    }
  }
};

class ZMarkYoungNMethodClosure : public NMethodClosure {
private:
  ZBarrierSetNMethod* const _bs_nm;

public:
  ZMarkYoungNMethodClosure()
    : _bs_nm(static_cast<ZBarrierSetNMethod*>(BarrierSet::barrier_set()->barrier_set_nmethod())) {}

  virtual void do_nmethod(nmethod* nm) {
    ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
    if (nm->is_unloading()) {
      return;
    }

    if (_bs_nm->is_armed(nm)) {
      const uintptr_t prev_color = ZNMethod::color(nm);

      // Heal oops
      ZUncoloredRootMarkYoungOopClosure cl(prev_color);
      ZNMethod::nmethod_oops_do_inner(nm, &cl);

      // Disarm only the young marking, not any potential old marking cycle
      const uintptr_t old_marked_mask = ZPointerMarkedMask ^ (ZPointerMarkedYoung0 | ZPointerMarkedYoung1);
      const uintptr_t old_marked = prev_color & old_marked_mask;

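      // Compose the new color: load-good, marked young, the preserved old
      // marked bits, and the remembered bit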
      const zpointer new_disarm_value_ptr = ZAddress::color(zaddress::null, ZPointerLoadGoodMask | ZPointerMarkedYoung | old_marked | ZPointerRemembered);

      // Check if disarming for young marking completely disarms the nmethod entry barrier
      const bool complete_disarm = ZPointer::is_store_good(new_disarm_value_ptr);

      if (complete_disarm) {
        // We are about to completely disarm the nmethod, so we must take
        // responsibility for patching all barriers before disarming
        ZNMethod::nmethod_patch_barriers(nm);
      }

      _bs_nm->guard_with(nm, (int)untype(new_disarm_value_ptr));

      if (complete_disarm) {
        log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by young (complete) [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, untype(new_disarm_value_ptr));
        assert(!_bs_nm->is_armed(nm), "Must not be considered armed anymore");
      } else {
        log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by young (incomplete) [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, untype(new_disarm_value_ptr));
        assert(_bs_nm->is_armed(nm), "Must be considered armed");
      }
    }
  }
};
 787 
 788 typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_strong> ZMarkOldCLDClosure;
 789 
 790 class ZMarkOldRootsTask : public ZTask {
 791 private:
 792   ZRootsIteratorStrongColored   _roots_colored;
 793   ZRootsIteratorStrongUncolored _roots_uncolored;
 794 
 795   ZMarkOopClosure               _cl_colored;
 796   ZMarkOldCLDClosure            _cld_cl;
 797 
 798   ZMarkThreadClosure            _thread_cl;
 799   ZMarkNMethodClosure           _nm_cl;
 800 
 801 public:
 802   ZMarkOldRootsTask()
 803     : ZTask("ZMarkOldRootsTask"),
 804       _roots_colored(ZGenerationIdOptional::old),
 805       _roots_uncolored(ZGenerationIdOptional::old),
 806       _cl_colored(),
 807       _cld_cl(&_cl_colored),
 808       _thread_cl(),
 809       _nm_cl() {}
 810 
 811   virtual void work() {
 812     {
 813       ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootColoredOld);
 814       _roots_colored.apply(&_cl_colored,
 815                            &_cld_cl);
 816     }
 817 
 818     {
 819       ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootUncoloredOld);
 820       _roots_uncolored.apply(&_thread_cl,
 821                              &_nm_cl);
 822     }
 823 
 824     // Flush and free worker stacks. Needed here since
 825     // the set of workers executing during root scanning
 826     // can be different from the set of workers executing
 827     // during mark.
 828     ZHeap::heap()->mark_flush(Thread::current());
 829   }
 830 };
 831 
 832 class ZMarkYoungCLDClosure : public ClaimingCLDToOopClosure<ClassLoaderData::_claim_none> {
 833 public:
 834   virtual void do_cld(ClassLoaderData* cld) {
 835     if (!cld->is_alive()) {
 836       // Skip marking through concurrently unloading CLDs
 837       return;
 838     }
 839     ClaimingCLDToOopClosure<ClassLoaderData::_claim_none>::do_cld(cld);
 840   }
 841 
 842   ZMarkYoungCLDClosure(OopClosure* cl)
 843     : ClaimingCLDToOopClosure<ClassLoaderData::_claim_none>(cl) {}
 844 };
 845 
 846 class ZMarkYoungRootsTask : public ZTask {
 847 private:
 848   ZRootsIteratorAllColored   _roots_colored;
 849   ZRootsIteratorAllUncolored _roots_uncolored;
 850 
 851   ZMarkYoungOopClosure       _cl_colored;
 852   ZMarkYoungCLDClosure       _cld_cl;
 853 
 854   ZMarkThreadClosure         _thread_cl;
 855   ZMarkYoungNMethodClosure   _nm_cl;
 856 
 857 public:
 858   ZMarkYoungRootsTask()
 859     : ZTask("ZMarkYoungRootsTask"),
 860       _roots_colored(ZGenerationIdOptional::young),
 861       _roots_uncolored(ZGenerationIdOptional::young),
 862       _cl_colored(),
 863       _cld_cl(&_cl_colored),
 864       _thread_cl(),
 865       _nm_cl() {}
 866 
 867   virtual void work() {
 868     {
 869       ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootColoredYoung);
 870       _roots_colored.apply(&_cl_colored,
 871                            &_cld_cl);
 872     }
 873 
 874     {
 875       ZStatTimerWorker timer(ZSubPhaseConcurrentMarkRootUncoloredYoung);
 876       _roots_uncolored.apply(&_thread_cl,
 877                              &_nm_cl);
 878     }
 879 
 880     // Flush and free worker stacks. Needed here since
 881     // the set of workers executing during root scanning
 882     // can be different from the set of workers executing
 883     // during mark.
 884     ZHeap::heap()->mark_flush(Thread::current());
 885   }
 886 };
 887 
 888 class ZMarkTask : public ZRestartableTask {
 889 private:
 890   ZMark* const _mark;
 891 
 892 public:
 893   ZMarkTask(ZMark* mark)
 894     : ZRestartableTask("ZMarkTask"),
 895       _mark(mark) {
 896     _mark->prepare_work();
 897   }
 898 
 899   ~ZMarkTask() {
 900     _mark->finish_work();
 901   }
 902 
 903   virtual void work() {
 904     SuspendibleThreadSetJoiner sts_joiner;
 905     _mark->follow_work_complete();
 906     // We might have found pointers into the other generation, and then we want to
 907     // publish such marking stacks to prevent that generation from getting a mark continue.
 908     // We also flush in case of a resize where a new worker thread continues the marking
 909     // work, causing a mark continue for the collected generation.
 910     ZHeap::heap()->mark_flush(Thread::current());
 911   }
 912 
 913   virtual void resize_workers(uint nworkers) {
 914     _mark->resize_workers(nworkers);
 915   }
 916 };
 917 
 918 void ZMark::resize_workers(uint nworkers) {
 919   _nworkers = nworkers;
 920   const size_t nstripes = calculate_nstripes(nworkers);
 921   _stripes.set_nstripes(nstripes);
 922   _terminate.reset(nworkers);
 923 }
 924 
 925 void ZMark::mark_young_roots() {
 926   SuspendibleThreadSetJoiner sts_joiner;
 927   ZMarkYoungRootsTask task;
 928   workers()->run(&task);
 929 }
 930 
 931 void ZMark::mark_old_roots() {
 932   SuspendibleThreadSetJoiner sts_joiner;
 933   ZMarkOldRootsTask task;
 934   workers()->run(&task);
 935 }
 936 
 937 void ZMark::mark_follow() {
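  // Run mark tasks repeatedly until a terminating flush finds no more work,
  // or marking is aborted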
  for (;;) {
    ZMarkTask task(this);
    workers()->run(&task);
    if (ZAbort::should_abort() || !try_terminate_flush()) {
      break;
    }
  }
}

bool ZMark::try_end() {
  if (_terminate.resurrected()) {
    // An oop was resurrected after concurrent termination.
    return false;
  }

  // Try end marking
  ZMarkFlushStacksHandshakeClosure cl(this);
  Threads::non_java_threads_do(&cl);

  // Check if non-java threads have any pending marking
  if (cl.flushed() || !_stripes.is_empty()) {
    return false;
  }

  // Mark completed
  return true;
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  _generation->stat_mark()->at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::free() {
  // Free any unused mark stack space
  _marking_smr.free();
}

bool ZMark::flush(Thread* thread) {
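  // Flushing the store barrier buffer first may add entries to this thread's
  // mark stacks, which are then flushed below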
  if (thread->is_Java_thread()) {
    ZThreadLocalData::store_barrier_buffer(thread)->flush();
  }
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(thread, _generation->id());
  return stacks->flush(&_stripes, &_terminate);
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;
  const ZGenerationId _generation_id;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes, ZGenerationId id)
    : _stripes(stripes),
      _generation_id(id) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::mark_stacks(thread, _generation_id);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes, _generation->id());
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}

void ZMark::verify_worker_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes, _generation->id());
  workers()->threads_do(&cl);
}