/*
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCompactionManagerNew.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompactNew.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/methodData.hpp"
#include "runtime/java.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

SpaceInfoNew PSParallelCompactNew::_space_info[PSParallelCompactNew::last_space_id];

size_t PSParallelCompactNew::_num_regions;
PCRegionData* PSParallelCompactNew::_region_data_array;
size_t PSParallelCompactNew::_num_regions_serial;
PCRegionData* PSParallelCompactNew::_region_data_array_serial;
PCRegionData** PSParallelCompactNew::_per_worker_region_data;
bool PSParallelCompactNew::_serial = false;

SpanSubjectToDiscoveryClosure PSParallelCompactNew::_span_based_discoverer;
ReferenceProcessor* PSParallelCompactNew::_ref_processor = nullptr;

void PSParallelCompactNew::print_on(outputStream* st) {
  _mark_bitmap.print_on(st);
}

STWGCTimer          PSParallelCompactNew::_gc_timer;
ParallelOldTracer   PSParallelCompactNew::_gc_tracer;
elapsedTimer        PSParallelCompactNew::_accumulated_time;
unsigned int        PSParallelCompactNew::_maximum_compaction_gc_num = 0;
CollectorCounters*  PSParallelCompactNew::_counters = nullptr;
ParMarkBitMap       PSParallelCompactNew::_mark_bitmap;

PSParallelCompactNew::IsAliveClosure PSParallelCompactNew::_is_alive_closure;

class PCAdjustPointerClosure: public BasicOopIterateClosure {
  template <typename T>
  void do_oop_work(T* p) { PSParallelCompactNew::adjust_pointer(p); }

public:
  void do_oop(oop* p) final          { do_oop_work(p); }
  void do_oop(narrowOop* p) final    { do_oop_work(p); }

  ReferenceIterationMode reference_iteration_mode() final { return DO_FIELDS; }
};

static PCAdjustPointerClosure pc_adjust_pointer_closure;

class IsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) final;
};

bool PSParallelCompactNew::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompactNew::post_initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _span_based_discoverer.set_span(heap->reserved_region());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,   // mt processing degree
                           ParallelGCThreads,   // mt discovery degree
                           false,               // concurrent_discovery
                           &_is_alive_closure); // non-header is alive closure

  _counters = new CollectorCounters("Parallel full collection pauses", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManagerNew::initialize(mark_bitmap());
}

bool PSParallelCompactNew::initialize_aux_data() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();
  assert(mr.byte_size() != 0, "heap should be reserved");

  initialize_space_info();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate %zuKB bitmaps for parallel "
      "garbage collection for the requested %zuKB heap.",
      _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  return true;
}

void PSParallelCompactNew::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  PSYoungGen* young_gen = ParallelScavengeHeap::young_gen();

  _space_info[old_space_id].set_space(ParallelScavengeHeap::old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[old_space_id].set_start_array(ParallelScavengeHeap::old_gen()->start_array());
}

void
PSParallelCompactNew::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that will
  // be set at the end of GC.  The marking bitmap is cleared to top; nothing
  // should be marked above top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();

  _mark_bitmap.clear_range(bot, top);
}

void PSParallelCompactNew::pre_compact()
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc.  Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of young
  // collections will have swapped the spaces an unknown number of times.
  GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _space_info[from_space_id].set_space(ParallelScavengeHeap::young_gen()->from_space());
  _space_info[to_space_id].set_space(ParallelScavengeHeap::young_gen()->to_space());

  // Increment the invocation count
  heap->increment_total_collections(true);

  CodeCache::on_gc_marking_cycle_start();

  heap->print_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
}

void PSParallelCompactNew::post_compact()
{
  GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);

  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap covering this space.
    clear_data_covering_space(SpaceId(id));
  }

  {
    PCRegionData* last_live[last_space_id];
    for (uint i = old_space_id; i < last_space_id; ++i) {
      last_live[i] = nullptr;
    }

    // Figure out last region in each space that has live data.
    uint space_id = old_space_id;
    MutableSpace* space = _space_info[space_id].space();
    size_t num_regions = get_num_regions();
    PCRegionData* region_data_array = get_region_data_array();
    last_live[space_id] = &region_data_array[0];
    for (size_t idx = 0; idx < num_regions; idx++) {
      PCRegionData* rd = region_data_array + idx;
      if (!space->contains(rd->bottom())) {
        ++space_id;
        assert(space_id < last_space_id, "invariant");
        space = _space_info[space_id].space();
        log_develop_trace(gc, compaction)("Last live for space: %u: %zu", space_id, idx);
        last_live[space_id] = rd;
      }
      assert(space->contains(rd->bottom()), "next space should contain next region");
      log_develop_trace(gc, compaction)("post-compact region: idx: %zu, bottom: " PTR_FORMAT ", new_top: " PTR_FORMAT ", end: " PTR_FORMAT, rd->idx(), p2i(rd->bottom()), p2i(rd->new_top()), p2i(rd->end()));
      if (rd->new_top() > rd->bottom()) {
        last_live[space_id] = rd;
        log_develop_trace(gc, compaction)("Bump last live for space: %u", space_id);
      }
    }

    for (uint i = old_space_id; i < last_space_id; ++i) {
      PCRegionData* rd = last_live[i];
      log_develop_trace(gc, compaction)(
              "Last live region in space: %u, compaction region, " PTR_FORMAT ", #%zu: [" PTR_FORMAT ", " PTR_FORMAT "), new_top: " PTR_FORMAT,
              i, p2i(rd), rd->idx(),
              p2i(rd->bottom()), p2i(rd->end()), p2i(rd->new_top()));
    }

    // Fill all gaps and update the space boundaries.
    space_id = old_space_id;
    space = _space_info[space_id].space();
    size_t total_live = 0;
    size_t total_waste = 0;
    for (size_t idx = 0; idx < num_regions; idx++) {
      PCRegionData* rd = &region_data_array[idx];
      PCRegionData* last_live_in_space = last_live[space_id];
      assert(last_live_in_space != nullptr, "last live must not be null");
      if (rd != last_live_in_space) {
        if (rd->new_top() < rd->end()) {
          ObjectStartArray* sa = start_array(SpaceId(space_id));
          if (sa != nullptr) {
            sa->update_for_block(rd->new_top(), rd->end());
          }
          ParallelScavengeHeap::heap()->fill_with_dummy_object(rd->new_top(), rd->end(), false);
        }
        size_t live = pointer_delta(rd->new_top(), rd->bottom());
        size_t waste = pointer_delta(rd->end(), rd->new_top());
        total_live += live;
        total_waste += waste;
        log_develop_trace(gc, compaction)(
                "Live compaction region, #%zu: [" PTR_FORMAT ", " PTR_FORMAT "), new_top: " PTR_FORMAT ", live: %zu, waste: %zu",
                rd->idx(),
                p2i(rd->bottom()), p2i(rd->end()), p2i(rd->new_top()), live, waste);
      } else {
        // Update top of space.
        space->set_top(rd->new_top());
        size_t live = pointer_delta(rd->new_top(), rd->bottom());
        total_live += live;
        log_develop_trace(gc, compaction)(
                "Live compaction region, #%zu: [" PTR_FORMAT ", " PTR_FORMAT "), new_top: " PTR_FORMAT ", live: %zu, waste: %zu",
                rd->idx(),
                p2i(rd->bottom()), p2i(rd->end()), p2i(rd->new_top()), live, size_t(0));

        // Fast-forward to the next space.
        for (; idx < num_regions - 1; idx++) {
          rd = &region_data_array[idx + 1];
          if (!space->contains(rd->bottom())) {
            space_id++;
            assert(space_id < last_space_id, "must be");
            space = _space_info[space_id].space();
            assert(space->contains(rd->bottom()), "space must contain region");
            break;
          }
        }
      }
    }
    log_develop_debug(gc, compaction)("total live: %zu, total waste: %zu, ratio: %f", total_live, total_waste, ((float)total_waste)/((float)(total_live + total_waste)));
  }
  {
    FREE_C_HEAP_ARRAY(PCRegionData*, _per_worker_region_data);
    FREE_C_HEAP_ARRAY(PCRegionData, _region_data_array);
    FREE_C_HEAP_ARRAY(PCRegionData, _region_data_array_serial);
  }
#ifdef ASSERT
  {
    mark_bitmap()->verify_clear();
  }
#endif

  ParCompactionManagerNew::flush_all_string_dedup_requests();

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  bool eden_empty = eden_space->is_empty();

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
    to_space->is_empty();

  PSCardTable* ct = heap->card_table();
  MemRegion old_mr = ParallelScavengeHeap::old_gen()->committed();
  if (young_gen_empty) {
    ct->clear_MemRegion(old_mr);
  } else {
    ct->dirty_MemRegion(old_mr);
  }

  {
    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
    DEBUG_ONLY(MetaspaceUtils::verify();)
  }

  // Need to clear claim bits for the next mark.
  ClassLoaderDataGraph::clear_claimed_marks();

  heap->prune_scavengable_nmethods();

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();
}

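// Carve each space into fixed-size regions of SpaceAlignment words. When a
// live object crosses a region boundary, the region's end is extended past
// the object's last word, so every object belongs to exactly one region and
// regions can be processed independently by worker threads.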
void PSParallelCompactNew::setup_regions_parallel() {
  static const size_t REGION_SIZE_WORDS = (SpaceAlignment / HeapWordSize);
  size_t num_regions = 0;
  for (uint i = old_space_id; i < last_space_id; ++i) {
    MutableSpace* const space = _space_info[i].space();
    size_t const space_size_words = space->capacity_in_words();
    num_regions += align_up(space_size_words, REGION_SIZE_WORDS) / REGION_SIZE_WORDS;
  }
  _region_data_array = NEW_C_HEAP_ARRAY(PCRegionData, num_regions, mtGC);

  size_t region_idx = 0;
  for (uint i = old_space_id; i < last_space_id; ++i) {
    const MutableSpace* space = _space_info[i].space();
    HeapWord* addr = space->bottom();
    HeapWord* sp_end = space->end();
    HeapWord* sp_top = space->top();
    while (addr < sp_end) {
      HeapWord* end = MIN2(align_up(addr + REGION_SIZE_WORDS, REGION_SIZE_WORDS), space->end());
      if (addr < sp_top) {
        HeapWord* prev_obj_start = _mark_bitmap.find_obj_beg_reverse(addr, end);
        if (prev_obj_start < end) {
          HeapWord* prev_obj_end = prev_obj_start + cast_to_oop(prev_obj_start)->size();
          if (end < prev_obj_end) {
            // Object crosses region boundary, adjust end to be after object's last word.
            end = prev_obj_end;
          }
        }
      }
      assert(region_idx < num_regions, "must not exceed number of regions: region_idx: %zu, num_regions: %zu", region_idx, num_regions);
      HeapWord* top;
      if (sp_top < addr) {
        top = addr;
      } else if (sp_top >= end) {
        top = end;
      } else {
        top = sp_top;
      }
      assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr must be in heap: " PTR_FORMAT, p2i(addr));
      new (_region_data_array + region_idx) PCRegionData(region_idx, addr, top, end);
      addr = end;
      region_idx++;
    }
  }
  _num_regions = region_idx;
  log_info(gc)("Number of regions: %zu", _num_regions);
}

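// Set up a single region per space, each covering the whole space. Used when
// maximum compaction is requested (see invoke()): one worker then forwards
// and compacts each space in its entirety, achieving perfect compaction.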
void PSParallelCompactNew::setup_regions_serial() {
  _num_regions_serial = last_space_id;
  _region_data_array_serial = NEW_C_HEAP_ARRAY(PCRegionData, _num_regions_serial, mtGC);
  new (_region_data_array_serial + old_space_id)  PCRegionData(old_space_id, space(old_space_id)->bottom(), space(old_space_id)->top(), space(old_space_id)->end());
  new (_region_data_array_serial + eden_space_id) PCRegionData(eden_space_id, space(eden_space_id)->bottom(), space(eden_space_id)->top(), space(eden_space_id)->end());
  new (_region_data_array_serial + from_space_id) PCRegionData(from_space_id, space(from_space_id)->bottom(), space(from_space_id)->top(), space(from_space_id)->end());
  new (_region_data_array_serial + to_space_id)   PCRegionData(to_space_id, space(to_space_id)->bottom(), space(to_space_id)->top(), space(to_space_id)->end());
}

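// Returns true if this full GC should compact maximally: either the caller
// requested it, or this is a user-requested GC (System.gc()) and
// UseMaximumCompactionOnSystemGC is enabled.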
bool PSParallelCompactNew::check_maximum_compaction(bool should_do_max_compaction) {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Check System.GC
  bool is_max_on_system_gc = UseMaximumCompactionOnSystemGC
                          && GCCause::is_user_requested_gc(heap->gc_cause());

  return should_do_max_compaction
      || is_max_on_system_gc;
}

void PSParallelCompactNew::summary_phase() {
  GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);

  setup_regions_serial();
  setup_regions_parallel();

#ifndef PRODUCT
  for (size_t idx = 0; idx < _num_regions; idx++) {
    PCRegionData* rd = &_region_data_array[idx];
    log_develop_trace(gc, compaction)("Compaction region #%zu: [" PTR_FORMAT ", " PTR_FORMAT ")", rd->idx(), p2i(rd->bottom()), p2i(rd->end()));
  }
#endif
}

// This method should contain all heap-specific policy for invoking a full
// collection.  invoke_no_policy() will only attempt to compact the heap; it
// will do nothing further.  If we need to bail out for policy reasons, scavenge
// before full GC, or perform any other specialized behavior, it needs to be
// added here.
//
// Note that this method should only be called from the vm_thread while at a
// safepoint.
//
// Note that the all_soft_refs_clear flag in the soft ref policy
// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.
bool PSParallelCompactNew::invoke(bool clear_all_soft_refs, bool should_do_max_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(),
         "should be in vm thread");

  SvcGCMarker sgcm(SvcGCMarker::FULL);
  IsSTWGCActiveMark mark;

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  GCIdMark gc_id_mark;
  _gc_timer.register_gc_start();
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  GCCause::Cause gc_cause = heap->gc_cause();
  PSYoungGen* young_gen = ParallelScavengeHeap::young_gen();
  PSOldGen* old_gen = ParallelScavengeHeap::old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // Make sure data structures are sane, make the heap parsable, and do other
  // miscellaneous bookkeeping.
  pre_compact();

  const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

  {
    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

    if (check_maximum_compaction(should_do_max_compaction)) {
      // Serial compaction executes the forwarding and compaction phases serially,
      // thus achieving perfect compaction.
      // Marking and reference adjustment are still executed by parallel threads.
      _serial = true;
    } else {
      _serial = false;
    }

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);

    heap->pre_full_gc_dump(&_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->start_discovery(clear_all_soft_refs);

    ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                              false /* unregister_nmethods_during_purge */,
                              false /* lock_nmethod_free_separately */);

    marking_phase(&_gc_tracer);

    summary_phase();

#if COMPILER2_OR_JVMCI
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    FullGCForwarding::begin();

    forward_to_new_addr();

    adjust_pointers();

    compact();

    FullGCForwarding::end();

    ParCompactionManagerNew::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());

    // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
    // done before resizing.
    post_compact();

    // Let the size policy know we're done
    size_policy->major_collection_end();

    size_policy->sample_old_gen_used_bytes(MAX2(pre_gc_values.old_gen_used(), old_gen->used_in_bytes()));

    if (UseAdaptiveSizePolicy) {
      heap->resize_after_full_gc();
    }

    heap->resize_all_tlabs();

    // Resize the metaspace capacity after a collection
    MetaspaceGC::compute_new_size();

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(&_gc_timer);

    size_policy->record_gc_pause_end_instant();
  }

  heap->gc_epilogue(true);

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return true;
}

class PCAddThreadRootsMarkingTaskClosureNew : public ThreadClosure {
private:
  uint _worker_id;

public:
  explicit PCAddThreadRootsMarkingTaskClosureNew(uint worker_id) : _worker_id(worker_id) { }
  void do_thread(Thread* thread) final {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    ResourceMark rm;

    ParCompactionManagerNew* cm = ParCompactionManagerNew::gc_thread_compaction_manager(_worker_id);

    MarkingNMethodClosure mark_and_push_in_blobs(&cm->_mark_and_push_closure);

    thread->oops_do(&cm->_mark_and_push_closure, &mark_and_push_in_blobs);

    // Do the real work
    cm->follow_marking_stacks();
  }
};

void steal_marking_work_new(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  ParCompactionManagerNew* cm =
    ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);

  do {
    ScannerTask task;
    if (ParCompactionManagerNew::steal(worker_id, task)) {
      cm->follow_contents(task, true);
    }
    cm->follow_marking_stacks();
  } while (!terminator.offer_termination());
}

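// Worker task for the marking phase: each worker marks from strong roots
// (CLDs, Java threads, OopStorage) and drains its marking stack; when running
// with more than one worker, work stealing balances the remaining marking.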
class MarkFromRootsTaskNew : public WorkerTask {
  NMethodMarkingScope _nmethod_marking_scope;
  ThreadsClaimTokenScope _threads_claim_token_scope;
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
  TaskTerminator _terminator;
  uint _active_workers;

public:
  explicit MarkFromRootsTaskNew(uint active_workers) :
      WorkerTask("MarkFromRootsTaskNew"),
      _nmethod_marking_scope(),
      _threads_claim_token_scope(),
      _terminator(active_workers, ParCompactionManagerNew::marking_stacks()),
      _active_workers(active_workers) {}

  void work(uint worker_id) final {
    ParCompactionManagerNew* cm = ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);
    {
      CLDToOopClosure cld_closure(&cm->_mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
      ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);

      // Do the real work
      cm->follow_marking_stacks();
    }

    {
      PCAddThreadRootsMarkingTaskClosureNew closure(worker_id);
      Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &closure);
    }

    // Mark from OopStorages
    {
      _oop_storage_set_par_state.oops_do(&cm->_mark_and_push_closure);
      // Do the real work
      cm->follow_marking_stacks();
    }

    if (_active_workers > 1) {
      steal_marking_work_new(_terminator, worker_id);
    }
  }
};

class ParallelCompactRefProcProxyTaskNew : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  explicit ParallelCompactRefProcProxyTaskNew(uint max_workers)
    : RefProcProxyTask("ParallelCompactRefProcProxyTaskNew", max_workers),
      _terminator(_max_workers, ParCompactionManagerNew::marking_stacks()) {}

  void work(uint worker_id) final {
    assert(worker_id < _max_workers, "sanity");
    ParCompactionManagerNew* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManagerNew::get_vmthread_cm() : ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    ParCompactionManagerNew::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
    _rp_task->rp_work(worker_id, PSParallelCompactNew::is_alive_closure(), &cm->_mark_and_push_closure, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() final {
    _terminator.reset_for_reuse(_queue_count);
  }
};

void PSParallelCompactNew::marking_phase(ParallelOldTracer *gc_tracer) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);

  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
  {
    GCTraceTime(Debug, gc, phases) pm_tm("Par Mark", &_gc_timer);

    MarkFromRootsTaskNew task(active_gc_threads);
    ParallelScavengeHeap::heap()->workers().run_task(&task);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) rp_tm("Reference Processing", &_gc_timer);

    ReferenceProcessorStats stats;
    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());

    ParallelCompactRefProcProxyTaskNew task(ref_processor()->max_num_queues());
    stats = ref_processor()->process_discovered_references(task, &ParallelScavengeHeap::heap()->workers(), pt);

    gc_tracer->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  // This is the point where the entire marking should have completed.
  ParCompactionManagerNew::verify_all_marking_stack_empty();

  {
    GCTraceTime(Debug, gc, phases) wp_tm("Weak Processing", &_gc_timer);
    WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
                                is_alive_closure(),
                                &do_nothing_cl,
                                1);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(is_alive_closure());

      // Follow system dictionary roots and unload classes.
      unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release the memory of unloaded nmethods.
      ctx->purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
      ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx->free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
  }

  {
    GCTraceTime(Debug, gc, phases) roc_tm("Report Object Count", &_gc_timer);
    _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
  }
#if TASKQUEUE_STATS
  ParCompactionManagerNew::print_and_reset_taskqueue_stats();
#endif
}

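// Adjust all oops in the live objects of all regions. Workers race to claim
// regions via PCRegionData::claim(), so any worker may process any region and
// the load is balanced dynamically.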
void PSParallelCompactNew::adjust_pointers_in_spaces(uint worker_id) {
  auto start_time = Ticks::now();
  for (size_t i = 0; i < _num_regions; i++) {
    PCRegionData* region = &_region_data_array[i];
    if (!region->claim()) {
      continue;
    }
    log_trace(gc, compaction)("Adjusting pointers in region: %zu (worker_id: %u)", region->idx(), worker_id);
    HeapWord* end = region->top();
    HeapWord* current = _mark_bitmap.find_obj_beg(region->bottom(), end);
    while (current < end) {
      assert(_mark_bitmap.is_marked(current), "must be marked");
      oop obj = cast_to_oop(current);
      size_t size = obj->size();
      obj->oop_iterate(&pc_adjust_pointer_closure);
      current = _mark_bitmap.find_obj_beg(current + size, end);
    }
  }
  log_trace(gc, phases)("adjust_pointers_in_spaces worker %u: %.3f ms", worker_id, (Ticks::now() - start_time).seconds() * 1000);
}

class PSAdjustTaskNew final : public WorkerTask {
  SubTasksDone                               _sub_tasks;
  WeakProcessor::Task                        _weak_proc_task;
  OopStorageSetStrongParState<false, false>  _oop_storage_iter;
  uint                                       _nworkers;

  enum PSAdjustSubTask {
    PSAdjustSubTask_code_cache,

    PSAdjustSubTask_num_elements
  };

public:
  explicit PSAdjustTaskNew(uint nworkers) :
    WorkerTask("PSAdjust task"),
    _sub_tasks(PSAdjustSubTask_num_elements),
    _weak_proc_task(nworkers),
    _nworkers(nworkers) {

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
    if (nworkers > 1) {
      Threads::change_thread_claim_token();
    }
  }

  ~PSAdjustTaskNew() {
    Threads::assert_all_threads_claimed();
  }

  void work(uint worker_id) final {
    ParCompactionManagerNew* cm = ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);
    cm->preserved_marks()->adjust_during_full_gc();
    {
      // adjust pointers in all spaces
      PSParallelCompactNew::adjust_pointers_in_spaces(worker_id);
    }
    {
      ResourceMark rm;
      Threads::possibly_parallel_oops_do(_nworkers > 1, &pc_adjust_pointer_closure, nullptr);
    }
    _oop_storage_iter.oops_do(&pc_adjust_pointer_closure);
    {
      CLDToOopClosure cld_closure(&pc_adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
      ClassLoaderDataGraph::cld_do(&cld_closure);
    }
    {
      AlwaysTrueClosure always_alive;
      _weak_proc_task.work(worker_id, &always_alive, &pc_adjust_pointer_closure);
    }
    if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
      NMethodToOopClosure adjust_code(&pc_adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
      CodeCache::nmethods_do(&adjust_code);
    }
    _sub_tasks.all_tasks_claimed();
  }
};

void PSParallelCompactNew::adjust_pointers() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Adjust Pointers", &_gc_timer);
  uint num_workers = ParallelScavengeHeap::heap()->workers().active_workers();
  PSAdjustTaskNew task(num_workers);
  ParallelScavengeHeap::heap()->workers().run_task(&task);
}

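// Compute the new address of every live object. Regions are distributed
// round-robin across workers (region index modulo number of workers); each
// worker links its regions into a local list via local_next() and forwards
// objects from its regions into regions of that same list, so forwarding
// needs no synchronization between workers.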
void PSParallelCompactNew::forward_to_new_addr() {
  GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
  uint num_workers = get_num_workers();
  _per_worker_region_data = NEW_C_HEAP_ARRAY(PCRegionData*, num_workers, mtGC);
  for (uint i = 0; i < num_workers; i++) {
    _per_worker_region_data[i] = nullptr;
  }

  class ForwardState {
    uint _worker_id;
    PCRegionData* _compaction_region;
    HeapWord* _compaction_point;

    void ensure_compaction_point() {
      if (_compaction_point == nullptr) {
        assert(_compaction_region == nullptr, "invariant");
        _compaction_region = _per_worker_region_data[_worker_id];
        assert(_compaction_region != nullptr, "invariant");
        _compaction_point = _compaction_region->bottom();
      }
    }
  public:
    explicit ForwardState(uint worker_id) :
            _worker_id(worker_id),
            _compaction_region(nullptr),
            _compaction_point(nullptr) {
    }

    size_t available() const {
      return pointer_delta(_compaction_region->end(), _compaction_point);
    }

    void forward_objs_in_region(ParCompactionManagerNew* cm, PCRegionData* region) {
      ensure_compaction_point();
      HeapWord* end = region->end();
      HeapWord* current = _mark_bitmap.find_obj_beg(region->bottom(), end);
      while (current < end) {
        assert(_mark_bitmap.is_marked(current), "must be marked");
        oop obj = cast_to_oop(current);
        assert(region->contains(obj), "object must not cross region boundary: obj: " PTR_FORMAT ", obj_end: " PTR_FORMAT ", region start: " PTR_FORMAT ", region end: " PTR_FORMAT, p2i(obj), p2i(cast_from_oop<HeapWord*>(obj) + obj->size()), p2i(region->bottom()), p2i(region->end()));
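        // An object that moves may need more space at its destination than it
        // occupies now (copy_size() can add room, e.g. for a preserved identity
        // hash with compact object headers); an object that stays in place
        // keeps its current size.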
        size_t old_size = obj->size();
        size_t new_size = obj->copy_size(old_size, obj->mark());
        size_t size = (current == _compaction_point) ? old_size : new_size;
        while (size > available()) {
          _compaction_region->set_new_top(_compaction_point);
          _compaction_region = _compaction_region->local_next();
          assert(_compaction_region != nullptr, "must find a compaction region");
          _compaction_point = _compaction_region->bottom();
          size = (current == _compaction_point) ? old_size : new_size;
        }
        //log_develop_trace(gc, compaction)("Forwarding obj: " PTR_FORMAT ", to: " PTR_FORMAT, p2i(obj), p2i(_compaction_point));
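        // Only objects that actually move are forwarded; installing the
        // forwarding pointer overwrites the mark word, so preserve any
        // non-trivial mark first.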
        if (current != _compaction_point) {
          cm->preserved_marks()->push_if_necessary(obj, obj->mark());
          FullGCForwarding::forward_to(obj, cast_to_oop(_compaction_point));
        }
        _compaction_point += size;
        assert(_compaction_point <= _compaction_region->end(), "object must fit in region");
        current += old_size;
        assert(current <= end, "object must not cross region boundary");
        current = _mark_bitmap.find_obj_beg(current, end);
      }
    }
    void finish() {
      if (_compaction_region != nullptr) {
        _compaction_region->set_new_top(_compaction_point);
      }
    }
  };

  struct ForwardTask final : public WorkerTask {
    ForwardTask() : WorkerTask("PSForward task") {}

    void work(uint worker_id) override {
      ParCompactionManagerNew* cm = ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);
      ForwardState state(worker_id);
      PCRegionData** last_link = &_per_worker_region_data[worker_id];
      size_t idx = worker_id;
      uint num_workers = get_num_workers();
      size_t num_regions = get_num_regions();
      PCRegionData* region_data_array = get_region_data_array();
      while (idx < num_regions) {
        PCRegionData* region = region_data_array + idx;
        *last_link = region;
        last_link = region->local_next_addr();
        state.forward_objs_in_region(cm, region);
        idx += num_workers;
      }
      state.finish();
    }
  } task;

  uint par_workers = ParallelScavengeHeap::heap()->workers().active_workers();
  ParallelScavengeHeap::heap()->workers().set_active_workers(num_workers);
  ParallelScavengeHeap::heap()->workers().run_task(&task);
  ParallelScavengeHeap::heap()->workers().set_active_workers(par_workers);

#ifndef PRODUCT
  for (uint wid = 0; wid < num_workers; wid++) {
    for (PCRegionData* rd = _per_worker_region_data[wid]; rd != nullptr; rd = rd->local_next()) {
      log_develop_trace(gc, compaction)("Per worker compaction region, worker: %u, #%zu: [" PTR_FORMAT ", " PTR_FORMAT "), new_top: " PTR_FORMAT, wid, rd->idx(),
                                        p2i(rd->bottom()), p2i(rd->end()), p2i(rd->new_top()));
    }
  }
#endif
}

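// Copy live objects to their new locations. Each worker walks only the region
// list that it built during forwarding, and forwarding never crosses those
// worker-local lists, so the copies proceed without synchronization.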
void PSParallelCompactNew::compact() {
  GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
  class CompactTask final : public WorkerTask {
    static void compact_region(PCRegionData* region) {
      HeapWord* bottom = region->bottom();
      HeapWord* end = region->top();
      if (bottom == end) {
        return;
      }
      HeapWord* current = _mark_bitmap.find_obj_beg(bottom, end);
      while (current < end) {
        oop obj = cast_to_oop(current);
        size_t size = obj->size();
        if (FullGCForwarding::is_forwarded(obj)) {
          oop fwd = FullGCForwarding::forwardee(obj);
          auto* dst = cast_from_oop<HeapWord*>(fwd);
          ObjectStartArray* sa = start_array(space_id(dst));
          if (sa != nullptr) {
            assert(dst != current, "expect moving object");
            size_t new_words = obj->copy_size(size, obj->mark());
            sa->update_for_block(dst, dst + new_words);
          }

          Copy::aligned_conjoint_words(current, dst, size);
          fwd->init_mark();
          fwd->initialize_hash_if_necessary(obj);
        } else {
          // The start_array must be updated even if the object is not moving.
          ObjectStartArray* sa = start_array(space_id(current));
          if (sa != nullptr) {
            sa->update_for_block(current, current + size);
          }
        }
        current = _mark_bitmap.find_obj_beg(current + size, end);
      }
    }
  public:
    explicit CompactTask() : WorkerTask("PSCompact task") {}
    void work(uint worker_id) override {
      PCRegionData* region = _per_worker_region_data[worker_id];
      while (region != nullptr) {
        log_trace(gc)("Compact worker: %u, compacting region: %zu", worker_id, region->idx());
        compact_region(region);
        region = region->local_next();
      }
    }
  } task;

  uint num_workers = get_num_workers();
  uint par_workers = ParallelScavengeHeap::heap()->workers().active_workers();
  ParallelScavengeHeap::heap()->workers().set_active_workers(num_workers);
  ParallelScavengeHeap::heap()->workers().run_task(&task);
  ParallelScavengeHeap::heap()->workers().set_active_workers(par_workers);
}

// Return the SpaceId for the space containing addr.  If addr is not in the
// heap, last_space_id is returned.  In debug mode it expects the address to be
// in the heap and asserts such.
PSParallelCompactNew::SpaceId PSParallelCompactNew::space_id(HeapWord* addr) {
  assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    if (_space_info[id].space()->contains(addr)) {
      return SpaceId(id);
    }
  }

  assert(false, "no space contains the addr");
  return last_space_id;
}