1 /*
   2  * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "classfile/classLoaderDataGraph.hpp"
  26 #include "classfile/javaClasses.inline.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "code/codeCache.hpp"
  31 #include "code/nmethod.hpp"
  32 #include "compiler/oopMap.hpp"
  33 #include "gc/parallel/objectStartArray.inline.hpp"
  34 #include "gc/parallel/parallelArguments.hpp"
  35 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  36 #include "gc/parallel/parMarkBitMap.inline.hpp"
  37 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  38 #include "gc/parallel/psCompactionManager.inline.hpp"
  39 #include "gc/parallel/psOldGen.hpp"
  40 #include "gc/parallel/psParallelCompact.inline.hpp"
  41 #include "gc/parallel/psPromotionManager.inline.hpp"
  42 #include "gc/parallel/psRootType.hpp"
  43 #include "gc/parallel/psScavenge.hpp"
  44 #include "gc/parallel/psStringDedup.hpp"
  45 #include "gc/parallel/psYoungGen.hpp"
  46 #include "gc/shared/classUnloadingContext.hpp"
  47 #include "gc/shared/collectedHeap.inline.hpp"
  48 #include "gc/shared/fullGCForwarding.inline.hpp"
  49 #include "gc/shared/gcCause.hpp"
  50 #include "gc/shared/gcHeapSummary.hpp"
  51 #include "gc/shared/gcId.hpp"
  52 #include "gc/shared/gcLocker.hpp"
  53 #include "gc/shared/gcTimer.hpp"
  54 #include "gc/shared/gcTrace.hpp"
  55 #include "gc/shared/gcTraceTime.inline.hpp"
  56 #include "gc/shared/gcVMOperations.hpp"
  57 #include "gc/shared/isGCActiveMark.hpp"
  58 #include "gc/shared/oopStorage.inline.hpp"
  59 #include "gc/shared/oopStorageSet.inline.hpp"
  60 #include "gc/shared/oopStorageSetParState.inline.hpp"
  61 #include "gc/shared/parallelCleaning.hpp"
  62 #include "gc/shared/preservedMarks.inline.hpp"
  63 #include "gc/shared/referencePolicy.hpp"
  64 #include "gc/shared/referenceProcessor.hpp"
  65 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  66 #include "gc/shared/spaceDecorator.hpp"
  67 #include "gc/shared/taskTerminator.hpp"
  68 #include "gc/shared/weakProcessor.inline.hpp"
  69 #include "gc/shared/workerPolicy.hpp"
  70 #include "gc/shared/workerThread.hpp"
  71 #include "gc/shared/workerUtils.hpp"
  72 #include "logging/log.hpp"
  73 #include "memory/iterator.inline.hpp"
  74 #include "memory/memoryReserver.hpp"
  75 #include "memory/metaspaceUtils.hpp"
  76 #include "memory/resourceArea.hpp"
  77 #include "memory/universe.hpp"
  78 #include "nmt/memTracker.hpp"
  79 #include "oops/access.inline.hpp"
  80 #include "oops/flatArrayKlass.inline.hpp"
  81 #include "oops/instanceClassLoaderKlass.inline.hpp"
  82 #include "oops/instanceKlass.inline.hpp"
  83 #include "oops/instanceMirrorKlass.inline.hpp"
  84 #include "oops/methodData.hpp"
  85 #include "oops/objArrayKlass.inline.hpp"
  86 #include "oops/oop.inline.hpp"
  87 #include "runtime/arguments.hpp"
  88 #include "runtime/atomicAccess.hpp"
  89 #include "runtime/handles.inline.hpp"
  90 #include "runtime/java.hpp"
  91 #include "runtime/safepoint.hpp"
  92 #include "runtime/threads.hpp"
  93 #include "runtime/vmThread.hpp"
  94 #include "services/memoryService.hpp"
  95 #include "utilities/align.hpp"
  96 #include "utilities/debug.hpp"
  97 #include "utilities/events.hpp"
  98 #include "utilities/formatBuffer.hpp"
  99 #include "utilities/macros.hpp"
 100 #include "utilities/stack.inline.hpp"
 101 #if INCLUDE_JVMCI
 102 #include "jvmci/jvmci.hpp"
 103 #endif
 104 
 105 #include <math.h>
 106 
 107 // All sizes are in HeapWords.
 108 const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
 109 const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
 110 static_assert(ParallelCompactData::RegionSize >= BitsPerWord, "region-start bit word-aligned");
 111 const size_t ParallelCompactData::RegionSizeBytes =
 112   RegionSize << LogHeapWordSize;
 113 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
 114 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
 115 const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;
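
// An illustrative sketch (not used by the collector) of how the constants
// above relate, assuming 64-bit heap words (LogHeapWordSize == 3):
//
//   RegionSize           = 1 << 16    = 65536 words (64K words)
//   RegionSizeBytes      = 65536 << 3 = 524288 bytes (512K)
//   RegionSizeOffsetMask = 0xffff     // word offset within a region
//   RegionAddrOffsetMask = 0x7ffff    // byte offset within a region
//   RegionAddrMask       = ~0x7ffff   // byte address of the region start
//
// For a heap address 'addr', the enclosing region index and region start
// could then be computed as:
//
//   size_t    idx          = pointer_delta(addr, _heap_start) >> Log2RegionSize;
//   HeapWord* region_start = _heap_start + (idx << Log2RegionSize);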
 116 
 117 const ParallelCompactData::RegionData::region_sz_t
 118 ParallelCompactData::RegionData::dc_shift = 27;
 119 
 120 const ParallelCompactData::RegionData::region_sz_t
 121 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
 122 
 123 const ParallelCompactData::RegionData::region_sz_t
 124 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
 125 
 126 const ParallelCompactData::RegionData::region_sz_t
 127 ParallelCompactData::RegionData::los_mask = ~dc_mask;
 128 
 129 const ParallelCompactData::RegionData::region_sz_t
 130 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
 131 
 132 const ParallelCompactData::RegionData::region_sz_t
 133 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
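
// A sketch of how RegionData packs the destination-count/claim state and the
// live size into one _dc_and_los word, assuming region_sz_t is a 32-bit
// unsigned type:
//
//    31          27 26                              0
//   +--------------+--------------------------------+
//   |  dc (5 bits) |    live size in words (los)    |
//   +--------------+--------------------------------+
//
//   dc_mask      = 0xf8000000  // destination count / claim state bits
//   los_mask     = 0x07ffffff  // live words destined for the region
//   dc_one       = 0x08000000  // adding this bumps the destination count by 1
//   dc_claimed   = 0x40000000  // region has been claimed for filling
//   dc_completed = 0x60000000  // region has been completely filled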
 134 
 135 bool ParallelCompactData::RegionData::is_clear() {
 136   return (_destination == nullptr) &&
 137          (_source_region == 0) &&
 138          (_partial_obj_addr == nullptr) &&
 139          (_partial_obj_size == 0) &&
 140          (_dc_and_los == 0) &&
 141          (_shadow_state == 0);
 142 }
 143 
 144 #ifdef ASSERT
 145 void ParallelCompactData::RegionData::verify_clear() {
 146   assert(_destination == nullptr, "inv");
 147   assert(_source_region == 0, "inv");
 148   assert(_partial_obj_addr == nullptr, "inv");
 149   assert(_partial_obj_size == 0, "inv");
 150   assert(_dc_and_los == 0, "inv");
 151   assert(_shadow_state == 0, "inv");
 152 }
 153 #endif
 154 
 155 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
 156 
 157 SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
 158 ReferenceProcessor* PSParallelCompact::_ref_processor = nullptr;
 159 
 160 void SplitInfo::record(size_t split_region_idx, HeapWord* split_point, size_t preceding_live_words) {
 161   assert(split_region_idx != 0, "precondition");
 162 
 163   // Obj denoted by split_point will be deferred to the next space.
 164   assert(split_point != nullptr, "precondition");
 165 
 166   const ParallelCompactData& sd = PSParallelCompact::summary_data();
 167 
 168   PSParallelCompact::RegionData* split_region_ptr = sd.region(split_region_idx);
 169   assert(preceding_live_words < split_region_ptr->data_size(), "inv");
 170 
 171   HeapWord* preceding_destination = split_region_ptr->destination();
 172   assert(preceding_destination != nullptr, "inv");
 173 
  // The number of destination regions the preceding live words will be
  // copied into (0, 1 or 2).
 175   uint preceding_destination_count;
 176   if (preceding_live_words == 0) {
 177     preceding_destination_count = 0;
 178   } else {
 179     // -1 so that the ending address doesn't fall on the region-boundary
 180     if (sd.region_align_down(preceding_destination) ==
 181         sd.region_align_down(preceding_destination + preceding_live_words - 1)) {
 182       preceding_destination_count = 1;
 183     } else {
 184       preceding_destination_count = 2;
 185     }
 186   }
 187 
 188   _split_region_idx = split_region_idx;
 189   _split_point = split_point;
 190   _preceding_live_words = preceding_live_words;
 191   _preceding_destination = preceding_destination;
 192   _preceding_destination_count = preceding_destination_count;
 193 }
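
// An illustrative example of the destination-count logic in record() above
// (numbers are made up): if the split region has 100 preceding live words and
// their destination starts 40 words before a region boundary, then
//
//   region_align_down(preceding_destination) !=
//   region_align_down(preceding_destination + 100 - 1)
//
// so the preceding part straddles two destination regions and
// preceding_destination_count is 2. Had all 100 words fit inside one
// destination region, the count would be 1; with no preceding live words it
// is 0.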
 194 
 195 void SplitInfo::clear()
 196 {
 197   _split_region_idx = 0;
 198   _split_point = nullptr;
 199   _preceding_live_words = 0;
 200   _preceding_destination = nullptr;
 201   _preceding_destination_count = 0;
 202   assert(!is_valid(), "sanity");
 203 }
 204 
 205 #ifdef  ASSERT
 206 void SplitInfo::verify_clear()
 207 {
 208   assert(_split_region_idx == 0, "not clear");
 209   assert(_split_point == nullptr, "not clear");
 210   assert(_preceding_live_words == 0, "not clear");
 211   assert(_preceding_destination == nullptr, "not clear");
 212   assert(_preceding_destination_count == 0, "not clear");
 213 }
 214 #endif  // #ifdef ASSERT
 215 
 216 
 217 void PSParallelCompact::print_on(outputStream* st) {
 218   _mark_bitmap.print_on(st);
 219 }
 220 
 221 ParallelCompactData::ParallelCompactData() :
 222   _heap_start(nullptr),
 223   DEBUG_ONLY(_heap_end(nullptr) COMMA)
 224   _region_vspace(nullptr),
 225   _reserved_byte_size(0),
 226   _region_data(nullptr),
 227   _region_count(0) {}
 228 
 229 bool ParallelCompactData::initialize(MemRegion reserved_heap)
 230 {
 231   _heap_start = reserved_heap.start();
 232   const size_t heap_size = reserved_heap.word_size();
 233   DEBUG_ONLY(_heap_end = _heap_start + heap_size;)
 234 
 235   assert(region_align_down(_heap_start) == _heap_start,
 236          "region start not aligned");
 237 
 238   return initialize_region_data(heap_size);
 239 }
 240 
 241 PSVirtualSpace*
 242 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 243 {
 244   const size_t raw_bytes = count * element_size;
 245   const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
 246   const size_t granularity = os::vm_allocation_granularity();
 247   const size_t rs_align = MAX2(page_sz, granularity);
 248 
 249   _reserved_byte_size = align_up(raw_bytes, rs_align);
 250 
 251   ReservedSpace rs = MemoryReserver::reserve(_reserved_byte_size,
 252                                              rs_align,
 253                                              page_sz,
 254                                              mtGC);
 255 
 256   if (!rs.is_reserved()) {
 257     // Failed to reserve memory.
 258     return nullptr;
 259   }
 260 
 261   os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
 262                        rs.size(), page_sz);
 263 
 264   MemTracker::record_virtual_memory_tag(rs, mtGC);
 265 
 266   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
 267 
 268   if (!vspace->expand_by(_reserved_byte_size)) {
 269     // Failed to commit memory.
 270 
 271     delete vspace;
 272 
 273     // Release memory reserved in the space.
 274     MemoryReserver::release(rs);
 275 
 276     return nullptr;
 277   }
 278 
 279   return vspace;
 280 }
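
// An illustrative sizing sketch for create_vspace() above, assuming a 32G
// heap, 64-bit heap words and sizeof(RegionData) == 64 (the exact struct size
// may differ):
//
//   region count  = 32G / 512K bytes per region = 65536 regions
//   raw_bytes     = 65536 * 64                  = 4M
//   page_sz       = os::page_size_for_region_aligned(4M, 10)
//   rs_align      = MAX2(page_sz, os::vm_allocation_granularity())
//   reserved size = align_up(4M, rs_align)
//
// i.e. the summary data costs roughly one RegionData per 512K of reserved heap.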
 281 
 282 bool ParallelCompactData::initialize_region_data(size_t heap_size)
 283 {
 284   assert(is_aligned(heap_size, RegionSize), "precondition");
 285 
 286   const size_t count = heap_size >> Log2RegionSize;
 287   _region_vspace = create_vspace(count, sizeof(RegionData));
 288   if (_region_vspace != nullptr) {
 289     _region_data = (RegionData*)_region_vspace->reserved_low_addr();
 290     _region_count = count;
 291     return true;
 292   }
 293   return false;
 294 }
 295 
 296 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
 297   assert(beg_region <= _region_count, "beg_region out of range");
 298   assert(end_region <= _region_count, "end_region out of range");
 299 
 300   const size_t region_cnt = end_region - beg_region;
 301   memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
 302 }
 303 
// The total live words on src_region would overflow the target space, so find
// the overflowing object and record the split point. The invariant is that an
// obj must not cross a space boundary.
 307 HeapWord* ParallelCompactData::summarize_split_space(size_t src_region,
 308                                                      SplitInfo& split_info,
 309                                                      HeapWord* const destination,
 310                                                      HeapWord* const target_end,
 311                                                      HeapWord** target_next) {
 312   assert(destination <= target_end, "sanity");
 313   assert(destination + _region_data[src_region].data_size() > target_end,
 314     "region should not fit into target space");
 315   assert(is_region_aligned(target_end), "sanity");
 316 
 317   size_t partial_obj_size = _region_data[src_region].partial_obj_size();
 318 
 319   if (destination + partial_obj_size > target_end) {
 320     assert(partial_obj_size > 0, "inv");
 321     // The overflowing obj is from a previous region.
 322     //
 323     // source-regions:
 324     //
 325     // ***************
 326     // |     A|AA    |
 327     // ***************
 328     //       ^
 329     //       | split-point
 330     //
 331     // dest-region:
 332     //
 333     // ********
 334     // |~~~~A |
 335     // ********
 336     //       ^^
 337     //       || target-space-end
 338     //       |
 339     //       | destination
 340     //
 341     // AAA would overflow target-space.
 342     //
 343     HeapWord* overflowing_obj = _region_data[src_region].partial_obj_addr();
 344     size_t split_region = addr_to_region_idx(overflowing_obj);
 345 
 346     // The number of live words before the overflowing object on this split region
 347     size_t preceding_live_words;
 348     if (is_region_aligned(overflowing_obj)) {
 349       preceding_live_words = 0;
 350     } else {
 351       // Words accounted by the overflowing object on the split region
 352       size_t overflowing_size = pointer_delta(region_align_up(overflowing_obj), overflowing_obj);
 353       preceding_live_words = region(split_region)->data_size() - overflowing_size;
 354     }
 355 
 356     split_info.record(split_region, overflowing_obj, preceding_live_words);
 357 
    // The live words in [overflowing_obj, src_region_start) have already been
    // accounted for in destination, so new_top must be moved back now that
    // this overflowing obj is deferred to the next space.
 360     HeapWord* new_top = destination - pointer_delta(region_to_addr(src_region), overflowing_obj);
 361 
    // Had the overflowing obj been relocated to its original destination,
    // those destination regions would have had their source_region set. Now
    // that this overflowing obj is relocated elsewhere, reset the
    // source_region.
 366     {
 367       size_t range_start = addr_to_region_idx(region_align_up(new_top));
 368       size_t range_end = addr_to_region_idx(region_align_up(destination));
 369       for (size_t i = range_start; i < range_end; ++i) {
 370         region(i)->set_source_region(0);
 371       }
 372     }
 373 
 374     // Update new top of target space
 375     *target_next = new_top;
 376 
 377     return overflowing_obj;
 378   }
 379 
 380   // Obj-iteration to locate the overflowing obj
 381   HeapWord* region_start = region_to_addr(src_region);
 382   HeapWord* region_end = region_start + RegionSize;
 383   HeapWord* cur_addr = region_start + partial_obj_size;
 384   size_t live_words = partial_obj_size;
 385 
 386   while (true) {
 387     assert(cur_addr < region_end, "inv");
 388     cur_addr = PSParallelCompact::mark_bitmap()->find_obj_beg(cur_addr, region_end);
 389     // There must be an overflowing obj in this region
 390     assert(cur_addr < region_end, "inv");
 391 
 392     oop obj = cast_to_oop(cur_addr);
 393     size_t obj_size = obj->size();
 394     if (destination + live_words + obj_size > target_end) {
 395       // Found the overflowing obj
 396       split_info.record(src_region, cur_addr, live_words);
 397       *target_next = destination + live_words;
 398       return cur_addr;
 399     }
 400 
 401     live_words += obj_size;
 402     cur_addr += obj_size;
 403   }
 404 }
 405 
 406 size_t ParallelCompactData::live_words_in_space(const MutableSpace* space,
 407                                                 HeapWord** full_region_prefix_end) {
 408   size_t cur_region = addr_to_region_idx(space->bottom());
 409   const size_t end_region = addr_to_region_idx(region_align_up(space->top()));
 410   size_t live_words = 0;
 411   if (full_region_prefix_end == nullptr) {
 412     for (/* empty */; cur_region < end_region; ++cur_region) {
 413       live_words += _region_data[cur_region].data_size();
 414     }
 415   } else {
 416     bool first_set = false;
 417     for (/* empty */; cur_region < end_region; ++cur_region) {
 418       size_t live_words_in_region = _region_data[cur_region].data_size();
 419       if (!first_set && live_words_in_region < RegionSize) {
 420         *full_region_prefix_end = region_to_addr(cur_region);
 421         first_set = true;
 422       }
 423       live_words += live_words_in_region;
 424     }
 425     if (!first_set) {
 426       // All regions are full of live objs.
 427       assert(is_region_aligned(space->top()), "inv");
 428       *full_region_prefix_end = space->top();
 429     }
 430     assert(*full_region_prefix_end != nullptr, "postcondition");
 431     assert(is_region_aligned(*full_region_prefix_end), "inv");
 432     assert(*full_region_prefix_end >= space->bottom(), "in-range");
 433     assert(*full_region_prefix_end <= space->top(), "in-range");
 434   }
 435   return live_words;
 436 }
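
// Illustrative example for live_words_in_space() above: if a space spans
// three regions containing { 65536, 65536, 40000 } live words (RegionSize
// being 65536), full_region_prefix_end is set to the start of the third
// region, the first one that is not completely live, and the returned total
// is 171072.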
 437 
 438 bool ParallelCompactData::summarize(SplitInfo& split_info,
 439                                     HeapWord* source_beg, HeapWord* source_end,
 440                                     HeapWord** source_next,
 441                                     HeapWord* target_beg, HeapWord* target_end,
 442                                     HeapWord** target_next)
 443 {
 444   HeapWord* const source_next_val = source_next == nullptr ? nullptr : *source_next;
 445   log_develop_trace(gc, compaction)(
 446       "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
      " tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
 448       p2i(source_beg), p2i(source_end), p2i(source_next_val),
 449       p2i(target_beg), p2i(target_end), p2i(*target_next));
 450 
 451   size_t cur_region = addr_to_region_idx(source_beg);
 452   const size_t end_region = addr_to_region_idx(region_align_up(source_end));
 453 
 454   HeapWord *dest_addr = target_beg;
 455   for (/* empty */; cur_region < end_region; cur_region++) {
 456     size_t words = _region_data[cur_region].data_size();
 457 
 458     // Skip empty ones
 459     if (words == 0) {
 460       continue;
 461     }
 462 
 463     if (split_info.is_split(cur_region)) {
 464       assert(words > split_info.preceding_live_words(), "inv");
 465       words -= split_info.preceding_live_words();
 466     }
 467 
 468     _region_data[cur_region].set_destination(dest_addr);
 469 
 470     // If cur_region does not fit entirely into the target space, find a point
 471     // at which the source space can be 'split' so that part is copied to the
 472     // target space and the rest is copied elsewhere.
 473     if (dest_addr + words > target_end) {
 474       assert(source_next != nullptr, "source_next is null when splitting");
 475       *source_next = summarize_split_space(cur_region, split_info, dest_addr,
 476                                            target_end, target_next);
 477       return false;
 478     }
 479 
 480     uint destination_count = split_info.is_split(cur_region)
 481                              ? split_info.preceding_destination_count()
 482                              : 0;
 483 
 484     HeapWord* const last_addr = dest_addr + words - 1;
 485     const size_t dest_region_1 = addr_to_region_idx(dest_addr);
 486     const size_t dest_region_2 = addr_to_region_idx(last_addr);
 487 
 488     // Initially assume that the destination regions will be the same and
 489     // adjust the value below if necessary.  Under this assumption, if
 490     // cur_region == dest_region_2, then cur_region will be compacted
 491     // completely into itself.
 492     destination_count += cur_region == dest_region_2 ? 0 : 1;
 493     if (dest_region_1 != dest_region_2) {
 494       // Destination regions differ; adjust destination_count.
 495       destination_count += 1;
 496       // Data from cur_region will be copied to the start of dest_region_2.
 497       _region_data[dest_region_2].set_source_region(cur_region);
 498     } else if (is_region_aligned(dest_addr)) {
 499       // Data from cur_region will be copied to the start of the destination
 500       // region.
 501       _region_data[dest_region_1].set_source_region(cur_region);
 502     }
 503 
 504     _region_data[cur_region].set_destination_count(destination_count);
 505     dest_addr += words;
 506   }
 507 
 508   *target_next = dest_addr;
 509   return true;
 510 }
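
// A sketch of the destination_count bookkeeping in summarize() above
// (illustrative numbers): suppose cur_region holds 30000 live words, is not
// compacted into itself (cur_region != dest_region_2), and dest_addr lies
// 10000 words before a region boundary. The data then spills into two
// destination regions, so destination_count becomes 2 (plus the preceding
// count if cur_region was split), and dest_region_2 records cur_region as its
// source_region because copying into dest_region_2 starts at its first word.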
 511 
 512 #ifdef ASSERT
 513 void ParallelCompactData::verify_clear() {
 514   for (uint cur_idx = 0; cur_idx < region_count(); ++cur_idx) {
 515     if (!region(cur_idx)->is_clear()) {
 516       log_warning(gc)("Uncleared Region: %u", cur_idx);
 517       region(cur_idx)->verify_clear();
 518     }
 519   }
 520 }
 521 #endif  // #ifdef ASSERT
 522 
 523 STWGCTimer          PSParallelCompact::_gc_timer;
 524 ParallelOldTracer   PSParallelCompact::_gc_tracer;
 525 elapsedTimer        PSParallelCompact::_accumulated_time;
 526 unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
 527 CollectorCounters*  PSParallelCompact::_counters = nullptr;
 528 ParMarkBitMap       PSParallelCompact::_mark_bitmap;
 529 ParallelCompactData PSParallelCompact::_summary_data;
 530 
 531 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
 532 
 533 class PCAdjustPointerClosure: public BasicOopIterateClosure {
 534   template <typename T>
 535   void do_oop_work(T* p) { PSParallelCompact::adjust_pointer(p); }
 536 
 537 public:
 538   virtual void do_oop(oop* p)                { do_oop_work(p); }
 539   virtual void do_oop(narrowOop* p)          { do_oop_work(p); }
 540 
 541   virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
 542 };
 543 
 544 static PCAdjustPointerClosure pc_adjust_pointer_closure;
 545 
 546 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 547 
 548 void PSParallelCompact::post_initialize() {
 549   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 550   _span_based_discoverer.set_span(heap->reserved_region());
 551   _ref_processor =
 552     new ReferenceProcessor(&_span_based_discoverer,
 553                            ParallelGCThreads,   // mt processing degree
 554                            ParallelGCThreads,   // mt discovery degree
 555                            false,               // concurrent_discovery
 556                            &_is_alive_closure); // non-header is alive closure
 557 
 558   _counters = new CollectorCounters("Parallel full collection pauses", 1);
 559 
 560   // Initialize static fields in ParCompactionManager.
 561   ParCompactionManager::initialize(mark_bitmap());
 562 }
 563 
 564 bool PSParallelCompact::initialize_aux_data() {
 565   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 566   MemRegion mr = heap->reserved_region();
 567   assert(mr.byte_size() != 0, "heap should be reserved");
 568 
 569   initialize_space_info();
 570 
 571   if (!_mark_bitmap.initialize(mr)) {
 572     vm_shutdown_during_initialization(
 573       err_msg("Unable to allocate %zuKB bitmaps for parallel "
 574       "garbage collection for the requested %zuKB heap.",
 575       _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
 576     return false;
 577   }
 578 
 579   if (!_summary_data.initialize(mr)) {
 580     vm_shutdown_during_initialization(
      err_msg("Unable to allocate %zuKB summary data for parallel "
 582       "garbage collection for the requested %zuKB heap.",
 583       _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
 584     return false;
 585   }
 586 
 587   return true;
 588 }
 589 
 590 void PSParallelCompact::initialize_space_info()
 591 {
 592   memset(&_space_info, 0, sizeof(_space_info));
 593 
 594   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 595   PSYoungGen* young_gen = heap->young_gen();
 596 
 597   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
 598   _space_info[eden_space_id].set_space(young_gen->eden_space());
 599   _space_info[from_space_id].set_space(young_gen->from_space());
 600   _space_info[to_space_id].set_space(young_gen->to_space());
 601 
 602   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
 603 }
 604 
 605 void
 606 PSParallelCompact::clear_data_covering_space(SpaceId id)
 607 {
 608   // At this point, top is the value before GC, new_top() is the value that will
 609   // be set at the end of GC.  The marking bitmap is cleared to top; nothing
 610   // should be marked above top.  The summary data is cleared to the larger of
 611   // top & new_top.
 612   MutableSpace* const space = _space_info[id].space();
 613   HeapWord* const bot = space->bottom();
 614   HeapWord* const top = space->top();
 615   HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
 616 
 617   _mark_bitmap.clear_range(bot, top);
 618 
 619   const size_t beg_region = _summary_data.addr_to_region_idx(bot);
 620   const size_t end_region =
 621     _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
 622   _summary_data.clear_range(beg_region, end_region);
 623 
 624   // Clear the data used to 'split' regions.
 625   SplitInfo& split_info = _space_info[id].split_info();
 626   if (split_info.is_valid()) {
 627     split_info.clear();
 628   }
 629   DEBUG_ONLY(split_info.verify_clear();)
 630 }
 631 
 632 void PSParallelCompact::pre_compact()
 633 {
 634   // Update the from & to space pointers in space_info, since they are swapped
 635   // at each young gen gc.  Do the update unconditionally (even though a
 636   // promotion failure does not swap spaces) because an unknown number of young
 637   // collections will have swapped the spaces an unknown number of times.
 638   GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
 639   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 640   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
 641   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
 642 
 643   heap->increment_total_collections(true);
 644 
 645   CodeCache::on_gc_marking_cycle_start();
 646 
 647   heap->print_before_gc();
 648   heap->trace_heap_before_gc(&_gc_tracer);
 649 
 650   // Fill in TLABs
 651   heap->ensure_parsability(true);  // retire TLABs
 652 
 653   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
 654     Universe::verify("Before GC");
 655   }
 656 
 657   DEBUG_ONLY(mark_bitmap()->verify_clear();)
 658   DEBUG_ONLY(summary_data().verify_clear();)
 659 }
 660 
 661 void PSParallelCompact::post_compact()
 662 {
 663   GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
 664   ParCompactionManager::remove_all_shadow_regions();
 665 
 666   CodeCache::on_gc_marking_cycle_finish();
 667   CodeCache::arm_all_nmethods();
 668 
 669   // Need to clear claim bits for the next full-gc (marking and adjust-pointers).
 670   ClassLoaderDataGraph::clear_claimed_marks();
 671 
 672   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
 673     // Clear the marking bitmap, summary data and split info.
 674     clear_data_covering_space(SpaceId(id));
 675     {
 676       MutableSpace* space = _space_info[id].space();
 677       HeapWord* top = space->top();
 678       HeapWord* new_top = _space_info[id].new_top();
 679       if (ZapUnusedHeapArea && new_top < top) {
 680         space->mangle_region(MemRegion(new_top, top));
 681       }
 682       // Update top().  Must be done after clearing the bitmap and summary data.
 683       space->set_top(new_top);
 684     }
 685   }
 686 
 687 #ifdef ASSERT
 688   {
 689     mark_bitmap()->verify_clear();
 690     summary_data().verify_clear();
 691   }
 692 #endif
 693 
 694   ParCompactionManager::flush_all_string_dedup_requests();
 695 
 696   MutableSpace* const eden_space = _space_info[eden_space_id].space();
 697   MutableSpace* const from_space = _space_info[from_space_id].space();
 698   MutableSpace* const to_space   = _space_info[to_space_id].space();
 699 
 700   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 701   bool eden_empty = eden_space->is_empty();
 702 
 703   // Update heap occupancy information which is used as input to the soft ref
 704   // clearing policy at the next gc.
 705   Universe::heap()->update_capacity_and_used_at_gc();
 706 
 707   bool young_gen_empty = eden_empty && from_space->is_empty() &&
 708     to_space->is_empty();
 709 
 710   PSCardTable* ct = heap->card_table();
 711   MemRegion old_mr = heap->old_gen()->committed();
 712   if (young_gen_empty) {
 713     ct->clear_MemRegion(old_mr);
 714   } else {
 715     ct->dirty_MemRegion(old_mr);
 716   }
 717 
 718   heap->prune_scavengable_nmethods();
 719 
 720 #if COMPILER2_OR_JVMCI
 721   DerivedPointerTable::update_pointers();
 722 #endif
 723 
 724   // Signal that we have completed a visit to all live objects.
 725   Universe::heap()->record_whole_heap_examined_timestamp();
 726 }
 727 
 728 HeapWord* PSParallelCompact::compute_dense_prefix_for_old_space(MutableSpace* old_space,
 729                                                                 HeapWord* full_region_prefix_end) {
 730   const size_t region_size = ParallelCompactData::RegionSize;
 731   const ParallelCompactData& sd = summary_data();
 732 
 733   // Iteration starts with the region *after* the full-region-prefix-end.
 734   const RegionData* const start_region = sd.addr_to_region_ptr(full_region_prefix_end);
 735   // If final region is not full, iteration stops before that region,
 736   // because fill_dense_prefix_end assumes that prefix_end <= top.
 737   const RegionData* const end_region = sd.addr_to_region_ptr(old_space->top());
 738   assert(start_region <= end_region, "inv");
 739 
 740   size_t max_waste = old_space->capacity_in_words() * (MarkSweepDeadRatio / 100.0);
 741   const RegionData* cur_region = start_region;
 742   for (/* empty */; cur_region < end_region; ++cur_region) {
 743     assert(region_size >= cur_region->data_size(), "inv");
 744     size_t dead_size = region_size - cur_region->data_size();
 745     if (max_waste < dead_size) {
 746       break;
 747     }
 748     max_waste -= dead_size;
 749   }
 750 
 751   HeapWord* const prefix_end = sd.region_to_addr(cur_region);
 752   assert(sd.is_region_aligned(prefix_end), "postcondition");
 753   assert(prefix_end >= full_region_prefix_end, "in-range");
 754   assert(prefix_end <= old_space->top(), "in-range");
 755   return prefix_end;
 756 }
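
// Illustrative budget for compute_dense_prefix_for_old_space() above,
// assuming MarkSweepDeadRatio == 5 (its default) and a 1G-word old-gen:
// max_waste starts at about 53M words. Each region after the full prefix
// charges its dead words (region_size - data_size()) against this budget, and
// the dense prefix ends at the first region that would overdraw it. Objects
// below prefix_end are not moved; the dead space there is accepted in
// exchange for less copying.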
 757 
 758 void PSParallelCompact::fill_dense_prefix_end(SpaceId id) {
 759   // Comparing two sizes to decide if filling is required:
 760   //
 761   // The size of the filler (min-obj-size) is 2 heap words with the default
 762   // MinObjAlignment, since both markword and klass take 1 heap word.
 763   // With +UseCompactObjectHeaders, the minimum filler size is only one word,
 764   // because the Klass* gets encoded in the mark-word.
 765   //
 766   // The size of the gap (if any) right before dense-prefix-end is
 767   // MinObjAlignment.
 768   //
  // A filler obj is needed only when the gap is smaller than min-obj-size;
  // the filler then necessarily extends into the next region.
 771 
 772   if (MinObjAlignment >= checked_cast<int>(CollectedHeap::min_fill_size())) {
 773     return;
 774   }
 775 
 776   assert(!UseCompactObjectHeaders, "Compact headers can allocate small objects");
 777   assert(CollectedHeap::min_fill_size() == 2, "inv");
 778   HeapWord* const dense_prefix_end = dense_prefix(id);
 779   assert(_summary_data.is_region_aligned(dense_prefix_end), "precondition");
 780   assert(dense_prefix_end <= space(id)->top(), "precondition");
 781   if (dense_prefix_end == space(id)->top()) {
 782     // Must not have single-word gap right before prefix-end/top.
 783     return;
 784   }
 785   RegionData* const region_after_dense_prefix = _summary_data.addr_to_region_ptr(dense_prefix_end);
 786 
 787   if (region_after_dense_prefix->partial_obj_size() != 0 ||
 788       _mark_bitmap.is_marked(dense_prefix_end)) {
 789     // The region after the dense prefix starts with live bytes.
 790     return;
 791   }
 792 
 793   HeapWord* block_start = start_array(id)->block_start_reaching_into_card(dense_prefix_end);
 794   if (block_start == dense_prefix_end - 1) {
 795     assert(!_mark_bitmap.is_marked(block_start), "inv");
 796     // There is exactly one heap word gap right before the dense prefix end, so we need a filler object.
 797     // The filler object will extend into region_after_dense_prefix.
 798     const size_t obj_len = 2; // min-fill-size
 799     HeapWord* const obj_beg = dense_prefix_end - 1;
 800     CollectedHeap::fill_with_object(obj_beg, obj_len);
 801     _mark_bitmap.mark_obj(obj_beg);
 802     _summary_data.addr_to_region_ptr(obj_beg)->add_live_obj(1);
 803     region_after_dense_prefix->set_partial_obj_size(1);
 804     region_after_dense_prefix->set_partial_obj_addr(obj_beg);
 805     assert(start_array(id) != nullptr, "sanity");
 806     start_array(id)->update_for_block(obj_beg, obj_beg + obj_len);
 807   }
 808 }
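
// Illustrative layout for the fill in fill_dense_prefix_end() above, assuming
// a one-word gap right before the dense prefix end ('|' marks the region
// boundary at dense_prefix_end):
//
//   ... live data ... [ gap: 1 word ] | <-- dense_prefix_end (region boundary)
//
// The 2-word filler starts at dense_prefix_end - 1 and extends one word into
// the following region, which is why that region's partial_obj_size/addr and
// the start array are updated to keep the summary data consistent.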
 809 
 810 bool PSParallelCompact::check_maximum_compaction(bool should_do_max_compaction,
 811                                                  size_t total_live_words,
 812                                                  MutableSpace* const old_space,
 813                                                  HeapWord* full_region_prefix_end) {
 814 
 815   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 816 
 817   // Check System.GC
 818   bool is_max_on_system_gc = UseMaximumCompactionOnSystemGC
 819                           && GCCause::is_user_requested_gc(heap->gc_cause());
 820 
  // Check if the live objs are too large to fit in old-gen.
 822   const bool is_old_gen_too_full = (total_live_words >= old_space->capacity_in_words());
 823 
  // Check if all regions in old-gen are already full of live objs.
 825   const bool is_region_full =
 826     full_region_prefix_end >= _summary_data.region_align_down(old_space->top());
 827 
 828   return should_do_max_compaction
 829       || is_max_on_system_gc
 830       || is_old_gen_too_full
 831       || is_region_full;
 832 }
 833 
 834 void PSParallelCompact::summary_phase(bool should_do_max_compaction)
 835 {
 836   GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
 837 
 838   MutableSpace* const old_space = _space_info[old_space_id].space();
 839   {
 840     size_t total_live_words = 0;
 841     HeapWord* full_region_prefix_end = nullptr;
 842     {
 843       // old-gen
 844       size_t live_words = _summary_data.live_words_in_space(old_space,
 845                                                             &full_region_prefix_end);
 846       total_live_words += live_words;
 847     }
 848     // young-gen
 849     for (uint i = eden_space_id; i < last_space_id; ++i) {
 850       const MutableSpace* space = _space_info[i].space();
 851       size_t live_words = _summary_data.live_words_in_space(space);
 852       total_live_words += live_words;
 853       _space_info[i].set_new_top(space->bottom() + live_words);
 854       _space_info[i].set_dense_prefix(space->bottom());
 855     }
 856 
 857     should_do_max_compaction = check_maximum_compaction(should_do_max_compaction,
 858                                                         total_live_words,
 859                                                         old_space,
 860                                                         full_region_prefix_end);
 861     {
 862       GCTraceTime(Info, gc, phases) tm("Summary Phase: expand", &_gc_timer);
 863       // Try to expand old-gen in order to fit all live objs and waste.
      size_t target_capacity_bytes = total_live_words * HeapWordSize
                                   + old_space->capacity_in_bytes() * (MarkSweepDeadRatio / 100.0);
 866       ParallelScavengeHeap::heap()->old_gen()->try_expand_till_size(target_capacity_bytes);
 867     }
 868 
 869     HeapWord* dense_prefix_end = should_do_max_compaction
 870                                  ? full_region_prefix_end
 871                                  : compute_dense_prefix_for_old_space(old_space,
 872                                                                       full_region_prefix_end);
 873     SpaceId id = old_space_id;
 874     _space_info[id].set_dense_prefix(dense_prefix_end);
 875 
 876     if (dense_prefix_end != old_space->bottom()) {
 877       fill_dense_prefix_end(id);
 878     }
 879 
 880     // Compacting objs in [dense_prefix_end, old_space->top())
 881     _summary_data.summarize(_space_info[id].split_info(),
 882                             dense_prefix_end, old_space->top(), nullptr,
 883                             dense_prefix_end, old_space->end(),
 884                             _space_info[id].new_top_addr());
 885   }
 886 
 887   // Summarize the remaining spaces in the young gen.  The initial target space
 888   // is the old gen.  If a space does not fit entirely into the target, then the
 889   // remainder is compacted into the space itself and that space becomes the new
 890   // target.
 891   SpaceId dst_space_id = old_space_id;
 892   HeapWord* dst_space_end = old_space->end();
 893   HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
 894   for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
 895     const MutableSpace* space = _space_info[id].space();
 896     const size_t live = pointer_delta(_space_info[id].new_top(),
 897                                       space->bottom());
 898     const size_t available = pointer_delta(dst_space_end, *new_top_addr);
 899 
 900     if (live > 0 && live <= available) {
 901       // All the live data will fit.
 902       bool done = _summary_data.summarize(_space_info[id].split_info(),
 903                                           space->bottom(), space->top(),
 904                                           nullptr,
 905                                           *new_top_addr, dst_space_end,
 906                                           new_top_addr);
 907       assert(done, "space must fit into old gen");
 908 
 909       // Reset the new_top value for the space.
 910       _space_info[id].set_new_top(space->bottom());
 911     } else if (live > 0) {
 912       // Attempt to fit part of the source space into the target space.
 913       HeapWord* next_src_addr = nullptr;
 914       bool done = _summary_data.summarize(_space_info[id].split_info(),
 915                                           space->bottom(), space->top(),
 916                                           &next_src_addr,
 917                                           *new_top_addr, dst_space_end,
 918                                           new_top_addr);
 919       assert(!done, "space should not fit into old gen");
 920       assert(next_src_addr != nullptr, "sanity");
 921 
 922       // The source space becomes the new target, so the remainder is compacted
 923       // within the space itself.
 924       dst_space_id = SpaceId(id);
 925       dst_space_end = space->end();
 926       new_top_addr = _space_info[id].new_top_addr();
 927       done = _summary_data.summarize(_space_info[id].split_info(),
 928                                      next_src_addr, space->top(),
 929                                      nullptr,
 930                                      space->bottom(), dst_space_end,
 931                                      new_top_addr);
 932       assert(done, "space must fit when compacted into itself");
 933       assert(*new_top_addr <= space->top(), "usage should not grow");
 934     }
 935   }
 936 }
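
// Illustrative flow of the young-gen loop in summary_phase() above: with
// old-gen as the initial target, eden's live data is summarized into old-gen;
// if it does not all fit, the remainder is compacted into eden itself and
// eden becomes the target for from-space, and so on. Each source space thus
// feeds at most two destinations: the current target and the tail of itself.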
 937 
 938 void PSParallelCompact::report_object_count_after_gc() {
 939   GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
  // The heap is compacted, so all objects are iterable. However, there may be
  // filler objects in the heap, which we should ignore.
 942   class SkipFillerObjectClosure : public BoolObjectClosure {
 943   public:
 944     bool do_object_b(oop obj) override { return !CollectedHeap::is_filler_object(obj); }
 945   } cl;
 946   _gc_tracer.report_object_count_after_gc(&cl, &ParallelScavengeHeap::heap()->workers());
 947 }
 948 
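// Overview of one full-GC pause as driven by invoke() below (a summary of the
// major calls it makes, in order):
//
//   pre_compact()         -- retire TLABs, update space bookkeeping, verify
//   marking_phase()       -- parallel marking from roots, reference processing,
//                            class unloading
//   summary_phase()       -- per-region liveness -> destinations, dense prefix
//   forward_to_new_addr() -- install forwarding information for moving objects
//   adjust_pointers()     -- rewrite all references to point to new locations
//   compact()             -- copy live objects to their destinations
//   post_compact()        -- clear bitmap/summary data, set new space tops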
 949 bool PSParallelCompact::invoke(bool clear_all_soft_refs, bool should_do_max_compaction) {
 950   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 951   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
 952          "should be in vm thread");
 953   assert(ref_processor() != nullptr, "Sanity");
 954 
 955   SvcGCMarker sgcm(SvcGCMarker::FULL);
 956   IsSTWGCActiveMark mark;
 957 
 958   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 959 
 960   GCIdMark gc_id_mark;
 961   _gc_timer.register_gc_start();
 962   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
 963 
 964   GCCause::Cause gc_cause = heap->gc_cause();
 965   PSOldGen* old_gen = heap->old_gen();
 966   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 967 
 968   // Make sure data structures are sane, make the heap parsable, and do other
 969   // miscellaneous bookkeeping.
 970   pre_compact();
 971 
 972   const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
 973 
 974   {
 975     const uint active_workers =
 976       WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
 977                                         ParallelScavengeHeap::heap()->workers().active_workers(),
 978                                         Threads::number_of_non_daemon_threads());
 979     ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);
 980 
 981     GCTraceCPUTime tcpu(&_gc_tracer);
 982     GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);
 983 
 984     heap->pre_full_gc_dump(&_gc_timer);
 985 
 986     TraceCollectorStats tcs(counters());
 987     TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC");
 988 
 989     if (log_is_enabled(Debug, gc, heap, exit)) {
 990       accumulated_time()->start();
 991     }
 992 
 993     // Let the size policy know we're starting
 994     size_policy->major_collection_begin();
 995 
 996 #if COMPILER2_OR_JVMCI
 997     DerivedPointerTable::clear();
 998 #endif
 999 
1000     ref_processor()->start_discovery(clear_all_soft_refs);
1001 
1002     marking_phase(&_gc_tracer);
1003 
1004     summary_phase(should_do_max_compaction);
1005 
1006 #if COMPILER2_OR_JVMCI
1007     assert(DerivedPointerTable::is_active(), "Sanity");
1008     DerivedPointerTable::set_active(false);
1009 #endif
1010 
1011     forward_to_new_addr();
1012 
1013     adjust_pointers();
1014 
1015     compact();
1016 
1017     ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());
1018 
1019     ParCompactionManager::verify_all_region_stack_empty();
1020 
1021     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
1022     // done before resizing.
1023     post_compact();
1024 
1025     size_policy->major_collection_end();
1026 
1027     size_policy->sample_old_gen_used_bytes(MAX2(pre_gc_values.old_gen_used(), old_gen->used_in_bytes()));
1028 
1029     if (UseAdaptiveSizePolicy) {
1030       heap->resize_after_full_gc();
1031     }
1032 
1033     heap->resize_all_tlabs();
1034 
1035     // Resize the metaspace capacity after a collection
1036     MetaspaceGC::compute_new_size();
1037 
1038     if (log_is_enabled(Debug, gc, heap, exit)) {
1039       accumulated_time()->stop();
1040     }
1041 
1042     heap->print_heap_change(pre_gc_values);
1043 
1044     report_object_count_after_gc();
1045 
1046     // Track memory usage and detect low memory
1047     MemoryService::track_memory_usage();
1048     heap->update_counters();
1049 
1050     heap->post_full_gc_dump(&_gc_timer);
1051 
1052     size_policy->record_gc_pause_end_instant();
1053   }
1054 
1055   heap->gc_epilogue(true);
1056 
1057   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1058     Universe::verify("After GC");
1059   }
1060 
1061   heap->print_after_gc();
1062   heap->trace_heap_after_gc(&_gc_tracer);
1063 
1064   _gc_timer.register_gc_end();
1065 
1066   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1067   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1068 
1069   return true;
1070 }
1071 
1072 class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
1073   ParCompactionManager* _cm;
1074 
1075 public:
1076   PCAddThreadRootsMarkingTaskClosure(ParCompactionManager* cm) : _cm(cm) { }
1077   void do_thread(Thread* thread) {
1078     ResourceMark rm;
1079 
1080     MarkingNMethodClosure mark_and_push_in_blobs(&_cm->_mark_and_push_closure);
1081 
1082     thread->oops_do(&_cm->_mark_and_push_closure, &mark_and_push_in_blobs);
1083 
1084     // Do the real work
1085     _cm->follow_marking_stacks();
1086   }
1087 };
1088 
1089 void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
1090   assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1091 
1092   ParCompactionManager* cm =
1093     ParCompactionManager::gc_thread_compaction_manager(worker_id);
1094 
1095   do {
1096     ScannerTask task;
1097     if (ParCompactionManager::steal(worker_id, task)) {
1098       cm->follow_contents(task, true);
1099     }
1100     cm->follow_marking_stacks();
1101   } while (!terminator.offer_termination());
1102 }
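
// A note on steal_marking_work() above: each worker alternates between
// stealing a marking task from another worker's queue and draining its own
// marking stacks, then offers termination; the TaskTerminator only lets the
// workers exit once every worker has offered and no more work can be found.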
1103 
1104 class MarkFromRootsTask : public WorkerTask {
1105   NMethodMarkingScope _nmethod_marking_scope;
1106   ThreadsClaimTokenScope _threads_claim_token_scope;
1107   OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
1108   TaskTerminator _terminator;
1109   uint _active_workers;
1110 
1111 public:
1112   MarkFromRootsTask(uint active_workers) :
1113       WorkerTask("MarkFromRootsTask"),
1114       _nmethod_marking_scope(),
1115       _threads_claim_token_scope(),
1116       _terminator(active_workers, ParCompactionManager::marking_stacks()),
1117       _active_workers(active_workers) {}
1118 
1119   virtual void work(uint worker_id) {
1120     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1121     cm->create_marking_stats_cache();
1122     {
1123       CLDToOopClosure cld_closure(&cm->_mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
1124       ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
1125 
1126       // Do the real work
1127       cm->follow_marking_stacks();
1128     }
1129 
1130     {
1131       PCAddThreadRootsMarkingTaskClosure closure(cm);
1132       Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &closure);
1133     }
1134 
1135     // Mark from OopStorages
1136     {
1137       _oop_storage_set_par_state.oops_do(&cm->_mark_and_push_closure);
1138       // Do the real work
1139       cm->follow_marking_stacks();
1140     }
1141 
1142     if (_active_workers > 1) {
1143       steal_marking_work(_terminator, worker_id);
1144     }
1145   }
1146 };
1147 
1148 class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
1149   TaskTerminator _terminator;
1150 
1151 public:
1152   ParallelCompactRefProcProxyTask(uint max_workers)
1153     : RefProcProxyTask("ParallelCompactRefProcProxyTask", max_workers),
1154       _terminator(_max_workers, ParCompactionManager::marking_stacks()) {}
1155 
1156   void work(uint worker_id) override {
1157     assert(worker_id < _max_workers, "sanity");
1158     ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
1159     BarrierEnqueueDiscoveredFieldClosure enqueue;
1160     ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
1161     _rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &cm->_mark_and_push_closure, &enqueue, &complete_gc);
1162   }
1163 
1164   void prepare_run_task_hook() override {
1165     _terminator.reset_for_reuse(_queue_count);
1166   }
1167 };
1168 
1169 static void flush_marking_stats_cache(const uint num_workers) {
1170   for (uint i = 0; i < num_workers; ++i) {
1171     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(i);
1172     cm->flush_and_destroy_marking_stats_cache();
1173   }
1174 }
1175 
1176 class PSParallelCleaningTask : public WorkerTask {
1177   bool                    _unloading_occurred;
1178   CodeCacheUnloadingTask  _code_cache_task;
1179   // Prune dead klasses from subklass/sibling/implementor lists.
1180   KlassCleaningTask       _klass_cleaning_task;
1181 
1182 public:
1183   PSParallelCleaningTask(bool unloading_occurred) :
1184     WorkerTask("PS Parallel Cleaning"),
1185     _unloading_occurred(unloading_occurred),
1186     _code_cache_task(unloading_occurred),
1187     _klass_cleaning_task() {}
1188 
1189   void work(uint worker_id) {
1190 #if INCLUDE_JVMCI
1191     if (EnableJVMCI && worker_id == 0) {
1192       // Serial work; only first worker.
1193       // Clean JVMCI metadata handles.
1194       JVMCI::do_unloading(_unloading_occurred);
1195     }
1196 #endif
1197 
1198     // Do first pass of code cache cleaning.
1199     _code_cache_task.work(worker_id);
1200 
1201     // Clean all klasses that were not unloaded.
1202     // The weak metadata in klass doesn't need to be
1203     // processed if there was no unloading.
1204     if (_unloading_occurred) {
1205       _klass_cleaning_task.work();
1206     }
1207   }
1208 };
1209 
1210 void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
1211   // Recursively traverse all live objects and mark them
1212   GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
1213 
1214   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
1215 
1216   ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
1217   {
1218     GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
1219 
1220     MarkFromRootsTask task(active_gc_threads);
1221     ParallelScavengeHeap::heap()->workers().run_task(&task);
1222   }
1223 
1224   // Process reference objects found during marking
1225   {
1226     GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
1227 
1228     ReferenceProcessorStats stats;
1229     ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());
1230 
1231     ParallelCompactRefProcProxyTask task(ref_processor()->max_num_queues());
1232     stats = ref_processor()->process_discovered_references(task, &ParallelScavengeHeap::heap()->workers(), pt);
1233 
1234     gc_tracer->report_gc_reference_stats(stats);
1235     pt.print_all_references();
1236   }
1237 
1238   {
1239     GCTraceTime(Debug, gc, phases) tm("Flush Marking Stats", &_gc_timer);
1240 
1241     flush_marking_stats_cache(active_gc_threads);
1242   }
1243 
1244   // This is the point where the entire marking should have completed.
1245   ParCompactionManager::verify_all_marking_stack_empty();
1246 
1247   {
1248     GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
1249     WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
1250                                 is_alive_closure(),
1251                                 &do_nothing_cl,
1252                                 1);
1253   }
1254 
1255   {
1256     GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);
1257 
1258     ClassUnloadingContext ctx(active_gc_threads /* num_nmethod_unlink_workers */,
1259                               false /* unregister_nmethods_during_purge */,
1260                               false /* lock_nmethod_free_separately */);
1261 
1262     {
1263       CodeCache::UnlinkingScope scope(is_alive_closure());
1264 
1265       // Follow system dictionary roots and unload classes.
1266       bool unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);
1267 
1268       PSParallelCleaningTask task{unloading_occurred};
1269       ParallelScavengeHeap::heap()->workers().run_task(&task);
1270     }
1271 
1272     {
1273       GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
1274       // Release unloaded nmethod's memory.
1275       ctx.purge_nmethods();
1276     }
1277     {
1278       GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
1279       ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
1280     }
1281     {
1282       GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
1283       ctx.free_nmethods();
1284     }
1285     {
1286       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1287       GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
1288       ClassLoaderDataGraph::purge(true /* at_safepoint */);
1289       DEBUG_ONLY(MetaspaceUtils::verify();)
1290     }
1291   }
1292 
1293 #if TASKQUEUE_STATS
1294   ParCompactionManager::print_and_reset_taskqueue_stats();
1295 #endif
1296 }
1297 
1298 template<typename Func>
1299 void PSParallelCompact::adjust_in_space_helper(SpaceId id, volatile uint* claim_counter, Func&& on_stripe) {
1300   MutableSpace* sp = PSParallelCompact::space(id);
1301   HeapWord* const bottom = sp->bottom();
1302   HeapWord* const top = sp->top();
1303   if (bottom == top) {
1304     return;
1305   }
1306 
1307   const uint num_regions_per_stripe = 2;
1308   const size_t region_size = ParallelCompactData::RegionSize;
1309   const size_t stripe_size = num_regions_per_stripe * region_size;
1310 
1311   while (true) {
1312     uint counter = AtomicAccess::fetch_then_add(claim_counter, num_regions_per_stripe);
1313     HeapWord* cur_stripe = bottom + counter * region_size;
1314     if (cur_stripe >= top) {
1315       break;
1316     }
1317     HeapWord* stripe_end = MIN2(cur_stripe + stripe_size, top);
1318     on_stripe(cur_stripe, stripe_end);
1319   }
1320 }
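
// A sketch of the striping in adjust_in_space_helper() above (illustrative
// worker ids): with num_regions_per_stripe == 2 and RegionSize == 64K words,
// each fetch_then_add hands out a disjoint 128K-word stripe:
//
//   worker A: counter 0 -> [bottom,              bottom + 128K words)
//   worker B: counter 2 -> [bottom + 128K words, bottom + 256K words)
//   ...
//
// Workers keep claiming stripes until the computed stripe start reaches top;
// the final stripe is clipped to top.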
1321 
1322 void PSParallelCompact::adjust_in_old_space(volatile uint* claim_counter) {
1323   // Regions in old-space shouldn't be split.
1324   assert(!_space_info[old_space_id].split_info().is_valid(), "inv");
1325 
1326   auto scan_obj_with_limit = [&] (HeapWord* obj_start, HeapWord* left, HeapWord* right) {
1327     assert(mark_bitmap()->is_marked(obj_start), "inv");
1328     oop obj = cast_to_oop(obj_start);
1329     return obj->oop_iterate_size(&pc_adjust_pointer_closure, MemRegion(left, right));
1330   };
1331 
1332   adjust_in_space_helper(old_space_id, claim_counter, [&] (HeapWord* stripe_start, HeapWord* stripe_end) {
1333     assert(_summary_data.is_region_aligned(stripe_start), "inv");
1334     RegionData* cur_region = _summary_data.addr_to_region_ptr(stripe_start);
1335     HeapWord* obj_start;
1336     if (cur_region->partial_obj_size() != 0) {
1337       obj_start = cur_region->partial_obj_addr();
1338       obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
1339     } else {
1340       obj_start = stripe_start;
1341     }
1342 
1343     while (obj_start < stripe_end) {
1344       obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
1345       if (obj_start >= stripe_end) {
1346         break;
1347       }
1348       obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
1349     }
1350   });
1351 }
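
// Note on adjust_in_old_space() above: an object that starts in an earlier
// stripe but extends into the current one is the stripe's partial obj; it is
// scanned first with the [stripe_start, stripe_end) limit, so each worker
// adjusts only the slice of its fields inside its own stripe and no field is
// visited twice.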
1352 
1353 void PSParallelCompact::adjust_in_young_space(SpaceId id, volatile uint* claim_counter) {
1354   adjust_in_space_helper(id, claim_counter, [](HeapWord* stripe_start, HeapWord* stripe_end) {
1355     HeapWord* obj_start = stripe_start;
1356     while (obj_start < stripe_end) {
1357       obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
1358       if (obj_start >= stripe_end) {
1359         break;
1360       }
1361       oop obj = cast_to_oop(obj_start);
1362       obj_start += obj->oop_iterate_size(&pc_adjust_pointer_closure);
1363     }
1364   });
1365 }
1366 
1367 void PSParallelCompact::adjust_pointers_in_spaces(uint worker_id, volatile uint* claim_counters) {
1368   auto start_time = Ticks::now();
1369   adjust_in_old_space(&claim_counters[0]);
1370   for (uint id = eden_space_id; id < last_space_id; ++id) {
1371     adjust_in_young_space(SpaceId(id), &claim_counters[id]);
1372   }
1373   log_trace(gc, phases)("adjust_pointers_in_spaces worker %u: %.3f ms", worker_id, (Ticks::now() - start_time).seconds() * 1000);
1374 }
1375 
1376 class PSAdjustTask final : public WorkerTask {
1377   ThreadsClaimTokenScope                     _threads_claim_token_scope;
1378   WeakProcessor::Task                        _weak_proc_task;
1379   OopStorageSetStrongParState<false, false>  _oop_storage_iter;
1380   uint                                       _nworkers;
1381   volatile bool                              _code_cache_claimed;
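       // One stripe-claim counter per space, shared by all workers; see
       // PSParallelCompact::adjust_pointers_in_spaces.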
1382   volatile uint _claim_counters[PSParallelCompact::last_space_id] = {};
1383 
1384   bool try_claim_code_cache_task() {
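         // Cheap load first to avoid a contended cmpxchg once the task has been
         // claimed; exactly one worker wins the cmpxchg and walks the code cache.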
1385     return AtomicAccess::load(&_code_cache_claimed) == false
1386         && AtomicAccess::cmpxchg(&_code_cache_claimed, false, true) == false;
1387   }
1388 
1389 public:
1390   PSAdjustTask(uint nworkers) :
1391     WorkerTask("PSAdjust task"),
1392     _threads_claim_token_scope(),
1393     _weak_proc_task(nworkers),
1394     _oop_storage_iter(),
1395     _nworkers(nworkers),
1396     _code_cache_claimed(false) {
1397 
1398     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
1399   }
1400 
1401   void work(uint worker_id) {
1402     {
1403       // Pointers in heap.
1404       ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1405       cm->preserved_marks()->adjust_during_full_gc();
1406 
1407       PSParallelCompact::adjust_pointers_in_spaces(worker_id, _claim_counters);
1408     }
1409 
1410     {
1411       // All (strong and weak) CLDs.
1412       CLDToOopClosure cld_closure(&pc_adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
1413       ClassLoaderDataGraph::cld_do(&cld_closure);
1414     }
1415 
1416     {
1417       // Thread stack frames. No need to visit on-stack nmethods, because all
1418       // nmethods are visited in one go via CodeCache::nmethods_do.
1419       ResourceMark rm;
1420       Threads::possibly_parallel_oops_do(_nworkers > 1, &pc_adjust_pointer_closure, nullptr);
1421       if (try_claim_code_cache_task()) {
1422         NMethodToOopClosure adjust_code(&pc_adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
1423         CodeCache::nmethods_do(&adjust_code);
1424       }
1425     }
1426 
1427     {
1428       // VM internal strong and weak roots.
1429       _oop_storage_iter.oops_do(&pc_adjust_pointer_closure);
1430       AlwaysTrueClosure always_alive;
1431       _weak_proc_task.work(worker_id, &always_alive, &pc_adjust_pointer_closure);
1432     }
1433   }
1434 };
1435 
1436 void PSParallelCompact::adjust_pointers() {
1437   // Adjust the pointers to reflect the new locations
1438   GCTraceTime(Info, gc, phases) tm("Adjust Pointers", &_gc_timer);
1439   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1440   PSAdjustTask task(nworkers);
1441   ParallelScavengeHeap::heap()->workers().run_task(&task);
1442 }
1443 
1444 // Split [start, end) evenly for a number of workers and return the
1445 // range for worker_id.
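     // For example, splitting [0, 10) across 4 workers yields the ranges
     // [0, 3), [3, 6), [6, 8) and [8, 10).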
1446 static void split_regions_for_worker(size_t start, size_t end,
1447                                      uint worker_id, uint num_workers,
1448                                      size_t* worker_start, size_t* worker_end) {
1449   assert(start < end, "precondition");
1450   assert(num_workers > 0, "precondition");
1451   assert(worker_id < num_workers, "precondition");
1452 
1453   size_t num_regions = end - start;
1454   size_t num_regions_per_worker = num_regions / num_workers;
1455   size_t remainder = num_regions % num_workers;
1456   // The first few workers will get one extra.
1457   *worker_start = start + worker_id * num_regions_per_worker
1458                   + MIN2(checked_cast<size_t>(worker_id), remainder);
1459   *worker_end = *worker_start + num_regions_per_worker
1460                 + (worker_id < remainder ? 1 : 0);
1461 }
1462 
1463 static bool safe_to_read_header(size_t words) {
1464   precond(words > 0);
1465 
1466   // Safe to read if we have enough words for the full header, i.e., both
1467   // markWord and Klass pointer.
1468   const bool safe = words >= (size_t)oopDesc::header_size();
1469 
1470   // If using Compact Object Headers, the full header is inside the markWord,
1471   // so it will always be safe to read.
1472   assert(!UseCompactObjectHeaders || safe, "Compact Object Headers should always be safe");
1473 
1474   return safe;
1475 }
1476 
1477 void PSParallelCompact::forward_to_new_addr() {
1478   GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1479   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1480 
1481   struct ForwardTask final : public WorkerTask {
1482     uint _num_workers;
1483 
1484     explicit ForwardTask(uint num_workers) :
1485       WorkerTask("PSForward task"),
1486       _num_workers(num_workers) {}
1487 
1488     static bool should_preserve_mark(oop obj, HeapWord* end_addr) {
1489       size_t remaining_words = pointer_delta(end_addr, cast_from_oop<HeapWord*>(obj));
1490 
1491       if (Arguments::is_valhalla_enabled() && !safe_to_read_header(remaining_words)) {
1492         // When using Valhalla, it might be necessary to preserve the Valhalla-
1493         // specific bits in the markWord. If the entire object header is
1494         // copied, the correct markWord (with the appropriate Valhalla bits)
1495         // can be safely read from the Klass. However, if the full header is
1496         // not copied, we cannot safely read the Klass to obtain this information.
1497         // In such cases, we always preserve the markWord to ensure that all
1498         // relevant bits, including Valhalla-specific ones, are retained.
1499         return true;
1500       } else {
1501         return obj->mark().must_be_preserved();
1502       }
1503     }
1504 
1505     static void forward_objs_in_range(ParCompactionManager* cm,
1506                                       HeapWord* start,
1507                                       HeapWord* end,
1508                                       HeapWord* destination) {
1509       HeapWord* cur_addr = start;
1510       HeapWord* new_addr = destination;
1511 
1512       while (cur_addr < end) {
1513         cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end);
1514         if (cur_addr >= end) {
1515           return;
1516         }
1517         assert(mark_bitmap()->is_marked(cur_addr), "inv");
1518         oop obj = cast_to_oop(cur_addr);
1519 
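             // Only objs that actually move are forwarded. Installing the forwarding
             // pointer overwrites the markWord, so a mark that cannot be reconstructed
             // afterwards is preserved first.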
1520         if (new_addr != cur_addr) {
1521           if (should_preserve_mark(obj, end)) {
1522             cm->preserved_marks()->push_always(obj, obj->mark());
1523           }
1524 
1525           FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
1526         }
1527         size_t obj_size = obj->size();
1528         new_addr += obj_size;
1529         cur_addr += obj_size;
1530       }
1531     }
1532 
1533     void work(uint worker_id) override {
1534       ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1535       for (uint id = old_space_id; id < last_space_id; ++id) {
1536         MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1537         HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1538         HeapWord* top = sp->top();
1539 
1540         if (dense_prefix_addr == top) {
1541           // Empty space
1542           continue;
1543         }
1544 
1545         const SplitInfo& split_info = _space_info[SpaceId(id)].split_info();
1546         size_t dense_prefix_region = _summary_data.addr_to_region_idx(dense_prefix_addr);
1547         size_t top_region = _summary_data.addr_to_region_idx(_summary_data.region_align_up(top));
1548         size_t start_region;
1549         size_t end_region;
1550         split_regions_for_worker(dense_prefix_region, top_region,
1551                                  worker_id, _num_workers,
1552                                  &start_region, &end_region);
1553         for (size_t cur_region = start_region; cur_region < end_region; ++cur_region) {
1554           RegionData* region_ptr = _summary_data.region(cur_region);
1555           size_t partial_obj_size = region_ptr->partial_obj_size();
1556 
1557           if (partial_obj_size == ParallelCompactData::RegionSize) {
1558             // No obj-start
1559             continue;
1560           }
1561 
1562           HeapWord* region_start = _summary_data.region_to_addr(cur_region);
1563           HeapWord* region_end = region_start + ParallelCompactData::RegionSize;
1564 
1565           if (split_info.is_split(cur_region)) {
1566             // Part 1: will be relocated to space-1
1567             HeapWord* preceding_destination = split_info.preceding_destination();
1568             HeapWord* split_point = split_info.split_point();
1569             forward_objs_in_range(cm, region_start + partial_obj_size, split_point, preceding_destination + partial_obj_size);
1570 
1571             // Part 2: will be relocated to space-2
1572             HeapWord* destination = region_ptr->destination();
1573             forward_objs_in_range(cm, split_point, region_end, destination);
1574           } else {
1575             HeapWord* destination = region_ptr->destination();
1576             forward_objs_in_range(cm, region_start + partial_obj_size, region_end, destination + partial_obj_size);
1577           }
1578         }
1579       }
1580     }
1581   } task(nworkers);
1582 
1583   ParallelScavengeHeap::heap()->workers().run_task(&task);
1584   DEBUG_ONLY(verify_forward();)
1585 }
1586 
1587 #ifdef ASSERT
1588 void PSParallelCompact::verify_forward() {
1589   HeapWord* const old_dense_prefix_addr = dense_prefix(SpaceId(old_space_id));
1590   // The destination addr for the first live obj after dense-prefix.
1591   HeapWord* bump_ptr = old_dense_prefix_addr
1592                      + _summary_data.addr_to_region_ptr(old_dense_prefix_addr)->partial_obj_size();
1593   SpaceId bump_ptr_space = old_space_id;
1594 
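       // Walk all live objs after the dense prefix in address order and recompute their
       // destinations with a bump pointer; the recorded forwardee (or its absence, for
       // objs that do not move) must match.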
1595   for (uint id = old_space_id; id < last_space_id; ++id) {
1596     MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1597     // Only verify objs after dense-prefix, because those before dense-prefix are not moved (forwarded).
1598     HeapWord* cur_addr = dense_prefix(SpaceId(id));
1599     HeapWord* top = sp->top();
1600 
1601     while (cur_addr < top) {
1602       cur_addr = mark_bitmap()->find_obj_beg(cur_addr, top);
1603       if (cur_addr >= top) {
1604         break;
1605       }
1606       assert(mark_bitmap()->is_marked(cur_addr), "inv");
1607       assert(bump_ptr <= _space_info[bump_ptr_space].new_top(), "inv");
1608       // Move to the space containing cur_addr
1609       if (bump_ptr == _space_info[bump_ptr_space].new_top()) {
1610         bump_ptr = space(space_id(cur_addr))->bottom();
1611         bump_ptr_space = space_id(bump_ptr);
1612       }
1613       oop obj = cast_to_oop(cur_addr);
1614       if (cur_addr == bump_ptr) {
1615         assert(!FullGCForwarding::is_forwarded(obj), "inv");
1616       } else {
1617         assert(FullGCForwarding::forwardee(obj) == cast_to_oop(bump_ptr), "inv");
1618       }
1619       bump_ptr += obj->size();
1620       cur_addr += obj->size();
1621     }
1622   }
1623 }
1624 #endif
1625 
1626 // Helper class to print 8 region numbers per line and then print the total at the end.
1627 class FillableRegionLogger : public StackObj {
1628 private:
1629   Log(gc, compaction) log;
1630   static const int LineLength = 8;
1631   size_t _regions[LineLength];
1632   int _next_index;
1633   bool _enabled;
1634   size_t _total_regions;
1635 public:
1636   FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
1637   ~FillableRegionLogger() {
1638     log.trace("%zu initially fillable regions", _total_regions);
1639   }
1640 
1641   void print_line() {
1642     if (!_enabled || _next_index == 0) {
1643       return;
1644     }
1645     FormatBuffer<> line("Fillable: ");
1646     for (int i = 0; i < _next_index; i++) {
1647       line.append(" %7zu", _regions[i]);
1648     }
1649     log.trace("%s", line.buffer());
1650     _next_index = 0;
1651   }
1652 
1653   void handle(size_t region) {
1654     if (!_enabled) {
1655       return;
1656     }
1657     _regions[_next_index++] = region;
1658     if (_next_index == LineLength) {
1659       print_line();
1660     }
1661     _total_regions++;
1662   }
1663 };
1664 
1665 void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
1666 {
1667   GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
1668 
1669   // Worker id used to assign regions to task queues in round-robin fashion.
1670   uint worker_id = 0;
1671 
1672   // Find all regions that are available (can be filled immediately) and
1673   // distribute them to the thread stacks.  The iteration is done in reverse
1674   // order (high to low) so the regions will be removed in ascending order.
1675 
1676   const ParallelCompactData& sd = PSParallelCompact::summary_data();
1677 
1678   // id + 1 is used to test loop termination so that an unsigned loop variable
1679   // can be used even though old_space_id == 0.
1680   FillableRegionLogger region_logger;
1681   for (unsigned int id = last_space_id - 1; id + 1 > old_space_id; --id) {
1682     SpaceInfo* const space_info = _space_info + id;
1683     HeapWord* const new_top = space_info->new_top();
1684 
1685     const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
1686     const size_t end_region =
1687       sd.addr_to_region_idx(sd.region_align_up(new_top));
1688 
1689     for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
1690       if (sd.region(cur)->claim_unsafe()) {
1691         ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1692         bool result = sd.region(cur)->mark_normal();
1693         assert(result, "Must succeed at this point.");
1694         cm->region_stack()->push(cur);
1695         region_logger.handle(cur);
1696         // Assign regions to tasks in round-robin fashion.
1697         if (++worker_id == parallel_gc_threads) {
1698           worker_id = 0;
1699         }
1700       }
1701     }
1702     region_logger.print_line();
1703   }
1704 }
1705 
1706 static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
1707   assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1708 
1709   ParCompactionManager* cm =
1710     ParCompactionManager::gc_thread_compaction_manager(worker_id);
1711 
1712   // Drain the stacks that have been preloaded with regions
1713   // that are ready to fill.
1714 
1715   cm->drain_region_stacks();
1716 
1717   guarantee(cm->region_stack()->is_empty(), "Not empty");
1718 
1719   size_t region_index = 0;
1720 
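       // Keep alternating between stealing ready regions from other workers and
       // claiming unavailable regions to fill via shadow regions, until all workers
       // agree to terminate.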
1721   while (true) {
1722     if (ParCompactionManager::steal(worker_id, region_index)) {
1723       PSParallelCompact::fill_and_update_region(cm, region_index);
1724       cm->drain_region_stacks();
1725     } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
1726       // Fill and update an unavailable region with the help of a shadow region
1727       PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
1728       cm->drain_region_stacks();
1729     } else {
1730       if (terminator->offer_termination()) {
1731         break;
1732       }
1733       // Go around again.
1734     }
1735   }
1736 }
1737 
1738 class FillDensePrefixAndCompactionTask: public WorkerTask {
1739   TaskTerminator _terminator;
1740 
1741 public:
1742   FillDensePrefixAndCompactionTask(uint active_workers) :
1743       WorkerTask("FillDensePrefixAndCompactionTask"),
1744       _terminator(active_workers, ParCompactionManager::region_task_queues()) {
1745   }
1746 
1747   virtual void work(uint worker_id) {
1748     if (worker_id == 0) {
1749       auto start = Ticks::now();
1750       PSParallelCompact::fill_dead_objs_in_dense_prefix();
1751       log_trace(gc, phases)("Fill dense prefix by worker 0: %.3f ms", (Ticks::now() - start).seconds() * 1000);
1752     }
1753     compaction_with_stealing_work(&_terminator, worker_id);
1754   }
1755 };
1756 
1757 void PSParallelCompact::fill_range_in_dense_prefix(HeapWord* start, HeapWord* end) {
1758 #ifdef ASSERT
1759   {
1760     assert(start < end, "precondition");
1761     assert(mark_bitmap()->find_obj_beg(start, end) == end, "precondition");
1762     HeapWord* bottom = _space_info[old_space_id].space()->bottom();
1763     if (start != bottom) {
1764       // The preceding live obj.
1765       HeapWord* obj_start = mark_bitmap()->find_obj_beg_reverse(bottom, start);
1766       HeapWord* obj_end = obj_start + cast_to_oop(obj_start)->size();
1767       assert(obj_end == start, "precondition");
1768     }
1769   }
1770 #endif
1771 
1772   CollectedHeap::fill_with_objects(start, pointer_delta(end, start));
1773   HeapWord* addr = start;
1774   do {
1775     size_t size = cast_to_oop(addr)->size();
1776     start_array(old_space_id)->update_for_block(addr, addr + size);
1777     addr += size;
1778   } while (addr < end);
1779 }
1780 
1781 void PSParallelCompact::fill_dead_objs_in_dense_prefix() {
1782   ParMarkBitMap* bitmap = mark_bitmap();
1783 
1784   HeapWord* const bottom = _space_info[old_space_id].space()->bottom();
1785   HeapWord* const prefix_end = dense_prefix(old_space_id);
1786 
1787   const size_t region_size = ParallelCompactData::RegionSize;
1788 
1789   // Fill dead space in [start_addr, end_addr)
1790   HeapWord* const start_addr = bottom;
1791   HeapWord* const end_addr   = prefix_end;
1792 
1793   for (HeapWord* cur_addr = start_addr; cur_addr < end_addr; /* empty */) {
1794     RegionData* cur_region_ptr = _summary_data.addr_to_region_ptr(cur_addr);
1795     if (cur_region_ptr->data_size() == region_size) {
1796       // Full; no dead space. Next region.
1797       if (_summary_data.is_region_aligned(cur_addr)) {
1798         cur_addr += region_size;
1799       } else {
1800         cur_addr = _summary_data.region_align_up(cur_addr);
1801       }
1802       continue;
1803     }
1804 
1805     // Fill dead space inside cur_region.
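         // If region-aligned, first skip the live tail of an obj extending in from the
         // preceding region; only the space after it can be dead.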
1806     if (_summary_data.is_region_aligned(cur_addr)) {
1807       cur_addr += cur_region_ptr->partial_obj_size();
1808     }
1809 
1810     HeapWord* region_end_addr = _summary_data.region_align_up(cur_addr + 1);
1811     assert(region_end_addr <= end_addr, "inv");
1812     while (cur_addr < region_end_addr) {
1813       // Use end_addr to allow filler-obj to cross region boundary.
1814       HeapWord* live_start = bitmap->find_obj_beg(cur_addr, end_addr);
1815       if (cur_addr != live_start) {
1816         // Found dead space [cur_addr, live_start).
1817         fill_range_in_dense_prefix(cur_addr, live_start);
1818       }
1819       if (live_start >= region_end_addr) {
1820         cur_addr = live_start;
1821         break;
1822       }
1823       assert(bitmap->is_marked(live_start), "inv");
1824       cur_addr = live_start + cast_to_oop(live_start)->size();
1825     }
1826   }
1827 }
1828 
1829 void PSParallelCompact::compact() {
1830   GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
1831 
1832   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
1833 
1834   initialize_shadow_regions(active_gc_threads);
1835   prepare_region_draining_tasks(active_gc_threads);
1836 
1837   {
1838     GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
1839 
1840     FillDensePrefixAndCompactionTask task(active_gc_threads);
1841     ParallelScavengeHeap::heap()->workers().run_task(&task);
1842 
1843 #ifdef  ASSERT
1844     verify_filler_in_dense_prefix();
1845 
1846     // Verify that all regions have been processed.
1847     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1848       verify_complete(SpaceId(id));
1849     }
1850 #endif
1851   }
1852 }
1853 
1854 #ifdef  ASSERT
1855 void PSParallelCompact::verify_filler_in_dense_prefix() {
1856   HeapWord* bottom = _space_info[old_space_id].space()->bottom();
1857   HeapWord* dense_prefix_end = dense_prefix(old_space_id);
1858 
1859   const size_t region_size = ParallelCompactData::RegionSize;
1860 
1861   for (HeapWord* cur_addr = bottom; cur_addr < dense_prefix_end; /* empty */) {
1862     RegionData* cur_region_ptr = _summary_data.addr_to_region_ptr(cur_addr);
1863     if (cur_region_ptr->data_size() == region_size) {
1864       // Full; no dead space. Next region.
1865       if (_summary_data.is_region_aligned(cur_addr)) {
1866         cur_addr += region_size;
1867       } else {
1868         cur_addr = _summary_data.region_align_up(cur_addr);
1869       }
1870       continue;
1871     }
1872 
1873     // This region contains filler objs.
1874     if (_summary_data.is_region_aligned(cur_addr)) {
1875       cur_addr += cur_region_ptr->partial_obj_size();
1876     }
1877 
1878     HeapWord* region_end_addr = _summary_data.region_align_up(cur_addr + 1);
1879     assert(region_end_addr <= dense_prefix_end, "inv");
1880 
1881     while (cur_addr < region_end_addr) {
1882       oop obj = cast_to_oop(cur_addr);
1883       oopDesc::verify(obj);
1884       if (!mark_bitmap()->is_marked(cur_addr)) {
1885         assert(CollectedHeap::is_filler_object(cast_to_oop(cur_addr)), "inv");
1886       }
1887       cur_addr += obj->size();
1888     }
1889   }
1890 }
1891 
1892 void PSParallelCompact::verify_complete(SpaceId space_id) {
1893   // All Regions served as compaction targets, from dense_prefix() to
1894   // new_top(), should be marked as filled and all Regions between new_top()
1895   // and top() should be available (i.e., should have been emptied).
1896   ParallelCompactData& sd = summary_data();
1897   SpaceInfo si = _space_info[space_id];
1898   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
1899   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
1900   const size_t beg_region = sd.addr_to_region_idx(si.dense_prefix());
1901   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
1902   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
1903 
1904   size_t cur_region;
1905   for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
1906     const RegionData* const c = sd.region(cur_region);
1907     assert(c->completed(), "region %zu not filled: destination_count=%u",
1908            cur_region, c->destination_count());
1909   }
1910 
1911   for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
1912     const RegionData* const c = sd.region(cur_region);
1913     assert(c->available(), "region %zu not empty: destination_count=%u",
1914            cur_region, c->destination_count());
1915   }
1916 }
1917 #endif  // #ifdef ASSERT
1918 
1919 // Return the SpaceId for the space containing addr.  If addr is not in the
1920 // heap, last_space_id is returned.  In debug mode it expects the address to be
1921 // in the heap and asserts such.
1922 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
1923   assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
1924 
1925   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1926     if (_space_info[id].space()->contains(addr)) {
1927       return SpaceId(id);
1928     }
1929   }
1930 
1931   assert(false, "no space contains the addr");
1932   return last_space_id;
1933 }
1934 
1935 // Skip over count live words starting from beg, and return the address of the
1936 // next live word. Callers must ensure that there are enough live words in
1937 // the range [beg, end) to skip.
1938 HeapWord* PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
1939 {
1940   ParMarkBitMap* m = mark_bitmap();
1941   HeapWord* cur_addr = beg;
1942   while (true) {
1943     cur_addr = m->find_obj_beg(cur_addr, end);
1944     assert(cur_addr < end, "inv");
1945     size_t obj_size = cast_to_oop(cur_addr)->size();
1946     // Strictly greater-than
1947     if (obj_size > count) {
1948       return cur_addr + count;
1949     }
1950     count -= obj_size;
1951     cur_addr += obj_size;
1952   }
1953 }
1954 
1955 // On starting to fill a destination region (dest-region), we need to know the
1956 // location of the word that will be at the start of the dest-region after
1957 // compaction. A dest-region can have one or more source regions, but only the
1958 // first source-region contains this location. This location is retrieved by
1959 // calling `first_src_addr` on a dest-region.
1960 // Conversely, a source-region has a dest-region which holds the destination of
1961 // the first live word on this source-region, based on which the destination
1962 // for the rest of live words can be derived.
1963 //
1964 // Note:
1965 // There is some complication due to space-boundary-fragmentation (an obj can't
1966 // cross space-boundary) -- a source-region may be split and behave like two
1967 // distinct regions with their own dest-region, as depicted below.
1968 //
1969 // source-region: region-n
1970 //
1971 // **********************
1972 // |     A|A~~~~B|B     |
1973 // **********************
1974 //    n-1     n     n+1
1975 //
1976 // AA, BB denote two live objs. ~~~~ denotes unknown number of live objs.
1977 //
1978 // Assuming the dest-region for region-n is the final region before
1979 // old-space-end and its first-live-word is the middle of AA, the heap content
1980 // will look like the following after compaction:
1981 //
1982 // **************                  *************
1983 //      A|A~~~~ |                  |BB    |
1984 // **************                  *************
1985 //              ^                  ^
1986 //              | old-space-end    | eden-space-start
1987 //
1988 // Therefore, in this example, region-n will have two dest-regions:
1989 // 1. the final region in old-space
1990 // 2. the first region in eden-space.
1991 // To handle this special case, we introduce the concept of a split-region, whose
1992 // contents are relocated to two spaces. `SplitInfo` captures all necessary
1993 // info about the split: the first part, the splitting point, and the second part.
1994 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
1995                                             SpaceId src_space_id,
1996                                             size_t src_region_idx)
1997 {
1998   const size_t RegionSize = ParallelCompactData::RegionSize;
1999   const ParallelCompactData& sd = summary_data();
2000   assert(sd.is_region_aligned(dest_addr), "precondition");
2001 
2002   const RegionData* const src_region_ptr = sd.region(src_region_idx);
2003   assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
2004 
2005   const size_t partial_obj_size = src_region_ptr->partial_obj_size();
2006   HeapWord* const src_region_destination = src_region_ptr->destination();
2007 
2008   HeapWord* const region_start = sd.region_to_addr(src_region_idx);
2009   HeapWord* const region_end = sd.region_to_addr(src_region_idx) + RegionSize;
2010 
2011   // Identify the actual destination for the first live words on this region,
2012   // taking split-region into account.
2013   HeapWord* region_start_destination;
2014   const SplitInfo& split_info = _space_info[src_space_id].split_info();
2015   if (split_info.is_split(src_region_idx)) {
2016     // The second part of this split region; use the recorded split point.
2017     if (dest_addr == src_region_destination) {
2018       return split_info.split_point();
2019     }
2020     region_start_destination = split_info.preceding_destination();
2021   } else {
2022     region_start_destination = src_region_destination;
2023   }
2024 
2025   // Calculate the offset to be skipped
2026   size_t words_to_skip = pointer_delta(dest_addr, region_start_destination);
2027 
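       // The words to skip are taken first from the partial obj at the region start,
       // then from subsequent live words located via the mark bitmap.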
2028   HeapWord* result;
2029   if (partial_obj_size > words_to_skip) {
2030     result = region_start + words_to_skip;
2031   } else {
2032     words_to_skip -= partial_obj_size;
2033     result = skip_live_words(region_start + partial_obj_size, region_end, words_to_skip);
2034   }
2035 
2036   if (split_info.is_split(src_region_idx)) {
2037     assert(result < split_info.split_point(), "postcondition");
2038   } else {
2039     assert(result < region_end, "postcondition");
2040   }
2041 
2042   return result;
2043 }
2044 
2045 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2046                                                      SpaceId src_space_id,
2047                                                      size_t beg_region,
2048                                                      HeapWord* end_addr)
2049 {
2050   ParallelCompactData& sd = summary_data();
2051 
2052 #ifdef ASSERT
2053   MutableSpace* const src_space = _space_info[src_space_id].space();
2054   HeapWord* const beg_addr = sd.region_to_addr(beg_region);
2055   assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
2056          "src_space_id does not match beg_addr");
2057   assert(src_space->contains(end_addr) || end_addr == src_space->end(),
2058          "src_space_id does not match end_addr");
2059 #endif // #ifdef ASSERT
2060 
2061   RegionData* const beg = sd.region(beg_region);
2062   RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
2063 
2064   // Regions up to new_top() are enqueued if they become available.
2065   HeapWord* const new_top = _space_info[src_space_id].new_top();
2066   RegionData* const enqueue_end =
2067     sd.addr_to_region_ptr(sd.region_align_up(new_top));
2068 
2069   for (RegionData* cur = beg; cur < end; ++cur) {
2070     assert(cur->data_size() > 0, "region must have live data");
2071     cur->decrement_destination_count();
2072     if (cur < enqueue_end && cur->available() && cur->claim()) {
2073       if (cur->mark_normal()) {
2074         cm->push_region(sd.region(cur));
2075       } else if (cur->mark_copied()) {
2076         // Try to copy the content of the shadow region back to its corresponding
2077         // heap region if the shadow region is filled. Otherwise, the GC thread
2078         // that fills the shadow region will copy the data back (see
2079         // MoveAndUpdateShadowClosure::complete_region).
2080         copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur));
2081         ParCompactionManager::push_shadow_region_mt_safe(cur->shadow_region());
2082         cur->set_completed();
2083       }
2084     }
2085   }
2086 }
2087 
2088 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
2089                                           SpaceId& src_space_id,
2090                                           HeapWord*& src_space_top,
2091                                           HeapWord* end_addr)
2092 {
2093   ParallelCompactData& sd = PSParallelCompact::summary_data();
2094 
2095   size_t src_region_idx = 0;
2096 
2097   // Skip empty regions (if any) up to the top of the space.
2098   HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
2099   RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
2100   HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
2101   const RegionData* const top_region_ptr = sd.addr_to_region_ptr(top_aligned_up);
2102 
2103   while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
2104     ++src_region_ptr;
2105   }
2106 
2107   if (src_region_ptr < top_region_ptr) {
2108     // Found the first non-empty region in the same space.
2109     src_region_idx = sd.region(src_region_ptr);
2110     closure.set_source(sd.region_to_addr(src_region_idx));
2111     return src_region_idx;
2112   }
2113 
2114   // Switch to a new source space and find the first non-empty region.
2115   uint space_id = src_space_id + 1;
2116   assert(space_id < last_space_id, "not enough spaces");
2117 
2118   for (/* empty */; space_id < last_space_id; ++space_id) {
2119     HeapWord* bottom = _space_info[space_id].space()->bottom();
2120     HeapWord* top = _space_info[space_id].space()->top();
2121     // Skip empty space
2122     if (bottom == top) {
2123       continue;
2124     }
2125 
2126     // Identify the first region that contains live words in this space
2127     size_t cur_region = sd.addr_to_region_idx(bottom);
2128     size_t end_region = sd.addr_to_region_idx(sd.region_align_up(top));
2129 
2130     for (/* empty */ ; cur_region < end_region; ++cur_region) {
2131       RegionData* cur = sd.region(cur_region);
2132       if (cur->live_obj_size() > 0) {
2133         HeapWord* region_start_addr = sd.region_to_addr(cur_region);
2134 
2135         src_space_id = SpaceId(space_id);
2136         src_space_top = top;
2137         closure.set_source(region_start_addr);
2138         return cur_region;
2139       }
2140     }
2141   }
2142 
2143   ShouldNotReachHere();
2144 }
2145 
2146 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2147   ParallelCompactData& sd = summary_data();
2148   assert(sd.is_region_aligned(region_start_addr), "precondition");
2149 
2150   // Use per-region partial_obj_size to locate the end of the obj that extends
2151   // onto region_start_addr.
2152   size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2153   size_t end_region_idx = sd.region_count();
2154   size_t accumulated_size = 0;
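       // A region fully covered by the obj records a full RegionSize as its
       // partial_obj_size; keep accumulating until a region that is only partially
       // covered is reached.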
2155   for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2156     size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2157     accumulated_size += cur_partial_obj_size;
2158     if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2159       break;
2160     }
2161   }
2162   return region_start_addr + accumulated_size;
2163 }
2164 
2165 static markWord safe_mark_word_prototype(HeapWord* cur_addr, HeapWord* end_addr) {
2166   // If the original markWord contains bits that cannot be reconstructed because
2167   // the header cannot be safely read, a placeholder is used. In this case,
2168   // the correct markWord is preserved before compaction and restored after
2169   // compaction completes.
2170   size_t remaining_words = pointer_delta(end_addr, cur_addr);
2171 
2172   if (UseCompactObjectHeaders || (Arguments::is_valhalla_enabled() && safe_to_read_header(remaining_words))) {
2173     return cast_to_oop(cur_addr)->klass()->prototype_header();
2174   } else {
2175     return markWord::prototype();
2176   }
2177 }
2178 
2179 // Use region_idx as the destination region, and evacuate all live objs on its
2180 // source regions to this destination region.
2181 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2182 {
2183   ParMarkBitMap* const bitmap = mark_bitmap();
2184   ParallelCompactData& sd = summary_data();
2185   RegionData* const region_ptr = sd.region(region_idx);
2186 
2187   // Get the source region and related info.
2188   size_t src_region_idx = region_ptr->source_region();
2189   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2190   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2191   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2192 
2193   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2194 
2195   // Adjust src_region_idx to prepare for decrementing destination counts (the
2196   // destination count is not decremented when a region is copied to itself).
2197   if (src_region_idx == region_idx) {
2198     src_region_idx += 1;
2199   }
2200 
2201   // source-region:
2202   //
2203   // **********
2204   // |   ~~~  |
2205   // **********
2206   //      ^
2207   //      |-- closure.source() / first_src_addr
2208   //
2209   //
2210   // ~~~ : live words
2211   //
2212   // destination-region:
2213   //
2214   // **********
2215   // |        |
2216   // **********
2217   // ^
2218   // |-- region-start
2219   if (bitmap->is_unmarked(closure.source())) {
2220     // An object overflows the previous destination region, so this
2221     // destination region should copy the remainder of the object or as much as
2222     // will fit.
2223     HeapWord* const old_src_addr = closure.source();
2224     {
2225       HeapWord* region_start = sd.region_align_down(closure.source());
2226       HeapWord* obj_start = bitmap->find_obj_beg_reverse(region_start, closure.source());
2227       HeapWord* obj_end;
2228       if (obj_start != closure.source()) {
2229         assert(bitmap->is_marked(obj_start), "inv");
2230         // Found the actual obj-start; determine the obj-end using size() if this
2231         // obj is completely contained in the current region, or partial_obj_end() otherwise.
2232         HeapWord* next_region_start = region_start + ParallelCompactData::RegionSize;
2233         HeapWord* partial_obj_start = (next_region_start >= src_space_top)
2234                                       ? nullptr
2235                                       : sd.addr_to_region_ptr(next_region_start)->partial_obj_addr();
2236         // This obj extends to next region iff partial_obj_addr of the *next*
2237         // region is the same as obj-start.
2238         if (partial_obj_start == obj_start) {
2239           // This obj extends to next region.
2240           obj_end = partial_obj_end(next_region_start);
2241         } else {
2242           // Completely contained in this region; safe to use size().
2243           obj_end = obj_start + cast_to_oop(obj_start)->size();
2244         }
2245       } else {
2246         // This obj extends to current region.
2247         obj_end = partial_obj_end(region_start);
2248       }
2249       size_t partial_obj_size = pointer_delta(obj_end, closure.source());
2250       closure.copy_partial_obj(partial_obj_size);
2251     }
2252 
2253     if (closure.is_full()) {
2254       decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2255       closure.complete_region(dest_addr, region_ptr);
2256       return;
2257     }
2258 
2259     // Finished copying without using up the current destination-region
2260     HeapWord* const end_addr = sd.region_align_down(closure.source());
2261     if (sd.region_align_down(old_src_addr) != end_addr) {
2262       assert(sd.region_align_up(old_src_addr) == end_addr, "only one region");
2263       // The partial object was copied from more than one source region.
2264       decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2265 
2266       // Move to the next source region, possibly switching spaces as well.  All
2267       // args except end_addr may be modified.
2268       src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2269     }
2270   }
2271 
2272   // Handle the rest obj-by-obj, where we know obj-start.
2273   do {
2274     HeapWord* cur_addr = closure.source();
2275     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2276                                     src_space_top);
2277     // To handle the case where the final obj in source region extends to next region.
2278     HeapWord* final_obj_start = (end_addr == src_space_top)
2279                                 ? nullptr
2280                                 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2281     // Apply closure on objs inside [cur_addr, end_addr)
2282     do {
2283       cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2284       if (cur_addr == end_addr) {
2285         break;
2286       }
2287       size_t obj_size;
2288       if (final_obj_start == cur_addr) {
2289         obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2290       } else {
2291         // This obj doesn't extend into next region; size() is safe to use.
2292         obj_size = cast_to_oop(cur_addr)->size();
2293       }
2294 
2295       markWord mark = safe_mark_word_prototype(cur_addr, end_addr);
2296 
2297       // Perform the move and update of the object
2298       closure.do_addr(cur_addr, obj_size, mark);
2299 
2300       cur_addr += obj_size;
2301     } while (cur_addr < end_addr && !closure.is_full());
2302 
2303     if (closure.is_full()) {
2304       decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2305       closure.complete_region(dest_addr, region_ptr);
2306       return;
2307     }
2308 
2309     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2310 
2311     // Move to the next source region, possibly switching spaces as well.  All
2312     // args except end_addr may be modified.
2313     src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2314   } while (true);
2315 }
2316 
2317 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2318 {
2319   MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2320   fill_region(cm, cl, region_idx);
2321 }
2322 
2323 void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx)
2324 {
2325   // Get a shadow region first
2326   ParallelCompactData& sd = summary_data();
2327   RegionData* const region_ptr = sd.region(region_idx);
2328   size_t shadow_region = ParCompactionManager::pop_shadow_region_mt_safe(region_ptr);
2329   // The InvalidShadow return value indicates the corresponding heap region is available,
2330   // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
2331   // MoveAndUpdateShadowClosure to fill the acquired shadow region.
2332   if (shadow_region == ParCompactionManager::InvalidShadow) {
2333     MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2334     region_ptr->shadow_to_normal();
2335     return fill_region(cm, cl, region_idx);
2336   } else {
2337     MoveAndUpdateShadowClosure cl(mark_bitmap(), region_idx, shadow_region);
2338     return fill_region(cm, cl, region_idx);
2339   }
2340 }
2341 
2342 void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr)
2343 {
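       // Both addresses are region-aligned; copy one full region worth of words from
       // the shadow region back to its corresponding heap region.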
2344   Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
2345 }
2346 
2347 bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_t &region_idx)
2348 {
2349   size_t next = cm->next_shadow_region();
2350   ParallelCompactData& sd = summary_data();
2351   size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
2352   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
2353 
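       // Each worker starts at a distinct candidate region (see initialize_shadow_regions)
       // and strides by the number of active workers, so workers probe disjoint sequences
       // of regions below old-space new_top.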
2354   while (next < old_new_top) {
2355     if (sd.region(next)->mark_shadow()) {
2356       region_idx = next;
2357       return true;
2358     }
2359     next = cm->move_next_shadow_region_by(active_gc_threads);
2360   }
2361 
2362   return false;
2363 }
2364 
2365 // The shadow region is an optimization to address region dependencies in full GC. The basic
2366 // idea is to make more regions available by temporarily storing their live objects in empty
2367 // shadow regions to resolve dependencies between them and the destination regions. Therefore,
2368 // GC threads need not wait for destination regions to be available before processing sources.
2369 //
2370 // A typical workflow would be:
2371 // After draining its own stack and failing to steal from others, a GC worker would pick an
2372 // unavailable region (destination count > 0) and get a shadow region. Then the worker fills
2373 // the shadow region by copying live objects from source regions of the unavailable one. Once
2374 // the unavailable region becomes available, the data in the shadow region will be copied back.
2375 // Shadow regions are empty regions in the to-space and regions between top and end of other spaces.
2376 void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
2377 {
2378   const ParallelCompactData& sd = PSParallelCompact::summary_data();
2379 
2380   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2381     SpaceInfo* const space_info = _space_info + id;
2382     MutableSpace* const space = space_info->space();
2383 
2384     const size_t beg_region =
2385       sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
2386     const size_t end_region =
2387       sd.addr_to_region_idx(sd.region_align_down(space->end()));
2388 
2389     for (size_t cur = beg_region; cur < end_region; ++cur) {
2390       ParCompactionManager::push_shadow_region(cur);
2391     }
2392   }
2393 
2394   size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
2395   for (uint i = 0; i < parallel_gc_threads; i++) {
2396     ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
2397     cm->set_next_shadow_region(beg_region + i);
2398   }
2399 }
2400 
2401 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2402 {
2403   size_t words = MIN2(partial_obj_size, words_remaining());
2404 
2405   // This test is necessary; if omitted, the pointer updates to a partial object
2406   // that crosses the dense prefix boundary could be overwritten.
2407   if (source() != copy_destination()) {
2408     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2409     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2410   }
2411   update_state(words);
2412 }
2413 
2414 void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2415   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
2416   region_ptr->set_completed();
2417 }
2418 
2419 void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words, markWord mark) {
2420   assert(destination() != nullptr, "sanity");
2421   _source = addr;
2422 
2423   // The start_array must be updated even if the object is not moving.
2424   if (_start_array != nullptr) {
2425     _start_array->update_for_block(destination(), destination() + words);
2426   }
2427 
2428   // Clamp to the space remaining in this destination region; any tail of the object is copied later as a partial object.
2429   words = MIN2(words, words_remaining());
2430   assert(words > 0, "inv");
2431 
2432   if (copy_destination() != source()) {
2433     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2434     assert(source() != destination(), "inv");
2435     assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
2436     assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
2437     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2438     cast_to_oop(copy_destination())->set_mark(mark);
2439   }
2440 
2441   update_state(words);
2442 }
2443 
2444 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2445   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2446   // Record the shadow region index
2447   region_ptr->set_shadow_region(_shadow);
2448   // Mark the shadow region as filled to indicate the data is ready to be
2449   // copied back
2450   region_ptr->mark_filled();
2451   // Try to copy the content of the shadow region back to its corresponding
2452   // heap region if available; the GC thread that decreases the destination
2453   // count to zero will do the copying otherwise (see
2454   // PSParallelCompact::decrement_destination_counts).
2455   if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2456     region_ptr->set_completed();
2457     PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2458     ParCompactionManager::push_shadow_region_mt_safe(_shadow);
2459   }
2460 }