/*
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psStringDedup.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/access.inline.hpp"
#include "oops/instanceClassLoaderKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

#include <math.h>

// All sizes are in HeapWords.
const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
static_assert(ParallelCompactData::RegionSize >= BitsPerWord, "region-start bit word-aligned");
const size_t ParallelCompactData::RegionSizeBytes =
  RegionSize << LogHeapWordSize;
const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;
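// For example, with 8-byte HeapWords (LogHeapWordSize == 3), each region
// covers 64K words, i.e. 512 KiB of heap.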

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_shift = 27;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::los_mask = ~dc_mask;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
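// The constants above describe the layout of RegionData::_dc_and_los: the
// destination count (and the claimed/completed sentinel values) is kept in
// the bits at and above dc_shift, while the live-obj-size is kept in the low
// bits selected by los_mask.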

bool ParallelCompactData::RegionData::is_clear() {
  return (_destination == nullptr) &&
         (_source_region == 0) &&
         (_partial_obj_addr == nullptr) &&
         (_partial_obj_size == 0) &&
         (_dc_and_los == 0) &&
         (_shadow_state == 0);
}

#ifdef ASSERT
void ParallelCompactData::RegionData::verify_clear() {
  assert(_destination == nullptr, "inv");
  assert(_source_region == 0, "inv");
  assert(_partial_obj_addr == nullptr, "inv");
  assert(_partial_obj_size == 0, "inv");
  assert(_dc_and_los == 0, "inv");
  assert(_shadow_state == 0, "inv");
}
#endif

SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];

SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
ReferenceProcessor* PSParallelCompact::_ref_processor = nullptr;

void SplitInfo::record(size_t split_region_idx, HeapWord* split_point, size_t preceding_live_words) {
  assert(split_region_idx != 0, "precondition");

  // Obj denoted by split_point will be deferred to the next space.
  assert(split_point != nullptr, "precondition");

  const ParallelCompactData& sd = PSParallelCompact::summary_data();

  PSParallelCompact::RegionData* split_region_ptr = sd.region(split_region_idx);
  assert(preceding_live_words < split_region_ptr->data_size(), "inv");

  HeapWord* preceding_destination = split_region_ptr->destination();
  assert(preceding_destination != nullptr, "inv");

  // How many regions does the preceding part occupy
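  // (0 if there are no preceding live words, 1 if they all land in a single
  // destination region, 2 if they straddle a destination-region boundary.)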
  uint preceding_destination_count;
  if (preceding_live_words == 0) {
    preceding_destination_count = 0;
  } else {
    // -1 so that the ending address doesn't fall on the region-boundary
    if (sd.region_align_down(preceding_destination) ==
        sd.region_align_down(preceding_destination + preceding_live_words - 1)) {
      preceding_destination_count = 1;
    } else {
      preceding_destination_count = 2;
    }
  }

  _split_region_idx = split_region_idx;
  _split_point = split_point;
  _preceding_live_words = preceding_live_words;
  _preceding_destination = preceding_destination;
  _preceding_destination_count = preceding_destination_count;
}

void SplitInfo::clear()
{
  _split_region_idx = 0;
  _split_point = nullptr;
  _preceding_live_words = 0;
  _preceding_destination = nullptr;
  _preceding_destination_count = 0;
  assert(!is_valid(), "sanity");
}

#ifdef  ASSERT
void SplitInfo::verify_clear()
{
  assert(_split_region_idx == 0, "not clear");
  assert(_split_point == nullptr, "not clear");
  assert(_preceding_live_words == 0, "not clear");
  assert(_preceding_destination == nullptr, "not clear");
  assert(_preceding_destination_count == 0, "not clear");
}
#endif  // #ifdef ASSERT


void PSParallelCompact::print_on(outputStream* st) {
  _mark_bitmap.print_on(st);
}

ParallelCompactData::ParallelCompactData() :
  _heap_start(nullptr),
  DEBUG_ONLY(_heap_end(nullptr) COMMA)
  _region_vspace(nullptr),
  _reserved_byte_size(0),
  _region_data(nullptr),
  _region_count(0) {}

bool ParallelCompactData::initialize(MemRegion reserved_heap)
{
  _heap_start = reserved_heap.start();
  const size_t heap_size = reserved_heap.word_size();
  DEBUG_ONLY(_heap_end = _heap_start + heap_size;)

  assert(region_align_down(_heap_start) == _heap_start,
         "region start not aligned");

  return initialize_region_data(heap_size);
}

PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
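  // Page size selection; the second argument is assumed to be the minimum
  // number of pages the reservation must span.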
  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  const size_t rs_align = MAX2(page_sz, granularity);

  _reserved_byte_size = align_up(raw_bytes, rs_align);

  ReservedSpace rs = MemoryReserver::reserve(_reserved_byte_size,
                                             rs_align,
                                             page_sz,
                                             mtGC);

  if (!rs.is_reserved()) {
    // Failed to reserve memory.
    return nullptr;
  }

  os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
                       rs.size(), page_sz);

  MemTracker::record_virtual_memory_tag(rs, mtGC);

  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);

  if (!vspace->expand_by(_reserved_byte_size)) {
    // Failed to commit memory.

    delete vspace;

    // Release memory reserved in the space.
    MemoryReserver::release(rs);

    return nullptr;
  }

  return vspace;
}

bool ParallelCompactData::initialize_region_data(size_t heap_size)
{
  assert(is_aligned(heap_size, RegionSize), "precondition");

  const size_t count = heap_size >> Log2RegionSize;
  _region_vspace = create_vspace(count, sizeof(RegionData));
  if (_region_vspace != nullptr) {
    _region_data = (RegionData*)_region_vspace->reserved_low_addr();
    _region_count = count;
    return true;
  }
  return false;
}

void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
  assert(beg_region <= _region_count, "beg_region out of range");
  assert(end_region <= _region_count, "end_region out of range");

  const size_t region_cnt = end_region - beg_region;
  memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
}

void
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
{
  assert(is_region_aligned(beg), "not RegionSize aligned");
  assert(is_region_aligned(end), "not RegionSize aligned");

  size_t cur_region = addr_to_region_idx(beg);
  const size_t end_region = addr_to_region_idx(end);
  HeapWord* addr = beg;
  while (cur_region < end_region) {
    _region_data[cur_region].set_destination(addr);
    _region_data[cur_region].set_destination_count(0);
    _region_data[cur_region].set_source_region(cur_region);

    // Update live_obj_size so the region appears completely full.
    size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
    _region_data[cur_region].set_live_obj_size(live_size);

    ++cur_region;
    addr += RegionSize;
  }
}

// The total live words on src_region would overflow the target space, so find
// the overflowing object and record the split point. The invariant is that an
// obj should not cross space boundary.
HeapWord* ParallelCompactData::summarize_split_space(size_t src_region,
                                                     SplitInfo& split_info,
                                                     HeapWord* const destination,
                                                     HeapWord* const target_end,
                                                     HeapWord** target_next) {
  assert(destination <= target_end, "sanity");
  assert(destination + _region_data[src_region].data_size() > target_end,
    "region should not fit into target space");
  assert(is_region_aligned(target_end), "sanity");

  size_t partial_obj_size = _region_data[src_region].partial_obj_size();

  if (destination + partial_obj_size > target_end) {
    assert(partial_obj_size > 0, "inv");
    // The overflowing obj is from a previous region.
    //
    // source-regions:
    //
    // ***************
    // |     A|AA    |
    // ***************
    //       ^
    //       | split-point
    //
    // dest-region:
    //
    // ********
    // |~~~~A |
    // ********
    //       ^^
    //       || target-space-end
    //       |
    //       | destination
    //
    // AAA would overflow target-space.
    //
    HeapWord* overflowing_obj = _region_data[src_region].partial_obj_addr();
    size_t split_region = addr_to_region_idx(overflowing_obj);

    // The number of live words before the overflowing object on this split region
    size_t preceding_live_words;
    if (is_region_aligned(overflowing_obj)) {
      preceding_live_words = 0;
    } else {
      // Words accounted by the overflowing object on the split region
      size_t overflowing_size = pointer_delta(region_align_up(overflowing_obj), overflowing_obj);
      preceding_live_words = region(split_region)->data_size() - overflowing_size;
    }

    split_info.record(split_region, overflowing_obj, preceding_live_words);

    // The [overflowing_obj, src_region_start) part has been accounted for, so
    // must move back the new_top, now that this overflowing obj is deferred.
    HeapWord* new_top = destination - pointer_delta(region_to_addr(src_region), overflowing_obj);

    // If the overflowing obj was relocated to its original destination,
    // those destination regions would have their source_region set. Now that
    // this overflowing obj is relocated somewhere else, reset the
    // source_region.
    {
      size_t range_start = addr_to_region_idx(region_align_up(new_top));
      size_t range_end = addr_to_region_idx(region_align_up(destination));
      for (size_t i = range_start; i < range_end; ++i) {
        region(i)->set_source_region(0);
      }
    }

    // Update new top of target space
    *target_next = new_top;

    return overflowing_obj;
  }

  // Obj-iteration to locate the overflowing obj
  HeapWord* region_start = region_to_addr(src_region);
  HeapWord* region_end = region_start + RegionSize;
  HeapWord* cur_addr = region_start + partial_obj_size;
  size_t live_words = partial_obj_size;

  while (true) {
    assert(cur_addr < region_end, "inv");
    cur_addr = PSParallelCompact::mark_bitmap()->find_obj_beg(cur_addr, region_end);
    // There must be an overflowing obj in this region
    assert(cur_addr < region_end, "inv");

    oop obj = cast_to_oop(cur_addr);
    size_t obj_size = obj->size();
    if (destination + live_words + obj_size > target_end) {
      // Found the overflowing obj
      split_info.record(src_region, cur_addr, live_words);
      *target_next = destination + live_words;
      return cur_addr;
    }

    live_words += obj_size;
    cur_addr += obj_size;
  }
}

size_t ParallelCompactData::live_words_in_space(const MutableSpace* space,
                                                HeapWord** full_region_prefix_end) {
  size_t cur_region = addr_to_region_idx(space->bottom());
  const size_t end_region = addr_to_region_idx(region_align_up(space->top()));
  size_t live_words = 0;
  if (full_region_prefix_end == nullptr) {
    for (/* empty */; cur_region < end_region; ++cur_region) {
      live_words += _region_data[cur_region].data_size();
    }
  } else {
    bool first_set = false;
    for (/* empty */; cur_region < end_region; ++cur_region) {
      size_t live_words_in_region = _region_data[cur_region].data_size();
      if (!first_set && live_words_in_region < RegionSize) {
        *full_region_prefix_end = region_to_addr(cur_region);
        first_set = true;
      }
      live_words += live_words_in_region;
    }
    if (!first_set) {
      // All regions are full of live objs.
      assert(is_region_aligned(space->top()), "inv");
      *full_region_prefix_end = space->top();
    }
    assert(*full_region_prefix_end != nullptr, "postcondition");
    assert(is_region_aligned(*full_region_prefix_end), "inv");
    assert(*full_region_prefix_end >= space->bottom(), "in-range");
    assert(*full_region_prefix_end <= space->top(), "in-range");
  }
  return live_words;
}

bool ParallelCompactData::summarize(SplitInfo& split_info,
                                    HeapWord* source_beg, HeapWord* source_end,
                                    HeapWord** source_next,
                                    HeapWord* target_beg, HeapWord* target_end,
                                    HeapWord** target_next)
{
  HeapWord* const source_next_val = source_next == nullptr ? nullptr : *source_next;
  log_develop_trace(gc, compaction)(
      "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
      " tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
      p2i(source_beg), p2i(source_end), p2i(source_next_val),
      p2i(target_beg), p2i(target_end), p2i(*target_next));

  size_t cur_region = addr_to_region_idx(source_beg);
  const size_t end_region = addr_to_region_idx(region_align_up(source_end));

  HeapWord *dest_addr = target_beg;
  for (/* empty */; cur_region < end_region; cur_region++) {
    size_t words = _region_data[cur_region].data_size();

    // Skip empty ones
    if (words == 0) {
      continue;
    }

    if (split_info.is_split(cur_region)) {
      assert(words > split_info.preceding_live_words(), "inv");
      words -= split_info.preceding_live_words();
    }

    _region_data[cur_region].set_destination(dest_addr);

    // If cur_region does not fit entirely into the target space, find a point
    // at which the source space can be 'split' so that part is copied to the
    // target space and the rest is copied elsewhere.
    if (dest_addr + words > target_end) {
      assert(source_next != nullptr, "source_next is null when splitting");
      *source_next = summarize_split_space(cur_region, split_info, dest_addr,
                                           target_end, target_next);
      return false;
    }

    uint destination_count = split_info.is_split(cur_region)
                             ? split_info.preceding_destination_count()
                             : 0;

    HeapWord* const last_addr = dest_addr + words - 1;
    const size_t dest_region_1 = addr_to_region_idx(dest_addr);
    const size_t dest_region_2 = addr_to_region_idx(last_addr);

    // Initially assume that the destination regions will be the same and
    // adjust the value below if necessary.  Under this assumption, if
    // cur_region == dest_region_2, then cur_region will be compacted
    // completely into itself.
    destination_count += cur_region == dest_region_2 ? 0 : 1;
    if (dest_region_1 != dest_region_2) {
      // Destination regions differ; adjust destination_count.
      destination_count += 1;
      // Data from cur_region will be copied to the start of dest_region_2.
      _region_data[dest_region_2].set_source_region(cur_region);
    } else if (is_region_aligned(dest_addr)) {
      // Data from cur_region will be copied to the start of the destination
      // region.
      _region_data[dest_region_1].set_source_region(cur_region);
    }

    _region_data[cur_region].set_destination_count(destination_count);
    dest_addr += words;
  }

  *target_next = dest_addr;
  return true;
}

#ifdef ASSERT
void ParallelCompactData::verify_clear() {
  for (uint cur_idx = 0; cur_idx < region_count(); ++cur_idx) {
    if (!region(cur_idx)->is_clear()) {
      log_warning(gc)("Uncleared Region: %u", cur_idx);
      region(cur_idx)->verify_clear();
    }
  }
}
#endif  // #ifdef ASSERT

STWGCTimer          PSParallelCompact::_gc_timer;
ParallelOldTracer   PSParallelCompact::_gc_tracer;
elapsedTimer        PSParallelCompact::_accumulated_time;
unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
CollectorCounters*  PSParallelCompact::_counters = nullptr;
ParMarkBitMap       PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

class PCAdjustPointerClosure: public BasicOopIterateClosure {
  template <typename T>
  void do_oop_work(T* p) { PSParallelCompact::adjust_pointer(p); }

public:
  virtual void do_oop(oop* p)                { do_oop_work(p); }
  virtual void do_oop(narrowOop* p)          { do_oop_work(p); }

  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
};

static PCAdjustPointerClosure pc_adjust_pointer_closure;

bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _span_based_discoverer.set_span(heap->reserved_region());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,   // mt processing degree
                           ParallelGCThreads,   // mt discovery degree
                           false,               // concurrent_discovery
                           &_is_alive_closure); // non-header is alive closure

  _counters = new CollectorCounters("Parallel full collection pauses", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}

bool PSParallelCompact::initialize_aux_data() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();
  assert(mr.byte_size() != 0, "heap should be reserved");

  initialize_space_info();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate %zuKB bitmaps for parallel "
      "garbage collection for the requested %zuKB heap.",
      _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  if (!_summary_data.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate %zuKB card tables for parallel "
      "garbage collection for the requested %zuKB heap.",
      _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  return true;
}

void PSParallelCompact::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  _space_info[old_space_id].set_space(heap->old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
}

void
PSParallelCompact::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that will
  // be set at the end of GC.  The marking bitmap is cleared to top; nothing
  // should be marked above top.  The summary data is cleared to the larger of
  // top & new_top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();
  HeapWord* const max_top = MAX2(top, _space_info[id].new_top());

  _mark_bitmap.clear_range(bot, top);

  const size_t beg_region = _summary_data.addr_to_region_idx(bot);
  const size_t end_region =
    _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
  _summary_data.clear_range(beg_region, end_region);

  // Clear the data used to 'split' regions.
  SplitInfo& split_info = _space_info[id].split_info();
  if (split_info.is_valid()) {
    split_info.clear();
  }
  DEBUG_ONLY(split_info.verify_clear();)
}

void PSParallelCompact::pre_compact()
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc.  Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of young
  // collections will have swapped the spaces an unknown number of times.
  GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  heap->increment_total_collections(true);

  CodeCache::on_gc_marking_cycle_start();

  heap->print_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)
}

void PSParallelCompact::post_compact()
{
  GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
  ParCompactionManager::remove_all_shadow_regions();

  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();

  // Need to clear claim bits for the next full-gc (marking and adjust-pointers).
  ClassLoaderDataGraph::clear_claimed_marks();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap, summary data and split info.
    clear_data_covering_space(SpaceId(id));
    {
      MutableSpace* space = _space_info[id].space();
      HeapWord* top = space->top();
      HeapWord* new_top = _space_info[id].new_top();
      if (ZapUnusedHeapArea && new_top < top) {
        space->mangle_region(MemRegion(new_top, top));
      }
      // Update top().  Must be done after clearing the bitmap and summary data.
      space->set_top(new_top);
    }
  }

#ifdef ASSERT
  {
    mark_bitmap()->verify_clear();
    summary_data().verify_clear();
  }
#endif

  ParCompactionManager::flush_all_string_dedup_requests();

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  bool eden_empty = eden_space->is_empty();

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
    to_space->is_empty();

  PSCardTable* ct = heap->card_table();
  MemRegion old_mr = heap->old_gen()->committed();
  if (young_gen_empty) {
    ct->clear_MemRegion(old_mr);
  } else {
    ct->dirty_MemRegion(old_mr);
  }

  heap->prune_scavengable_nmethods();

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();
}

HeapWord* PSParallelCompact::compute_dense_prefix_for_old_space(MutableSpace* old_space,
                                                                HeapWord* full_region_prefix_end) {
  const size_t region_size = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();

  // Iteration starts with the region *after* the full-region-prefix-end.
  const RegionData* const start_region = sd.addr_to_region_ptr(full_region_prefix_end);
  // If final region is not full, iteration stops before that region,
  // because fill_dense_prefix_end assumes that prefix_end <= top.
  const RegionData* const end_region = sd.addr_to_region_ptr(old_space->top());
  assert(start_region <= end_region, "inv");

  size_t max_waste = old_space->capacity_in_words() * (MarkSweepDeadRatio / 100.0);
  const RegionData* cur_region = start_region;
  for (/* empty */; cur_region < end_region; ++cur_region) {
    assert(region_size >= cur_region->data_size(), "inv");
    size_t dead_size = region_size - cur_region->data_size();
    if (max_waste < dead_size) {
      break;
    }
    max_waste -= dead_size;
  }

  HeapWord* const prefix_end = sd.region_to_addr(cur_region);
  assert(sd.is_region_aligned(prefix_end), "postcondition");
  assert(prefix_end >= full_region_prefix_end, "in-range");
  assert(prefix_end <= old_space->top(), "in-range");
  return prefix_end;
}

void PSParallelCompact::fill_dense_prefix_end(SpaceId id) {
  // Comparing two sizes to decide if filling is required:
  //
  // The size of the filler (min-obj-size) is 2 heap words with the default
  // MinObjAlignment, since both markword and klass take 1 heap word.
  // With +UseCompactObjectHeaders, the minimum filler size is only one word,
  // because the Klass* gets encoded in the mark-word.
  //
  // The size of the gap (if any) right before dense-prefix-end is
  // MinObjAlignment.
  //
  // Need to fill in the gap only if it's smaller than min-obj-size, and the
  // filler obj will extend to next region.

  if (MinObjAlignment >= checked_cast<int>(CollectedHeap::min_fill_size())) {
    return;
  }

  assert(!UseCompactObjectHeaders, "Compact headers can allocate small objects");
  assert(CollectedHeap::min_fill_size() == 2, "inv");
  HeapWord* const dense_prefix_end = dense_prefix(id);
  assert(_summary_data.is_region_aligned(dense_prefix_end), "precondition");
  assert(dense_prefix_end <= space(id)->top(), "precondition");
  if (dense_prefix_end == space(id)->top()) {
    // Must not have single-word gap right before prefix-end/top.
    return;
  }
  RegionData* const region_after_dense_prefix = _summary_data.addr_to_region_ptr(dense_prefix_end);

  if (region_after_dense_prefix->partial_obj_size() != 0 ||
      _mark_bitmap.is_marked(dense_prefix_end)) {
    // The region after the dense prefix starts with live bytes.
    return;
  }

  HeapWord* block_start = start_array(id)->block_start_reaching_into_card(dense_prefix_end);
  if (block_start == dense_prefix_end - 1) {
    assert(!_mark_bitmap.is_marked(block_start), "inv");
    // There is exactly one heap word gap right before the dense prefix end, so we need a filler object.
    // The filler object will extend into region_after_dense_prefix.
    const size_t obj_len = 2; // min-fill-size
    HeapWord* const obj_beg = dense_prefix_end - 1;
    CollectedHeap::fill_with_object(obj_beg, obj_len);
    _mark_bitmap.mark_obj(obj_beg);
    _summary_data.addr_to_region_ptr(obj_beg)->add_live_obj(1);
    region_after_dense_prefix->set_partial_obj_size(1);
    region_after_dense_prefix->set_partial_obj_addr(obj_beg);
    assert(start_array(id) != nullptr, "sanity");
    start_array(id)->update_for_block(obj_beg, obj_beg + obj_len);
  }
}

bool PSParallelCompact::check_maximum_compaction(size_t total_live_words,
                                                 MutableSpace* const old_space,
                                                 HeapWord* full_region_prefix_end) {

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Check System.GC
  bool is_max_on_system_gc = UseMaximumCompactionOnSystemGC
                          && GCCause::is_user_requested_gc(heap->gc_cause());

  // Check if all live objs are too much for old-gen.
  const bool is_old_gen_too_full = (total_live_words >= old_space->capacity_in_words());

  // JVM flags
  const uint total_invocations = heap->total_full_collections();
  assert(total_invocations >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations - _maximum_compaction_gc_num;
  const bool is_interval_ended = gcs_since_max > HeapMaximumCompactionInterval;

  // If all regions in old-gen are full
  const bool is_region_full =
    full_region_prefix_end >= _summary_data.region_align_down(old_space->top());

  if (is_max_on_system_gc || is_old_gen_too_full || is_interval_ended || is_region_full) {
    _maximum_compaction_gc_num = total_invocations;
    return true;
  }

  return false;
}

void PSParallelCompact::summary_phase()
{
  GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);

  MutableSpace* const old_space = _space_info[old_space_id].space();
  {
    size_t total_live_words = 0;
    HeapWord* full_region_prefix_end = nullptr;
    {
      // old-gen
      size_t live_words = _summary_data.live_words_in_space(old_space,
                                                            &full_region_prefix_end);
      total_live_words += live_words;
    }
    // young-gen
    for (uint i = eden_space_id; i < last_space_id; ++i) {
      const MutableSpace* space = _space_info[i].space();
      size_t live_words = _summary_data.live_words_in_space(space);
      total_live_words += live_words;
      _space_info[i].set_new_top(space->bottom() + live_words);
      _space_info[i].set_dense_prefix(space->bottom());
    }

    bool maximum_compaction = check_maximum_compaction(total_live_words,
                                                       old_space,
                                                       full_region_prefix_end);
    {
      GCTraceTime(Info, gc, phases) tm("Summary Phase: expand", &_gc_timer);
      // Try to expand old-gen in order to fit all live objs and waste.
      size_t target_capacity_bytes = total_live_words * HeapWordSize
                                   + old_space->capacity_in_bytes() * (MarkSweepDeadRatio / 100.0);
      ParallelScavengeHeap::heap()->old_gen()->try_expand_till_size(target_capacity_bytes);
    }

    HeapWord* dense_prefix_end = maximum_compaction
                                 ? full_region_prefix_end
                                 : compute_dense_prefix_for_old_space(old_space,
                                                                      full_region_prefix_end);
    SpaceId id = old_space_id;
    _space_info[id].set_dense_prefix(dense_prefix_end);

    if (dense_prefix_end != old_space->bottom()) {
      fill_dense_prefix_end(id);
      _summary_data.summarize_dense_prefix(old_space->bottom(), dense_prefix_end);
    }

    // Compacting objs in [dense_prefix_end, old_space->top())
    _summary_data.summarize(_space_info[id].split_info(),
                            dense_prefix_end, old_space->top(), nullptr,
                            dense_prefix_end, old_space->end(),
                            _space_info[id].new_top_addr());
  }

  // Summarize the remaining spaces in the young gen.  The initial target space
  // is the old gen.  If a space does not fit entirely into the target, then the
  // remainder is compacted into the space itself and that space becomes the new
  // target.
  SpaceId dst_space_id = old_space_id;
  HeapWord* dst_space_end = old_space->end();
  HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
  for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    const size_t live = pointer_delta(_space_info[id].new_top(),
                                      space->bottom());
    const size_t available = pointer_delta(dst_space_end, *new_top_addr);

    if (live > 0 && live <= available) {
      // All the live data will fit.
      bool done = _summary_data.summarize(_space_info[id].split_info(),
                                          space->bottom(), space->top(),
                                          nullptr,
                                          *new_top_addr, dst_space_end,
                                          new_top_addr);
      assert(done, "space must fit into old gen");

      // Reset the new_top value for the space.
      _space_info[id].set_new_top(space->bottom());
    } else if (live > 0) {
      // Attempt to fit part of the source space into the target space.
      HeapWord* next_src_addr = nullptr;
      bool done = _summary_data.summarize(_space_info[id].split_info(),
                                          space->bottom(), space->top(),
                                          &next_src_addr,
                                          *new_top_addr, dst_space_end,
                                          new_top_addr);
      assert(!done, "space should not fit into old gen");
      assert(next_src_addr != nullptr, "sanity");

      // The source space becomes the new target, so the remainder is compacted
      // within the space itself.
      dst_space_id = SpaceId(id);
      dst_space_end = space->end();
      new_top_addr = _space_info[id].new_top_addr();
      done = _summary_data.summarize(_space_info[id].split_info(),
                                     next_src_addr, space->top(),
                                     nullptr,
                                     space->bottom(), dst_space_end,
                                     new_top_addr);
      assert(done, "space must fit when compacted into itself");
      assert(*new_top_addr <= space->top(), "usage should not grow");
    }
  }
}

// This method invokes a full collection. The argument controls whether
// soft-refs should be cleared or not.
// Note that this method should only be called from the vm_thread while at a
// safepoint.
bool PSParallelCompact::invoke(bool clear_all_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(),
         "should be in vm thread");

  SvcGCMarker sgcm(SvcGCMarker::FULL);
  IsSTWGCActiveMark mark;

  return PSParallelCompact::invoke_no_policy(clear_all_soft_refs);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSParallelCompact::invoke_no_policy(bool clear_all_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != nullptr, "Sanity");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  GCIdMark gc_id_mark;
  _gc_timer.register_gc_start();
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  GCCause::Cause gc_cause = heap->gc_cause();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // Make sure data structures are sane, make the heap parsable, and do other
  // miscellaneous bookkeeping.
  pre_compact();

  const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

  {
    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);

    heap->pre_full_gc_dump(&_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->start_discovery(clear_all_soft_refs);

    marking_phase(&_gc_tracer);

    summary_phase();

#if COMPILER2_OR_JVMCI
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    forward_to_new_addr();

    adjust_pointers();

    compact();

    ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());

    ParCompactionManager::verify_all_region_stack_empty();

    // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
    // done before resizing.
    post_compact();

    size_policy->major_collection_end();

    size_policy->sample_old_gen_used_bytes(MAX2(pre_gc_values.old_gen_used(), old_gen->used_in_bytes()));

    if (UseAdaptiveSizePolicy) {
      heap->resize_after_full_gc();
    }

    heap->resize_all_tlabs();

    // Resize the metaspace capacity after a collection
    MetaspaceGC::compute_new_size();

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(&_gc_timer);

    size_policy->record_gc_pause_end_instant();
  }

  heap->gc_epilogue(true);

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  _gc_timer.register_gc_end();

  _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return true;
}

class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
private:
  uint _worker_id;

public:
  PCAddThreadRootsMarkingTaskClosure(uint worker_id) : _worker_id(worker_id) { }
  void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    ResourceMark rm;

    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(_worker_id);

    MarkingNMethodClosure mark_and_push_in_blobs(&cm->_mark_and_push_closure,
                                                 !NMethodToOopClosure::FixRelocations,
                                                 true /* keepalive nmethods */);

    thread->oops_do(&cm->_mark_and_push_closure, &mark_and_push_in_blobs);

    // Do the real work
    cm->follow_marking_stacks();
  }
};

void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(worker_id);

  do {
    ScannerTask task;
    if (ParCompactionManager::steal(worker_id, task)) {
      cm->follow_contents(task, true);
    }
    cm->follow_marking_stacks();
  } while (!terminator.offer_termination());
}

class MarkFromRootsTask : public WorkerTask {
  StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
  TaskTerminator _terminator;
  uint _active_workers;

public:
  MarkFromRootsTask(uint active_workers) :
      WorkerTask("MarkFromRootsTask"),
      _strong_roots_scope(active_workers),
      _terminator(active_workers, ParCompactionManager::marking_stacks()),
      _active_workers(active_workers) {}

  virtual void work(uint worker_id) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
    cm->create_marking_stats_cache();
    {
      CLDToOopClosure cld_closure(&cm->_mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
      ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);

      // Do the real work
      cm->follow_marking_stacks();
    }

    {
      PCAddThreadRootsMarkingTaskClosure closure(worker_id);
      Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &closure);
    }

    // Mark from OopStorages
    {
      _oop_storage_set_par_state.oops_do(&cm->_mark_and_push_closure);
      // Do the real work
      cm->follow_marking_stacks();
    }

    if (_active_workers > 1) {
      steal_marking_work(_terminator, worker_id);
    }
  }
};

class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  ParallelCompactRefProcProxyTask(uint max_workers)
    : RefProcProxyTask("ParallelCompactRefProcProxyTask", max_workers),
      _terminator(_max_workers, ParCompactionManager::marking_stacks()) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
    _rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &cm->_mark_and_push_closure, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

static void flush_marking_stats_cache(const uint num_workers) {
  for (uint i = 0; i < num_workers; ++i) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(i);
    cm->flush_and_destroy_marking_stats_cache();
  }
}

void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);

  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
  {
    GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);

    MarkFromRootsTask task(active_gc_threads);
    ParallelScavengeHeap::heap()->workers().run_task(&task);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

    ReferenceProcessorStats stats;
    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());

    ParallelCompactRefProcProxyTask task(ref_processor()->max_num_queues());
    stats = ref_processor()->process_discovered_references(task, &ParallelScavengeHeap::heap()->workers(), pt);

    gc_tracer->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  {
    GCTraceTime(Debug, gc, phases) tm("Flush Marking Stats", &_gc_timer);

    flush_marking_stats_cache(active_gc_threads);
  }

  // This is the point where the entire marking should have completed.
  ParCompactionManager::verify_all_marking_stack_empty();

  {
    GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
    WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
                                is_alive_closure(),
                                &do_nothing_cl,
                                1);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);

    ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                              false /* unregister_nmethods_during_purge */,
                              false /* lock_nmethod_free_separately */);

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(is_alive_closure());

      // Follow system dictionary roots and unload classes.
      unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethod's memory.
      ctx.purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
      ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx.free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
    {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
      ClassLoaderDataGraph::purge(true /* at_safepoint */);
      DEBUG_ONLY(MetaspaceUtils::verify();)
    }
  }

  {
    GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
    _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
  }
#if TASKQUEUE_STATS
  ParCompactionManager::print_and_reset_taskqueue_stats();
#endif
}

template<typename Func>
void PSParallelCompact::adjust_in_space_helper(SpaceId id, volatile uint* claim_counter, Func&& on_stripe) {
  MutableSpace* sp = PSParallelCompact::space(id);
  HeapWord* const bottom = sp->bottom();
  HeapWord* const top = sp->top();
  if (bottom == top) {
    return;
  }

  const uint num_regions_per_stripe = 2;
  const size_t region_size = ParallelCompactData::RegionSize;
  const size_t stripe_size = num_regions_per_stripe * region_size;
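  // With 64K-word regions, each claimed stripe covers 128K words, i.e. 1 MiB
  // with 8-byte HeapWords.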
1317 
1318   while (true) {
1319     uint counter = Atomic::fetch_then_add(claim_counter, num_regions_per_stripe);
1320     HeapWord* cur_stripe = bottom + counter * region_size;
1321     if (cur_stripe >= top) {
1322       break;
1323     }
1324     HeapWord* stripe_end = MIN2(cur_stripe + stripe_size, top);
1325     on_stripe(cur_stripe, stripe_end);
1326   }
1327 }
1328 
1329 void PSParallelCompact::adjust_in_old_space(volatile uint* claim_counter) {
1330   // Regions in old-space shouldn't be split.
1331   assert(!_space_info[old_space_id].split_info().is_valid(), "inv");
1332 
1333   auto scan_obj_with_limit = [&] (HeapWord* obj_start, HeapWord* left, HeapWord* right) {
1334     assert(mark_bitmap()->is_marked(obj_start), "inv");
1335     oop obj = cast_to_oop(obj_start);
1336     return obj->oop_iterate_size(&pc_adjust_pointer_closure, MemRegion(left, right));
1337   };
1338 
1339   adjust_in_space_helper(old_space_id, claim_counter, [&] (HeapWord* stripe_start, HeapWord* stripe_end) {
1340     assert(_summary_data.is_region_aligned(stripe_start), "inv");
1341     RegionData* cur_region = _summary_data.addr_to_region_ptr(stripe_start);
1342     HeapWord* obj_start;
1343     if (cur_region->partial_obj_size() != 0) {
1344       obj_start = cur_region->partial_obj_addr();
1345       obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
1346     } else {
1347       obj_start = stripe_start;
1348     }
1349 
1350     while (obj_start < stripe_end) {
1351       obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
1352       if (obj_start >= stripe_end) {
1353         break;
1354       }
1355       obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
1356     }
1357   });
1358 }
1359 
1360 void PSParallelCompact::adjust_in_young_space(SpaceId id, volatile uint* claim_counter) {
1361   adjust_in_space_helper(id, claim_counter, [](HeapWord* stripe_start, HeapWord* stripe_end) {
1362     HeapWord* obj_start = stripe_start;
1363     while (obj_start < stripe_end) {
1364       obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
1365       if (obj_start >= stripe_end) {
1366         break;
1367       }
1368       oop obj = cast_to_oop(obj_start);
1369       obj_start += obj->oop_iterate_size(&pc_adjust_pointer_closure);
1370     }
1371   });
1372 }
1373 
1374 void PSParallelCompact::adjust_pointers_in_spaces(uint worker_id, volatile uint* claim_counters) {
1375   auto start_time = Ticks::now();
1376   adjust_in_old_space(&claim_counters[0]);
1377   for (uint id = eden_space_id; id < last_space_id; ++id) {
1378     adjust_in_young_space(SpaceId(id), &claim_counters[id]);
1379   }
1380   log_trace(gc, phases)("adjust_pointers_in_spaces worker %u: %.3f ms", worker_id, (Ticks::now() - start_time).seconds() * 1000);
1381 }
1382 
1383 class PSAdjustTask final : public WorkerTask {
1384   SubTasksDone                               _sub_tasks;
1385   WeakProcessor::Task                        _weak_proc_task;
1386   OopStorageSetStrongParState<false, false>  _oop_storage_iter;
1387   uint                                       _nworkers;
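       // One stripe-claim counter per space, consumed by
       // PSParallelCompact::adjust_pointers_in_spaces() via adjust_in_space_helper().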
1388   volatile uint _claim_counters[PSParallelCompact::last_space_id] = {};
1389 
1390   enum PSAdjustSubTask {
1391     PSAdjustSubTask_code_cache,
1392 
1393     PSAdjustSubTask_num_elements
1394   };
1395 
1396 public:
1397   PSAdjustTask(uint nworkers) :
1398     WorkerTask("PSAdjust task"),
1399     _sub_tasks(PSAdjustSubTask_num_elements),
1400     _weak_proc_task(nworkers),
1401     _nworkers(nworkers) {
1402 
1403     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
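         // A fresh claim token lets Threads::possibly_parallel_oops_do() in work()
         // hand each Java thread to exactly one worker.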
1404     if (nworkers > 1) {
1405       Threads::change_thread_claim_token();
1406     }
1407   }
1408 
1409   ~PSAdjustTask() {
1410     Threads::assert_all_threads_claimed();
1411   }
1412 
1413   void work(uint worker_id) {
1414     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1415     cm->preserved_marks()->adjust_during_full_gc();
1416     {
1417       // adjust pointers in all spaces
1418       PSParallelCompact::adjust_pointers_in_spaces(worker_id, _claim_counters);
1419     }
1420     {
1421       ResourceMark rm;
1422       Threads::possibly_parallel_oops_do(_nworkers > 1, &pc_adjust_pointer_closure, nullptr);
1423     }
1424     _oop_storage_iter.oops_do(&pc_adjust_pointer_closure);
1425     {
1426       CLDToOopClosure cld_closure(&pc_adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
1427       ClassLoaderDataGraph::cld_do(&cld_closure);
1428     }
1429     {
1430       AlwaysTrueClosure always_alive;
1431       _weak_proc_task.work(worker_id, &always_alive, &pc_adjust_pointer_closure);
1432     }
1433     if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
1434       NMethodToOopClosure adjust_code(&pc_adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
1435       CodeCache::nmethods_do(&adjust_code);
1436     }
1437     _sub_tasks.all_tasks_claimed();
1438   }
1439 };
1440 
1441 void PSParallelCompact::adjust_pointers() {
1442   // Adjust the pointers to reflect the new locations
1443   GCTraceTime(Info, gc, phases) tm("Adjust Pointers", &_gc_timer);
1444   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1445   PSAdjustTask task(nworkers);
1446   ParallelScavengeHeap::heap()->workers().run_task(&task);
1447 }
1448 
1449 // Split [start, end) evenly for a number of workers and return the
1450 // range for worker_id.
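     // For illustration: splitting 10 regions among 3 workers yields per-worker
     // counts of 4, 3 and 3, i.e. worker 0 gets [start, start + 4), worker 1 gets
     // [start + 4, start + 7) and worker 2 gets [start + 7, start + 10).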
1451 static void split_regions_for_worker(size_t start, size_t end,
1452                                      uint worker_id, uint num_workers,
1453                                      size_t* worker_start, size_t* worker_end) {
1454   assert(start < end, "precondition");
1455   assert(num_workers > 0, "precondition");
1456   assert(worker_id < num_workers, "precondition");
1457 
1458   size_t num_regions = end - start;
1459   size_t num_regions_per_worker = num_regions / num_workers;
1460   size_t remainder = num_regions % num_workers;
1461   // The first `remainder` workers each get one extra region.
1462   *worker_start = start + worker_id * num_regions_per_worker
1463                   + MIN2(checked_cast<size_t>(worker_id), remainder);
1464   *worker_end = *worker_start + num_regions_per_worker
1465                 + (worker_id < remainder ? 1 : 0);
1466 }
1467 
1468 void PSParallelCompact::forward_to_new_addr() {
1469   GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1470   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1471 
1472   struct ForwardTask final : public WorkerTask {
1473     uint _num_workers;
1474 
1475     explicit ForwardTask(uint num_workers) :
1476       WorkerTask("PSForward task"),
1477       _num_workers(num_workers) {}
1478 
1479     static void forward_objs_in_range(ParCompactionManager* cm,
1480                                       HeapWord* start,
1481                                       HeapWord* end,
1482                                       HeapWord* destination) {
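           // Forward each live obj in [start, end) to consecutive new addresses
           // starting at destination. Objs that would not move are left unforwarded
           // and their marks need not be preserved.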
1483       HeapWord* cur_addr = start;
1484       HeapWord* new_addr = destination;
1485 
1486       while (cur_addr < end) {
1487         cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end);
1488         if (cur_addr >= end) {
1489           return;
1490         }
1491         assert(mark_bitmap()->is_marked(cur_addr), "inv");
1492         oop obj = cast_to_oop(cur_addr);
1493         if (new_addr != cur_addr) {
1494           cm->preserved_marks()->push_if_necessary(obj, obj->mark());
1495           FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
1496         }
1497         size_t obj_size = obj->size();
1498         new_addr += obj_size;
1499         cur_addr += obj_size;
1500       }
1501     }
1502 
1503     void work(uint worker_id) override {
1504       ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1505       for (uint id = old_space_id; id < last_space_id; ++id) {
1506         MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1507         HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1508         HeapWord* top = sp->top();
1509 
1510         if (dense_prefix_addr == top) {
1511           // Empty space
1512           continue;
1513         }
1514 
1515         const SplitInfo& split_info = _space_info[SpaceId(id)].split_info();
1516         size_t dense_prefix_region = _summary_data.addr_to_region_idx(dense_prefix_addr);
1517         size_t top_region = _summary_data.addr_to_region_idx(_summary_data.region_align_up(top));
1518         size_t start_region;
1519         size_t end_region;
1520         split_regions_for_worker(dense_prefix_region, top_region,
1521                                  worker_id, _num_workers,
1522                                  &start_region, &end_region);
1523         for (size_t cur_region = start_region; cur_region < end_region; ++cur_region) {
1524           RegionData* region_ptr = _summary_data.region(cur_region);
1525           size_t partial_obj_size = region_ptr->partial_obj_size();
1526 
1527           if (partial_obj_size == ParallelCompactData::RegionSize) {
1528             // No obj-start
1529             continue;
1530           }
1531 
1532           HeapWord* region_start = _summary_data.region_to_addr(cur_region);
1533           HeapWord* region_end = region_start + ParallelCompactData::RegionSize;
1534 
1535           if (split_info.is_split(cur_region)) {
1536             // Part 1: will be relocated to space-1
1537             HeapWord* preceding_destination = split_info.preceding_destination();
1538             HeapWord* split_point = split_info.split_point();
1539             forward_objs_in_range(cm, region_start + partial_obj_size, split_point, preceding_destination + partial_obj_size);
1540 
1541             // Part 2: will be relocated to space-2
1542             HeapWord* destination = region_ptr->destination();
1543             forward_objs_in_range(cm, split_point, region_end, destination);
1544           } else {
1545             HeapWord* destination = region_ptr->destination();
1546             forward_objs_in_range(cm, region_start + partial_obj_size, region_end, destination + partial_obj_size);
1547           }
1548         }
1549       }
1550     }
1551   } task(nworkers);
1552 
1553   ParallelScavengeHeap::heap()->workers().run_task(&task);
1554   DEBUG_ONLY(verify_forward();)
1555 }
1556 
1557 #ifdef ASSERT
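     // Verify the forwarding installed by forward_to_new_addr: walk live objs in
     // address order, recompute the compaction bump-pointer, and check that each
     // moving obj is forwarded to exactly that address while non-moving objs carry
     // no forwarding.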
1558 void PSParallelCompact::verify_forward() {
1559   HeapWord* const old_dense_prefix_addr = dense_prefix(SpaceId(old_space_id));
1560   RegionData* old_region = _summary_data.region(_summary_data.addr_to_region_idx(old_dense_prefix_addr));
1561   HeapWord* bump_ptr = old_region->partial_obj_size() != 0
1562                        ? old_dense_prefix_addr + old_region->partial_obj_size()
1563                        : old_dense_prefix_addr;
1564   SpaceId bump_ptr_space = old_space_id;
1565 
1566   for (uint id = old_space_id; id < last_space_id; ++id) {
1567     MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1568     HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1569     HeapWord* top = sp->top();
1570     HeapWord* cur_addr = dense_prefix_addr;
1571 
1572     while (cur_addr < top) {
1573       cur_addr = mark_bitmap()->find_obj_beg(cur_addr, top);
1574       if (cur_addr >= top) {
1575         break;
1576       }
1577       assert(mark_bitmap()->is_marked(cur_addr), "inv");
1578       assert(bump_ptr <= _space_info[bump_ptr_space].new_top(), "inv");
1579       // Move to the space containing cur_addr
1580       if (bump_ptr == _space_info[bump_ptr_space].new_top()) {
1581         bump_ptr = space(space_id(cur_addr))->bottom();
1582         bump_ptr_space = space_id(bump_ptr);
1583       }
1584       oop obj = cast_to_oop(cur_addr);
1585       if (cur_addr == bump_ptr) {
1586         assert(!FullGCForwarding::is_forwarded(obj), "inv");
1587       } else {
1588         assert(FullGCForwarding::forwardee(obj) == cast_to_oop(bump_ptr), "inv");
1589       }
1590       bump_ptr += obj->size();
1591       cur_addr += obj->size();
1592     }
1593   }
1594 }
1595 #endif
1596 
1597 // Helper class to print 8 region numbers per line and then print the total at the end.
1598 class FillableRegionLogger : public StackObj {
1599 private:
1600   Log(gc, compaction) log;
1601   static const int LineLength = 8;
1602   size_t _regions[LineLength];
1603   int _next_index;
1604   bool _enabled;
1605   size_t _total_regions;
1606 public:
1607   FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
1608   ~FillableRegionLogger() {
1609     log.trace("%zu initially fillable regions", _total_regions);
1610   }
1611 
1612   void print_line() {
1613     if (!_enabled || _next_index == 0) {
1614       return;
1615     }
1616     FormatBuffer<> line("Fillable: ");
1617     for (int i = 0; i < _next_index; i++) {
1618       line.append(" %7zu", _regions[i]);
1619     }
1620     log.trace("%s", line.buffer());
1621     _next_index = 0;
1622   }
1623 
1624   void handle(size_t region) {
1625     if (!_enabled) {
1626       return;
1627     }
1628     _regions[_next_index++] = region;
1629     if (_next_index == LineLength) {
1630       print_line();
1631     }
1632     _total_regions++;
1633   }
1634 };
1635 
1636 void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
1637 {
1638   GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
1639 
1640   // Regions are handed out to the active workers starting with worker 0.
1641   uint worker_id = 0;
1642 
1643   // Find all regions that are available (can be filled immediately) and
1644   // distribute them to the thread stacks.  The iteration is done in reverse
1645   // order (high to low) so the regions will be removed in ascending order.
1646 
1647   const ParallelCompactData& sd = PSParallelCompact::summary_data();
1648 
1649   // id + 1 is used to test termination so that an unsigned type can be
1650   // used with old_space_id == 0.
1651   FillableRegionLogger region_logger;
1652   for (unsigned int id = last_space_id - 1; id + 1 > old_space_id; --id) {
1653     SpaceInfo* const space_info = _space_info + id;
1654     HeapWord* const new_top = space_info->new_top();
1655 
1656     const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
1657     const size_t end_region =
1658       sd.addr_to_region_idx(sd.region_align_up(new_top));
1659 
1660     for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
1661       if (sd.region(cur)->claim_unsafe()) {
1662         ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1663         bool result = sd.region(cur)->mark_normal();
1664         assert(result, "Must succeed at this point.");
1665         cm->region_stack()->push(cur);
1666         region_logger.handle(cur);
1667         // Assign regions to tasks in round-robin fashion.
1668         if (++worker_id == parallel_gc_threads) {
1669           worker_id = 0;
1670         }
1671       }
1672     }
1673     region_logger.print_line();
1674   }
1675 }
1676 
1677 static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
1678   assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1679 
1680   ParCompactionManager* cm =
1681     ParCompactionManager::gc_thread_compaction_manager(worker_id);
1682 
1683   // Drain the stacks that have been preloaded with regions
1684   // that are ready to fill.
1685 
1686   cm->drain_region_stacks();
1687 
1688   guarantee(cm->region_stack()->is_empty(), "Not empty");
1689 
1690   size_t region_index = 0;
1691 
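       // Keep stealing regions from other workers, or claim an unavailable region
       // to fill via a shadow region, until all workers offer termination.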
1692   while (true) {
1693     if (ParCompactionManager::steal(worker_id, region_index)) {
1694       PSParallelCompact::fill_and_update_region(cm, region_index);
1695       cm->drain_region_stacks();
1696     } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
1697       // Fill and update an unavailable region with the help of a shadow region
1698       PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
1699       cm->drain_region_stacks();
1700     } else {
1701       if (terminator->offer_termination()) {
1702         break;
1703       }
1704       // Go around again.
1705     }
1706   }
1707 }
1708 
1709 class FillDensePrefixAndCompactionTask: public WorkerTask {
1710   uint _num_workers;
1711   TaskTerminator _terminator;
1712 
1713 public:
1714   FillDensePrefixAndCompactionTask(uint active_workers) :
1715       WorkerTask("FillDensePrefixAndCompactionTask"),
1716       _num_workers(active_workers),
1717       _terminator(active_workers, ParCompactionManager::region_task_queues()) {
1718   }
1719 
1720   virtual void work(uint worker_id) {
1721     {
1722       auto start = Ticks::now();
1723       PSParallelCompact::fill_dead_objs_in_dense_prefix(worker_id, _num_workers);
1724       log_trace(gc, phases)("Fill dense prefix by worker %u: %.3f ms", worker_id, (Ticks::now() - start).seconds() * 1000);
1725     }
1726     compaction_with_stealing_work(&_terminator, worker_id);
1727   }
1728 };
1729 
1730 void PSParallelCompact::fill_range_in_dense_prefix(HeapWord* start, HeapWord* end) {
1731 #ifdef ASSERT
1732   {
1733     assert(start < end, "precondition");
1734     assert(mark_bitmap()->find_obj_beg(start, end) == end, "precondition");
1735     HeapWord* bottom = _space_info[old_space_id].space()->bottom();
1736     if (start != bottom) {
1737       HeapWord* obj_start = mark_bitmap()->find_obj_beg_reverse(bottom, start);
1738       HeapWord* after_obj = obj_start + cast_to_oop(obj_start)->size();
1739       assert(after_obj == start, "precondition");
1740     }
1741   }
1742 #endif
1743 
1744   CollectedHeap::fill_with_objects(start, pointer_delta(end, start));
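       // Record each filler block in the old gen's object start array so that
       // block-start queries over the dense prefix remain correct.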
1745   HeapWord* addr = start;
1746   do {
1747     size_t size = cast_to_oop(addr)->size();
1748     start_array(old_space_id)->update_for_block(addr, addr + size);
1749     addr += size;
1750   } while (addr < end);
1751 }
1752 
1753 void PSParallelCompact::fill_dead_objs_in_dense_prefix(uint worker_id, uint num_workers) {
1754   ParMarkBitMap* bitmap = mark_bitmap();
1755 
1756   HeapWord* const bottom = _space_info[old_space_id].space()->bottom();
1757   HeapWord* const prefix_end = dense_prefix(old_space_id);
1758 
1759   if (bottom == prefix_end) {
1760     return;
1761   }
1762 
1763   size_t bottom_region = _summary_data.addr_to_region_idx(bottom);
1764   size_t prefix_end_region = _summary_data.addr_to_region_idx(prefix_end);
1765 
1766   size_t start_region;
1767   size_t end_region;
1768   split_regions_for_worker(bottom_region, prefix_end_region,
1769                            worker_id, num_workers,
1770                            &start_region, &end_region);
1771 
1772   if (start_region == end_region) {
1773     return;
1774   }
1775 
1776   HeapWord* const start_addr = _summary_data.region_to_addr(start_region);
1777   HeapWord* const end_addr = _summary_data.region_to_addr(end_region);
1778 
1779   // Skip live partial obj (if any) from previous region.
1780   HeapWord* cur_addr;
1781   RegionData* start_region_ptr = _summary_data.region(start_region);
1782   if (start_region_ptr->partial_obj_size() != 0) {
1783     HeapWord* partial_obj_start = start_region_ptr->partial_obj_addr();
1784     assert(bitmap->is_marked(partial_obj_start), "inv");
1785     cur_addr = partial_obj_start + cast_to_oop(partial_obj_start)->size();
1786   } else {
1787     cur_addr = start_addr;
1788   }
1789 
1790   // end_addr is inclusive to handle regions starting with dead space.
1791   while (cur_addr <= end_addr) {
1792     // Use prefix_end to handle trailing obj in each worker region-chunk.
1793     HeapWord* live_start = bitmap->find_obj_beg(cur_addr, prefix_end);
1794     if (cur_addr != live_start) {
1795       // Only worker 0 handles preceding dead space.
1796       if (cur_addr != start_addr || worker_id == 0) {
1797         fill_range_in_dense_prefix(cur_addr, live_start);
1798       }
1799     }
1800     if (live_start >= end_addr) {
1801       break;
1802     }
1803     assert(bitmap->is_marked(live_start), "inv");
1804     cur_addr = live_start + cast_to_oop(live_start)->size();
1805   }
1806 }
1807 
1808 void PSParallelCompact::compact() {
1809   GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
1810 
1811   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
1812 
1813   initialize_shadow_regions(active_gc_threads);
1814   prepare_region_draining_tasks(active_gc_threads);
1815 
1816   {
1817     GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
1818 
1819     FillDensePrefixAndCompactionTask task(active_gc_threads);
1820     ParallelScavengeHeap::heap()->workers().run_task(&task);
1821 
1822 #ifdef  ASSERT
1823     verify_filler_in_dense_prefix();
1824 
1825     // Verify that all regions have been processed.
1826     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1827       verify_complete(SpaceId(id));
1828     }
1829 #endif
1830   }
1831 }
1832 
1833 #ifdef  ASSERT
1834 void PSParallelCompact::verify_filler_in_dense_prefix() {
1835   HeapWord* bottom = _space_info[old_space_id].space()->bottom();
1836   HeapWord* dense_prefix_end = dense_prefix(old_space_id);
1837   HeapWord* cur_addr = bottom;
1838   while (cur_addr < dense_prefix_end) {
1839     oop obj = cast_to_oop(cur_addr);
1840     oopDesc::verify(obj);
1841     if (!mark_bitmap()->is_marked(cur_addr)) {
1842       Klass* k = cast_to_oop(cur_addr)->klass();
1843       assert(k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass(), "inv");
1844     }
1845     cur_addr += obj->size();
1846   }
1847 }
1848 
1849 void PSParallelCompact::verify_complete(SpaceId space_id) {
1850   // All Regions served as compaction targets, from dense_prefix() to
1851   // new_top(), should be marked as filled and all Regions between new_top()
1852   // and top() should be available (i.e., should have been emptied).
1853   ParallelCompactData& sd = summary_data();
1854   SpaceInfo si = _space_info[space_id];
1855   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
1856   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
1857   const size_t beg_region = sd.addr_to_region_idx(si.dense_prefix());
1858   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
1859   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
1860 
1861   size_t cur_region;
1862   for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
1863     const RegionData* const c = sd.region(cur_region);
1864     assert(c->completed(), "region %zu not filled: destination_count=%u",
1865            cur_region, c->destination_count());
1866   }
1867 
1868   for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
1869     const RegionData* const c = sd.region(cur_region);
1870     assert(c->available(), "region %zu not empty: destination_count=%u",
1871            cur_region, c->destination_count());
1872   }
1873 }
1874 #endif  // #ifdef ASSERT
1875 
1876 // Return the SpaceId for the space containing addr.  If addr is not in the
1877 // heap, last_space_id is returned.  In debug mode it expects the address to be
1878 // in the heap and asserts such.
1879 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
1880   assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
1881 
1882   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1883     if (_space_info[id].space()->contains(addr)) {
1884       return SpaceId(id);
1885     }
1886   }
1887 
1888   assert(false, "no space contains the addr");
1889   return last_space_id;
1890 }
1891 
1892 // Skip over count live words starting from beg, and return the address of the
1893 // next live word. Callers must ensure that there are enough live words in the
1894 // range [beg, end) to skip.
1895 HeapWord* PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
1896 {
1897   ParMarkBitMap* m = mark_bitmap();
1898   HeapWord* cur_addr = beg;
1899   while (true) {
1900     cur_addr = m->find_obj_beg(cur_addr, end);
1901     assert(cur_addr < end, "inv");
1902     size_t obj_size = cast_to_oop(cur_addr)->size();
1903     // Strictly greater-than
1904     if (obj_size > count) {
1905       return cur_addr + count;
1906     }
1907     count -= obj_size;
1908     cur_addr += obj_size;
1909   }
1910 }
1911 
1912 // On starting to fill a destination region (dest-region), we need to know the
1913 // location of the word that will be at the start of the dest-region after
1914 // compaction. A dest-region can have one or more source regions, but only the
1915 // first source-region contains this location. This location is retrieved by
1916 // calling `first_src_addr` on a dest-region.
1917 // Conversely, a source-region has a dest-region which holds the destination of
1918 // the first live word on this source-region, based on which the destination
1919 // for the rest of live words can be derived.
1920 //
1921 // Note:
1922 // There is some complication due to space-boundary-fragmentation (an obj can't
1923 // cross space-boundary) -- a source-region may be split and behave like two
1924 // distinct regions with their own dest-region, as depicted below.
1925 //
1926 // source-region: region-n
1927 //
1928 // **********************
1929 // |     A|A~~~~B|B     |
1930 // **********************
1931 //    n-1     n     n+1
1932 //
1933 // AA, BB denote two live objs. ~~~~ denotes unknown number of live objs.
1934 //
1935 // Assuming the dest-region for region-n is the final region before
1936 // old-space-end and its first-live-word is the middle of AA, the heap content
1937 // will look like the following after compaction:
1938 //
1939 // **************                  *************
1940 //      A|A~~~~ |                  |BB    |
1941 // **************                  *************
1942 //              ^                  ^
1943 //              | old-space-end    | eden-space-start
1944 //
1945 // Therefore, in this example, region-n will have two dest-regions:
1946 // 1. the final region in old-space
1947 // 2. the first region in eden-space.
1948 // To handle this special case, we introduce the concept of a split-region, whose
1949 // contents are relocated to two spaces. `SplitInfo` captures all the necessary
1950 // info about the split: the first part, the splitting point, and the second part.
1951 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
1952                                             SpaceId src_space_id,
1953                                             size_t src_region_idx)
1954 {
1955   const size_t RegionSize = ParallelCompactData::RegionSize;
1956   const ParallelCompactData& sd = summary_data();
1957   assert(sd.is_region_aligned(dest_addr), "precondition");
1958 
1959   const RegionData* const src_region_ptr = sd.region(src_region_idx);
1960   assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
1961 
1962   const size_t partial_obj_size = src_region_ptr->partial_obj_size();
1963   HeapWord* const src_region_destination = src_region_ptr->destination();
1964 
1965   HeapWord* const region_start = sd.region_to_addr(src_region_idx);
1966   HeapWord* const region_end = sd.region_to_addr(src_region_idx) + RegionSize;
1967 
1968   // Identify the actual destination for the first live words on this region,
1969   // taking split-region into account.
1970   HeapWord* region_start_destination;
1971   const SplitInfo& split_info = _space_info[src_space_id].split_info();
1972   if (split_info.is_split(src_region_idx)) {
1973     // The second part of this split region; use the recorded split point.
1974     if (dest_addr == src_region_destination) {
1975       return split_info.split_point();
1976     }
1977     region_start_destination = split_info.preceding_destination();
1978   } else {
1979     region_start_destination = src_region_destination;
1980   }
1981 
1982   // Calculate the offset to be skipped
1983   size_t words_to_skip = pointer_delta(dest_addr, region_start_destination);
1984 
1985   HeapWord* result;
1986   if (partial_obj_size > words_to_skip) {
1987     result = region_start + words_to_skip;
1988   } else {
1989     words_to_skip -= partial_obj_size;
1990     result = skip_live_words(region_start + partial_obj_size, region_end, words_to_skip);
1991   }
1992 
1993   if (split_info.is_split(src_region_idx)) {
1994     assert(result < split_info.split_point(), "postcondition");
1995   } else {
1996     assert(result < region_end, "postcondition");
1997   }
1998 
1999   return result;
2000 }
2001 
2002 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2003                                                      SpaceId src_space_id,
2004                                                      size_t beg_region,
2005                                                      HeapWord* end_addr)
2006 {
2007   ParallelCompactData& sd = summary_data();
2008 
2009 #ifdef ASSERT
2010   MutableSpace* const src_space = _space_info[src_space_id].space();
2011   HeapWord* const beg_addr = sd.region_to_addr(beg_region);
2012   assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
2013          "src_space_id does not match beg_addr");
2014   assert(src_space->contains(end_addr) || end_addr == src_space->end(),
2015          "src_space_id does not match end_addr");
2016 #endif // #ifdef ASSERT
2017 
2018   RegionData* const beg = sd.region(beg_region);
2019   RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
2020 
2021   // Regions up to new_top() are enqueued if they become available.
2022   HeapWord* const new_top = _space_info[src_space_id].new_top();
2023   RegionData* const enqueue_end =
2024     sd.addr_to_region_ptr(sd.region_align_up(new_top));
2025 
2026   for (RegionData* cur = beg; cur < end; ++cur) {
2027     assert(cur->data_size() > 0, "region must have live data");
2028     cur->decrement_destination_count();
2029     if (cur < enqueue_end && cur->available() && cur->claim()) {
2030       if (cur->mark_normal()) {
2031         cm->push_region(sd.region(cur));
2032       } else if (cur->mark_copied()) {
2033         // Try to copy the content of the shadow region back to its corresponding
2034         // heap region if the shadow region is filled. Otherwise, the GC thread
2035         // that fills the shadow region will copy the data back (see
2036         // MoveAndUpdateShadowClosure::complete_region).
2037         copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur));
2038         ParCompactionManager::push_shadow_region_mt_safe(cur->shadow_region());
2039         cur->set_completed();
2040       }
2041     }
2042   }
2043 }
2044 
2045 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
2046                                           SpaceId& src_space_id,
2047                                           HeapWord*& src_space_top,
2048                                           HeapWord* end_addr)
2049 {
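       // Find the next non-empty source region at or after end_addr, switching to
       // the next space when the current one is exhausted. On a space switch,
       // src_space_id and src_space_top are updated. The closure's source is set
       // to the start of the returned region.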
2050   ParallelCompactData& sd = PSParallelCompact::summary_data();
2051 
2052   size_t src_region_idx = 0;
2053 
2054   // Skip empty regions (if any) up to the top of the space.
2055   HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
2056   RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
2057   HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
2058   const RegionData* const top_region_ptr = sd.addr_to_region_ptr(top_aligned_up);
2059 
2060   while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
2061     ++src_region_ptr;
2062   }
2063 
2064   if (src_region_ptr < top_region_ptr) {
2065     // Found the first non-empty region in the same space.
2066     src_region_idx = sd.region(src_region_ptr);
2067     closure.set_source(sd.region_to_addr(src_region_idx));
2068     return src_region_idx;
2069   }
2070 
2071   // Switch to a new source space and find the first non-empty region.
2072   uint space_id = src_space_id + 1;
2073   assert(space_id < last_space_id, "not enough spaces");
2074 
2075   for (/* empty */; space_id < last_space_id; ++space_id) {
2076     HeapWord* bottom = _space_info[space_id].space()->bottom();
2077     HeapWord* top = _space_info[space_id].space()->top();
2078     // Skip empty space
2079     if (bottom == top) {
2080       continue;
2081     }
2082 
2083     // Identify the first region that contains live words in this space
2084     size_t cur_region = sd.addr_to_region_idx(bottom);
2085     size_t end_region = sd.addr_to_region_idx(sd.region_align_up(top));
2086 
2087     for (/* empty */ ; cur_region < end_region; ++cur_region) {
2088       RegionData* cur = sd.region(cur_region);
2089       if (cur->live_obj_size() > 0) {
2090         HeapWord* region_start_addr = sd.region_to_addr(cur_region);
2091 
2092         src_space_id = SpaceId(space_id);
2093         src_space_top = top;
2094         closure.set_source(region_start_addr);
2095         return cur_region;
2096       }
2097     }
2098   }
2099 
2100   ShouldNotReachHere();
2101 }
2102 
2103 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2104   ParallelCompactData& sd = summary_data();
2105   assert(sd.is_region_aligned(region_start_addr), "precondition");
2106 
2107   // Use per-region partial_obj_size to locate the end of the obj that extends
2108   // into the region starting at region_start_addr.
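       // Regions fully covered by the obj report partial_obj_size == RegionSize;
       // the first region reporting a smaller value contains the obj's end.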
2109   size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2110   size_t end_region_idx = sd.region_count();
2111   size_t accumulated_size = 0;
2112   for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2113     size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2114     accumulated_size += cur_partial_obj_size;
2115     if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2116       break;
2117     }
2118   }
2119   return region_start_addr + accumulated_size;
2120 }
2121 
2122 // Use region_idx as the destination region, and evacuate all live objs on its
2123 // source regions to this destination region.
2124 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2125 {
2126   ParMarkBitMap* const bitmap = mark_bitmap();
2127   ParallelCompactData& sd = summary_data();
2128   RegionData* const region_ptr = sd.region(region_idx);
2129 
2130   // Get the source region and related info.
2131   size_t src_region_idx = region_ptr->source_region();
2132   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2133   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2134   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2135 
2136   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2137 
2138   // Adjust src_region_idx to prepare for decrementing destination counts (the
2139   // destination count is not decremented when a region is copied to itself).
2140   if (src_region_idx == region_idx) {
2141     src_region_idx += 1;
2142   }
2143 
2144   // source-region:
2145   //
2146   // **********
2147   // |   ~~~  |
2148   // **********
2149   //      ^
2150   //      |-- closure.source() / first_src_addr
2151   //
2152   //
2153   // ~~~ : live words
2154   //
2155   // destination-region:
2156   //
2157   // **********
2158   // |        |
2159   // **********
2160   // ^
2161   // |-- region-start
2162   if (bitmap->is_unmarked(closure.source())) {
2163     // An object overflows the previous destination region, so this
2164     // destination region should copy the remainder of the object or as much as
2165     // will fit.
2166     HeapWord* const old_src_addr = closure.source();
2167     {
2168       HeapWord* region_start = sd.region_align_down(closure.source());
2169       HeapWord* obj_start = bitmap->find_obj_beg_reverse(region_start, closure.source());
2170       HeapWord* obj_end;
2171       if (obj_start != closure.source()) {
2172         assert(bitmap->is_marked(obj_start), "inv");
2173         // Found the actual obj-start; find the obj-end using size() if this obj is
2174         // completely contained in the current region, or partial_obj_end() otherwise.
2175         HeapWord* next_region_start = region_start + ParallelCompactData::RegionSize;
2176         HeapWord* partial_obj_start = (next_region_start >= src_space_top)
2177                                       ? nullptr
2178                                       : sd.addr_to_region_ptr(next_region_start)->partial_obj_addr();
2179         // This obj extends to next region iff partial_obj_addr of the *next*
2180         // region is the same as obj-start.
2181         if (partial_obj_start == obj_start) {
2182           // This obj extends to next region.
2183           obj_end = partial_obj_end(next_region_start);
2184         } else {
2185           // Completely contained in this region; safe to use size().
2186           obj_end = obj_start + cast_to_oop(obj_start)->size();
2187         }
2188       } else {
2189         // No obj-start found; the obj extends from a previous region into this one.
2190         obj_end = partial_obj_end(region_start);
2191       }
2192       size_t partial_obj_size = pointer_delta(obj_end, closure.source());
2193       closure.copy_partial_obj(partial_obj_size);
2194     }
2195 
2196     if (closure.is_full()) {
2197       decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2198       closure.complete_region(dest_addr, region_ptr);
2199       return;
2200     }
2201 
2202     // Finished copying without using up the current destination-region
2203     HeapWord* const end_addr = sd.region_align_down(closure.source());
2204     if (sd.region_align_down(old_src_addr) != end_addr) {
2205       assert(sd.region_align_up(old_src_addr) == end_addr, "only one region");
2206       // The partial object was copied from more than one source region.
2207       decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2208 
2209       // Move to the next source region, possibly switching spaces as well.  All
2210       // args except end_addr may be modified.
2211       src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2212     }
2213   }
2214 
2215   // Handle the rest obj-by-obj, where each obj-start is known.
2216   do {
2217     HeapWord* cur_addr = closure.source();
2218     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2219                                     src_space_top);
2220     // Handle the case where the final obj in the source region extends into the next region.
2221     HeapWord* final_obj_start = (end_addr == src_space_top)
2222                                 ? nullptr
2223                                 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2224     // Apply closure on objs inside [cur_addr, end_addr)
2225     do {
2226       cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2227       if (cur_addr == end_addr) {
2228         break;
2229       }
2230       size_t obj_size;
2231       if (final_obj_start == cur_addr) {
2232         obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2233       } else {
2234         // This obj doesn't extend into next region; size() is safe to use.
2235         obj_size = cast_to_oop(cur_addr)->size();
2236       }
2237       closure.do_addr(cur_addr, obj_size);
2238       cur_addr += obj_size;
2239     } while (cur_addr < end_addr && !closure.is_full());
2240 
2241     if (closure.is_full()) {
2242       decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2243       closure.complete_region(dest_addr, region_ptr);
2244       return;
2245     }
2246 
2247     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2248 
2249     // Move to the next source region, possibly switching spaces as well.  All
2250     // args except end_addr may be modified.
2251     src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2252   } while (true);
2253 }
2254 
2255 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2256 {
2257   MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2258   fill_region(cm, cl, region_idx);
2259 }
2260 
2261 void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx)
2262 {
2263   // Get a shadow region first
2264   ParallelCompactData& sd = summary_data();
2265   RegionData* const region_ptr = sd.region(region_idx);
2266   size_t shadow_region = ParCompactionManager::pop_shadow_region_mt_safe(region_ptr);
2267   // The InvalidShadow return value indicates the corresponding heap region is available,
2268   // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
2269   // MoveAndUpdateShadowClosure to fill the acquired shadow region.
2270   if (shadow_region == ParCompactionManager::InvalidShadow) {
2271     MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2272     region_ptr->shadow_to_normal();
2273     return fill_region(cm, cl, region_idx);
2274   } else {
2275     MoveAndUpdateShadowClosure cl(mark_bitmap(), region_idx, shadow_region);
2276     return fill_region(cm, cl, region_idx);
2277   }
2278 }
2279 
2280 void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr)
2281 {
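       // Copy one region's worth of words from the filled shadow region back to
       // the heap region it stands in for.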
2282   Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
2283 }
2284 
2285 bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_t &region_idx)
2286 {
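       // Try to claim an unavailable region to be filled via a shadow region.
       // Workers start at distinct regions (see initialize_shadow_regions) and
       // stride by the number of active workers, so they probe disjoint sequences.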
2287   size_t next = cm->next_shadow_region();
2288   ParallelCompactData& sd = summary_data();
2289   size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
2290   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
2291 
2292   while (next < old_new_top) {
2293     if (sd.region(next)->mark_shadow()) {
2294       region_idx = next;
2295       return true;
2296     }
2297     next = cm->move_next_shadow_region_by(active_gc_threads);
2298   }
2299 
2300   return false;
2301 }
2302 
2303 // The shadow region is an optimization to address region dependencies in full GC. The basic
2304 // idea is making more regions available by temporarily storing their live objects in empty
2305 // shadow regions to resolve dependencies between them and the destination regions. Therefore,
2306 // GC threads need not wait for destination regions to be available before processing sources.
2307 //
2308 // A typical workflow would be:
2309 // After draining its own stack and failing to steal from others, a GC worker would pick an
2310 // unavailable region (destination count > 0) and get a shadow region. Then the worker fills
2311 // the shadow region by copying live objects from source regions of the unavailable one. Once
2312 // the unavailable region becomes available, the data in the shadow region will be copied back.
2313 // Shadow regions are empty regions in the to-space and regions between top and end of other spaces.
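     //
     // The shadow-region state flow, pieced together from the code here and in
     // fill_and_update_shadow_region / decrement_destination_counts:
     //   mark_shadow() - the unavailable region has been claimed, to be filled via
     //                   a shadow region
     //   mark_filled() - the shadow copy is complete and ready to be copied back
     //   mark_copied() - whichever thread wins this race copies the data back,
     //                   calls set_completed(), and returns the shadow region to
     //                   the free pool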
2314 void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
2315 {
2316   const ParallelCompactData& sd = PSParallelCompact::summary_data();
2317 
2318   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2319     SpaceInfo* const space_info = _space_info + id;
2320     MutableSpace* const space = space_info->space();
2321 
2322     const size_t beg_region =
2323       sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
2324     const size_t end_region =
2325       sd.addr_to_region_idx(sd.region_align_down(space->end()));
2326 
2327     for (size_t cur = beg_region; cur < end_region; ++cur) {
2328       ParCompactionManager::push_shadow_region(cur);
2329     }
2330   }
2331 
2332   size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
2333   for (uint i = 0; i < parallel_gc_threads; i++) {
2334     ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
2335     cm->set_next_shadow_region(beg_region + i);
2336   }
2337 }
2338 
2339 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2340 {
2341   size_t words = MIN2(partial_obj_size, words_remaining());
2342 
2343   // This test is necessary; if omitted, the pointer updates to a partial object
2344   // that crosses the dense prefix boundary could be overwritten.
2345   if (source() != copy_destination()) {
2346     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2347     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2348   }
2349   update_state(words);
2350 }
2351 
2352 void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2353   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
2354   region_ptr->set_completed();
2355 }
2356 
2357 void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
2358   assert(destination() != nullptr, "sanity");
2359   _source = addr;
2360 
2361   // The start_array must be updated even if the object is not moving.
2362   if (_start_array != nullptr) {
2363     _start_array->update_for_block(destination(), destination() + words);
2364   }
2365 
2366   // Avoid overflow
2367   words = MIN2(words, words_remaining());
2368   assert(words > 0, "inv");
2369 
2370   if (copy_destination() != source()) {
2371     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2372     assert(source() != destination(), "inv");
2373     assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
2374     assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
2375     Copy::aligned_conjoint_words(source(), copy_destination(), words);
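         // Re-initialize the mark word of the copy, which was used for forwarding
         // information; non-trivial original marks were preserved in the forward
         // phase and are restored after compaction.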
2376     cast_to_oop(copy_destination())->init_mark();
2377   }
2378 
2379   update_state(words);
2380 }
2381 
2382 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2383   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2384   // Record the shadow region index
2385   region_ptr->set_shadow_region(_shadow);
2386   // Mark the shadow region as filled to indicate the data is ready to be
2387   // copied back
2388   region_ptr->mark_filled();
2389   // Try to copy the content of the shadow region back to its corresponding
2390   // heap region if available; the GC thread that decreases the destination
2391   // count to zero will do the copying otherwise (see
2392   // PSParallelCompact::decrement_destination_counts).
2393   if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2394     region_ptr->set_completed();
2395     PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2396     ParCompactionManager::push_shadow_region_mt_safe(_shadow);
2397   }
2398 }