1 /*
   2  * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.hpp"
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "classfile/stringTable.hpp"
  29 #include "classfile/symbolTable.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "compiler/oopMap.hpp"
  33 #include "gc/parallel/objectStartArray.inline.hpp"
  34 #include "gc/parallel/parallelArguments.hpp"
  35 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  36 #include "gc/parallel/parMarkBitMap.inline.hpp"
  37 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  38 #include "gc/parallel/psCompactionManager.inline.hpp"
  39 #include "gc/parallel/psOldGen.hpp"
  40 #include "gc/parallel/psParallelCompact.inline.hpp"
  41 #include "gc/parallel/psPromotionManager.inline.hpp"
  42 #include "gc/parallel/psRootType.hpp"
  43 #include "gc/parallel/psScavenge.hpp"
  44 #include "gc/parallel/psStringDedup.hpp"
  45 #include "gc/parallel/psYoungGen.hpp"
  46 #include "gc/shared/classUnloadingContext.hpp"
  47 #include "gc/shared/gcCause.hpp"
  48 #include "gc/shared/gcHeapSummary.hpp"
  49 #include "gc/shared/gcId.hpp"
  50 #include "gc/shared/gcLocker.hpp"
  51 #include "gc/shared/gcTimer.hpp"
  52 #include "gc/shared/gcTrace.hpp"
  53 #include "gc/shared/gcTraceTime.inline.hpp"
  54 #include "gc/shared/gcVMOperations.hpp"
  55 #include "gc/shared/isGCActiveMark.hpp"
  56 #include "gc/shared/oopStorage.inline.hpp"
  57 #include "gc/shared/oopStorageSet.inline.hpp"
  58 #include "gc/shared/oopStorageSetParState.inline.hpp"
  59 #include "gc/shared/preservedMarks.inline.hpp"
  60 #include "gc/shared/referencePolicy.hpp"
  61 #include "gc/shared/referenceProcessor.hpp"
  62 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  63 #include "gc/shared/strongRootsScope.hpp"
  64 #include "gc/shared/taskTerminator.hpp"
  65 #include "gc/shared/weakProcessor.inline.hpp"
  66 #include "gc/shared/workerPolicy.hpp"
  67 #include "gc/shared/workerThread.hpp"
  68 #include "gc/shared/workerUtils.hpp"
  69 #include "logging/log.hpp"
  70 #include "memory/iterator.inline.hpp"
  71 #include "memory/metaspaceUtils.hpp"
  72 #include "memory/resourceArea.hpp"
  73 #include "memory/universe.hpp"
  74 #include "nmt/memTracker.hpp"
  75 #include "oops/access.inline.hpp"
  76 #include "oops/instanceClassLoaderKlass.inline.hpp"
  77 #include "oops/instanceKlass.inline.hpp"
  78 #include "oops/instanceMirrorKlass.inline.hpp"
  79 #include "oops/methodData.hpp"
  80 #include "oops/objArrayKlass.inline.hpp"
  81 #include "oops/oop.inline.hpp"
  82 #include "runtime/atomic.hpp"
  83 #include "runtime/handles.inline.hpp"
  84 #include "runtime/java.hpp"
  85 #include "runtime/safepoint.hpp"
  86 #include "runtime/threads.hpp"
  87 #include "runtime/vmThread.hpp"
  88 #include "services/memoryService.hpp"
  89 #include "utilities/align.hpp"
  90 #include "utilities/debug.hpp"
  91 #include "utilities/events.hpp"
  92 #include "utilities/formatBuffer.hpp"
  93 #include "utilities/macros.hpp"
  94 #include "utilities/stack.inline.hpp"
  95 #if INCLUDE_JVMCI
  96 #include "jvmci/jvmci.hpp"
  97 #endif
  98 
  99 #include <math.h>
 100 
 101 // All sizes are in HeapWords.
 102 const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
 103 const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
 104 static_assert(ParallelCompactData::RegionSize >= BitsPerWord, "region-start bit word-aligned");
 105 const size_t ParallelCompactData::RegionSizeBytes =
 106   RegionSize << LogHeapWordSize;
 107 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
 108 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
 109 const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;
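     // For example, with Log2RegionSize == 16 a region covers 2^16 = 65536 heap
     // words, i.e. 512KB on LP64 (8-byte HeapWords) or 256KB on 32-bit platforms.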
 110 
 111 const ParallelCompactData::RegionData::region_sz_t
 112 ParallelCompactData::RegionData::dc_shift = 27;
 113 
 114 const ParallelCompactData::RegionData::region_sz_t
 115 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
 116 
 117 const ParallelCompactData::RegionData::region_sz_t
 118 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
 119 
 120 const ParallelCompactData::RegionData::region_sz_t
 121 ParallelCompactData::RegionData::los_mask = ~dc_mask;
 122 
 123 const ParallelCompactData::RegionData::region_sz_t
 124 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
 125 
 126 const ParallelCompactData::RegionData::region_sz_t
 127 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
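     // The dc_* constants above pack two values for a region into a single
     // RegionData field (see RegionData in psParallelCompact.hpp): the
     // destination count / claim state lives in the bits at and above dc_shift
     // (dc_mask), while the live-object size occupies the low bits selected by
     // los_mask.  dc_claimed and dc_completed are sentinel values larger than
     // any real destination count.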
 128 
 129 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
 130 
 131 SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
 132 ReferenceProcessor* PSParallelCompact::_ref_processor = nullptr;
 133 
 134 void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
 135                        HeapWord* destination)
 136 {
 137   assert(src_region_idx != 0, "invalid src_region_idx");
 138   assert(partial_obj_size != 0, "invalid partial_obj_size argument");
 139   assert(destination != nullptr, "invalid destination argument");
 140 
 141   _src_region_idx = src_region_idx;
 142   _partial_obj_size = partial_obj_size;
 143   _destination = destination;
 144 
 145   // These fields may not be updated below, so make sure they're clear.
 146   assert(_dest_region_addr == nullptr, "should have been cleared");
 147   assert(_first_src_addr == nullptr, "should have been cleared");
 148 
 149   // Determine the number of destination regions for the partial object.
 150   HeapWord* const last_word = destination + partial_obj_size - 1;
 151   const ParallelCompactData& sd = PSParallelCompact::summary_data();
 152   HeapWord* const beg_region_addr = sd.region_align_down(destination);
 153   HeapWord* const end_region_addr = sd.region_align_down(last_word);
 154 
 155   if (beg_region_addr == end_region_addr) {
 156     // One destination region.
 157     _destination_count = 1;
 158     if (end_region_addr == destination) {
 159       // The destination falls on a region boundary, thus the first word of the
 160       // partial object will be the first word copied to the destination region.
 161       _dest_region_addr = end_region_addr;
 162       _first_src_addr = sd.region_to_addr(src_region_idx);
 163     }
 164   } else {
 165     // Two destination regions.  When copied, the partial object will cross a
 166     // destination region boundary, so a word somewhere within the partial
 167     // object will be the first word copied to the second destination region.
 168     _destination_count = 2;
 169     _dest_region_addr = end_region_addr;
 170     const size_t ofs = pointer_delta(end_region_addr, destination);
 171     assert(ofs < _partial_obj_size, "sanity");
 172     _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
 173   }
 174 }
 175 
 176 void SplitInfo::clear()
 177 {
 178   _src_region_idx = 0;
 179   _partial_obj_size = 0;
 180   _destination = nullptr;
 181   _destination_count = 0;
 182   _dest_region_addr = nullptr;
 183   _first_src_addr = nullptr;
 184   assert(!is_valid(), "sanity");
 185 }
 186 
 187 #ifdef  ASSERT
 188 void SplitInfo::verify_clear()
 189 {
 190   assert(_src_region_idx == 0, "not clear");
 191   assert(_partial_obj_size == 0, "not clear");
 192   assert(_destination == nullptr, "not clear");
 193   assert(_destination_count == 0, "not clear");
 194   assert(_dest_region_addr == nullptr, "not clear");
 195   assert(_first_src_addr == nullptr, "not clear");
 196 }
 197 #endif  // #ifdef ASSERT
 198 
 199 
 200 void PSParallelCompact::print_on_error(outputStream* st) {
 201   _mark_bitmap.print_on_error(st);
 202 }
 203 
 204 ParallelCompactData::ParallelCompactData() :
 205   _heap_start(nullptr),
 206   DEBUG_ONLY(_heap_end(nullptr) COMMA)
 207   _region_vspace(nullptr),
 208   _reserved_byte_size(0),
 209   _region_data(nullptr),
 210   _region_count(0) {}
 211 
 212 bool ParallelCompactData::initialize(MemRegion reserved_heap)
 213 {
 214   _heap_start = reserved_heap.start();
 215   const size_t heap_size = reserved_heap.word_size();
 216   DEBUG_ONLY(_heap_end = _heap_start + heap_size;)
 217 
 218   assert(region_align_down(_heap_start) == _heap_start,
 219          "region start not aligned");
 220 
 221   return initialize_region_data(heap_size);
 222 }
 223 
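     // Reserve and commit the backing storage for 'count' records of
     // 'element_size' bytes each, using a page size suited to the total
     // footprint.  Returns null if the reservation or the commit fails.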
 224 PSVirtualSpace*
 225 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 226 {
 227   const size_t raw_bytes = count * element_size;
 228   const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
 229   const size_t granularity = os::vm_allocation_granularity();
 230   _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
 231 
 232   const size_t rs_align = page_sz == os::vm_page_size() ? 0 :
 233     MAX2(page_sz, granularity);
 234   ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
 235   os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
 236                        rs.size(), page_sz);
 237 
 238   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 239 
 240   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
 241   if (vspace != nullptr) {
 242     if (vspace->expand_by(_reserved_byte_size)) {
 243       return vspace;
 244     }
 245     delete vspace;
 246     // Release memory reserved in the space.
 247     rs.release();
 248   }
 249 
 250   return nullptr;
 251 }
 252 
 253 bool ParallelCompactData::initialize_region_data(size_t heap_size)
 254 {
 255   assert(is_aligned(heap_size, RegionSize), "precondition");
 256 
 257   const size_t count = heap_size >> Log2RegionSize;
 258   _region_vspace = create_vspace(count, sizeof(RegionData));
 259   if (_region_vspace != nullptr) {
 260     _region_data = (RegionData*)_region_vspace->reserved_low_addr();
 261     _region_count = count;
 262     return true;
 263   }
 264   return false;
 265 }
 266 
 267 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
 268   assert(beg_region <= _region_count, "beg_region out of range");
 269   assert(end_region <= _region_count, "end_region out of range");
 270 
 271   const size_t region_cnt = end_region - beg_region;
 272   memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
 273 }
 274 
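     // Set up the summary data for the dense-prefix regions in [beg, end): each
     // region is recorded as its own source and destination and as completely
     // full, so the compaction phase treats it as already compacted in place.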
 275 void
 276 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
 277 {
 278   assert(is_region_aligned(beg), "not RegionSize aligned");
 279   assert(is_region_aligned(end), "not RegionSize aligned");
 280 
 281   size_t cur_region = addr_to_region_idx(beg);
 282   const size_t end_region = addr_to_region_idx(end);
 283   HeapWord* addr = beg;
 284   while (cur_region < end_region) {
 285     _region_data[cur_region].set_destination(addr);
 286     _region_data[cur_region].set_destination_count(0);
 287     _region_data[cur_region].set_source_region(cur_region);
 288 
 289     // Update live_obj_size so the region appears completely full.
 290     size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
 291     _region_data[cur_region].set_live_obj_size(live_size);
 292 
 293     ++cur_region;
 294     addr += RegionSize;
 295   }
 296 }
 297 
 298 // Find the point at which a space can be split and, if necessary, record the
 299 // split point.
 300 //
 301 // If the current src region (which overflowed the destination space) doesn't
 302 // have a partial object, the split point is at the beginning of the current src
 303 // region (an "easy" split, no extra bookkeeping required).
 304 //
 305 // If the current src region has a partial object, the split point is in the
 306 // region where that partial object starts (call it the split_region).  If
 307 // split_region has a partial object, then the split point is just after that
 308 // partial object (a "hard" split where we have to record the split data and
 309 // zero the partial_obj_size field).  With a "hard" split, we know that the
 310 // partial_obj ends within split_region because the partial object that caused
 311 // the overflow starts in split_region.  If split_region doesn't have a partial
 312 // obj, then the split is at the beginning of split_region (another "easy"
 313 // split).
 314 HeapWord*
 315 ParallelCompactData::summarize_split_space(size_t src_region,
 316                                            SplitInfo& split_info,
 317                                            HeapWord* destination,
 318                                            HeapWord* target_end,
 319                                            HeapWord** target_next)
 320 {
 321   assert(destination <= target_end, "sanity");
 322   assert(destination + _region_data[src_region].data_size() > target_end,
 323     "region should not fit into target space");
 324   assert(is_region_aligned(target_end), "sanity");
 325 
 326   size_t split_region = src_region;
 327   HeapWord* split_destination = destination;
 328   size_t partial_obj_size = _region_data[src_region].partial_obj_size();
 329 
 330   if (destination + partial_obj_size > target_end) {
 331     // The split point is just after the partial object (if any) in the
 332     // src_region that contains the start of the object that overflowed the
 333     // destination space.
 334     //
 335     // Find the start of the "overflow" object and set split_region to the
 336     // region containing it.
 337     HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
 338     split_region = addr_to_region_idx(overflow_obj);
 339 
 340     // Clear the source_region field of all destination regions whose first word
 341     // came from data after the split point (a non-null source_region field
 342     // implies a region must be filled).
 343     //
 344     // An alternative to the simple loop below:  clear during post_compact(),
 345     // which uses memcpy instead of individual stores, and is easy to
 346     // parallelize.  (The downside is that it clears the entire RegionData
 347     // object as opposed to just one field.)
 348     //
 349     // post_compact() would have to clear the summary data up to the highest
 350     // address that was written during the summary phase, which would be
 351     //
 352     //         max(top, max(new_top, clear_top))
 353     //
 354     // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
 355     // to target_end.
 356     const RegionData* const sr = region(split_region);
 357     const size_t beg_idx =
 358       addr_to_region_idx(region_align_up(sr->destination() +
 359                                          sr->partial_obj_size()));
 360     const size_t end_idx = addr_to_region_idx(target_end);
 361 
 362     log_develop_trace(gc, compaction)("split:  clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
 363     for (size_t idx = beg_idx; idx < end_idx; ++idx) {
 364       _region_data[idx].set_source_region(0);
 365     }
 366 
 367     // Set split_destination and partial_obj_size to reflect the split region.
 368     split_destination = sr->destination();
 369     partial_obj_size = sr->partial_obj_size();
 370   }
 371 
 372   // The split is recorded only if a partial object extends onto the region.
 373   if (partial_obj_size != 0) {
 374     _region_data[split_region].set_partial_obj_size(0);
 375     split_info.record(split_region, partial_obj_size, split_destination);
 376   }
 377 
 378   // Setup the continuation addresses.
 379   *target_next = split_destination + partial_obj_size;
 380   HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
 381 
 382   if (log_develop_is_enabled(Trace, gc, compaction)) {
 383     const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
 384     log_develop_trace(gc, compaction)("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
 385                                       split_type, p2i(source_next), split_region, partial_obj_size);
 386     log_develop_trace(gc, compaction)("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
 387                                       split_type, p2i(split_destination),
 388                                       addr_to_region_idx(split_destination),
 389                                       p2i(*target_next));
 390 
 391     if (partial_obj_size != 0) {
 392       HeapWord* const po_beg = split_info.destination();
 393       HeapWord* const po_end = po_beg + split_info.partial_obj_size();
 394       log_develop_trace(gc, compaction)("%s split:  po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
 395                                         split_type,
 396                                         p2i(po_beg), addr_to_region_idx(po_beg),
 397                                         p2i(po_end), addr_to_region_idx(po_end));
 398     }
 399   }
 400 
 401   return source_next;
 402 }
 403 
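     // Return the number of live words in the given space, summed over its
     // regions.  If full_region_prefix_end is non-null, it is set to the end of
     // the leading run of completely full regions (or to space->top() if every
     // region is full).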
 404 size_t ParallelCompactData::live_words_in_space(const MutableSpace* space,
 405                                                 HeapWord** full_region_prefix_end) {
 406   size_t cur_region = addr_to_region_idx(space->bottom());
 407   const size_t end_region = addr_to_region_idx(region_align_up(space->top()));
 408   size_t live_words = 0;
 409   if (full_region_prefix_end == nullptr) {
 410     for (/* empty */; cur_region < end_region; ++cur_region) {
 411       live_words += _region_data[cur_region].data_size();
 412     }
 413   } else {
 414     bool first_set = false;
 415     for (/* empty */; cur_region < end_region; ++cur_region) {
 416       size_t live_words_in_region = _region_data[cur_region].data_size();
 417       if (!first_set && live_words_in_region < RegionSize) {
 418         *full_region_prefix_end = region_to_addr(cur_region);
 419         first_set = true;
 420       }
 421       live_words += live_words_in_region;
 422     }
 423     if (!first_set) {
 424       // All regions are full of live objs.
 425       assert(is_region_aligned(space->top()), "inv");
 426       *full_region_prefix_end = space->top();
 427     }
 428     assert(*full_region_prefix_end != nullptr, "postcondition");
 429     assert(is_region_aligned(*full_region_prefix_end), "inv");
 430     assert(*full_region_prefix_end >= space->bottom(), "in-range");
 431     assert(*full_region_prefix_end <= space->top(), "in-range");
 432   }
 433   return live_words;
 434 }
 435 
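     // Plan (summarize) where the live data in [source_beg, source_end) will be
     // copied, with the first word going to target_beg.  Returns true if all of
     // it fits before target_end; otherwise the split point is recorded in
     // split_info, *source_next is set to where summarization of this source
     // space must resume, and false is returned.  In either case *target_next is
     // updated to the first unused word of the target space.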
 436 bool ParallelCompactData::summarize(SplitInfo& split_info,
 437                                     HeapWord* source_beg, HeapWord* source_end,
 438                                     HeapWord** source_next,
 439                                     HeapWord* target_beg, HeapWord* target_end,
 440                                     HeapWord** target_next)
 441 {
 442   HeapWord* const source_next_val = source_next == nullptr ? nullptr : *source_next;
 443   log_develop_trace(gc, compaction)(
 444       "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
 445       " tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
 446       p2i(source_beg), p2i(source_end), p2i(source_next_val),
 447       p2i(target_beg), p2i(target_end), p2i(*target_next));
 448 
 449   size_t cur_region = addr_to_region_idx(source_beg);
 450   const size_t end_region = addr_to_region_idx(region_align_up(source_end));
 451 
 452   HeapWord *dest_addr = target_beg;
 453   while (cur_region < end_region) {
 454     // The destination must be set even if the region has no data.
 455     _region_data[cur_region].set_destination(dest_addr);
 456 
 457     size_t words = _region_data[cur_region].data_size();
 458     if (words > 0) {
 459       // If cur_region does not fit entirely into the target space, find a point
 460       // at which the source space can be 'split' so that part is copied to the
 461       // target space and the rest is copied elsewhere.
 462       if (dest_addr + words > target_end) {
 463         assert(source_next != nullptr, "source_next is null when splitting");
 464         *source_next = summarize_split_space(cur_region, split_info, dest_addr,
 465                                              target_end, target_next);
 466         return false;
 467       }
 468 
 469       // Compute the destination_count for cur_region, and if necessary, update
 470       // source_region for a destination region.  The source_region field is
 471       // updated if cur_region is the first (left-most) region to be copied to a
 472       // destination region.
 473       //
 474       // The destination_count calculation is a bit subtle.  A region that has
 475       // data that compacts into itself does not count itself as a destination.
 476       // This maintains the invariant that a zero count means the region is
 477       // available and can be claimed and then filled.
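           // For example, a region whose live data compacts entirely into itself
           // keeps a destination_count of zero, so it can still be claimed and
           // (trivially) filled in place.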
 478       uint destination_count = 0;
 479       if (split_info.is_split(cur_region)) {
 480         // The current region has been split:  the partial object will be copied
 481         // to one destination space and the remaining data will be copied to
 482         // another destination space.  Adjust the initial destination_count and,
 483         // if necessary, set the source_region field if the partial object will
 484         // cross a destination region boundary.
 485         destination_count = split_info.destination_count();
 486         if (destination_count == 2) {
 487           size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
 488           _region_data[dest_idx].set_source_region(cur_region);
 489         }
 490       }
 491 
 492       HeapWord* const last_addr = dest_addr + words - 1;
 493       const size_t dest_region_1 = addr_to_region_idx(dest_addr);
 494       const size_t dest_region_2 = addr_to_region_idx(last_addr);
 495 
 496       // Initially assume that the destination regions will be the same and
 497       // adjust the value below if necessary.  Under this assumption, if
 498       // cur_region == dest_region_2, then cur_region will be compacted
 499       // completely into itself.
 500       destination_count += cur_region == dest_region_2 ? 0 : 1;
 501       if (dest_region_1 != dest_region_2) {
 502         // Destination regions differ; adjust destination_count.
 503         destination_count += 1;
 504         // Data from cur_region will be copied to the start of dest_region_2.
 505         _region_data[dest_region_2].set_source_region(cur_region);
 506       } else if (is_region_aligned(dest_addr)) {
 507         // Data from cur_region will be copied to the start of the destination
 508         // region.
 509         _region_data[dest_region_1].set_source_region(cur_region);
 510       }
 511 
 512       _region_data[cur_region].set_destination_count(destination_count);
 513       dest_addr += words;
 514     }
 515 
 516     ++cur_region;
 517   }
 518 
 519   *target_next = dest_addr;
 520   return true;
 521 }
 522 
 523 #ifdef ASSERT
 524 void ParallelCompactData::verify_clear()
 525 {
 526   const size_t* const beg = (const size_t*) _region_vspace->committed_low_addr();
 527   const size_t* const end = (const size_t*) _region_vspace->committed_high_addr();
 528   for (const size_t* p = beg; p < end; ++p) {
 529     assert(*p == 0, "not zero");
 530   }
 531 }
 532 #endif  // #ifdef ASSERT
 533 
 534 STWGCTimer          PSParallelCompact::_gc_timer;
 535 ParallelOldTracer   PSParallelCompact::_gc_tracer;
 536 elapsedTimer        PSParallelCompact::_accumulated_time;
 537 unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
 538 CollectorCounters*  PSParallelCompact::_counters = nullptr;
 539 ParMarkBitMap       PSParallelCompact::_mark_bitmap;
 540 ParallelCompactData PSParallelCompact::_summary_data;
 541 
 542 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
 543 
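     // Oop closure used during the adjust-pointers phase: every oop field
     // visited is rewritten to the address its referent will occupy after
     // compaction.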
 544 class PCAdjustPointerClosure: public BasicOopIterateClosure {
 545   template <typename T>
 546   void do_oop_work(T* p) { PSParallelCompact::adjust_pointer(p); }
 547 
 548 public:
 549   virtual void do_oop(oop* p)                { do_oop_work(p); }
 550   virtual void do_oop(narrowOop* p)          { do_oop_work(p); }
 551 
 552   virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
 553 };
 554 
 555 static PCAdjustPointerClosure pc_adjust_pointer_closure;
 556 
 557 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 558 
 559 void PSParallelCompact::post_initialize() {
 560   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 561   _span_based_discoverer.set_span(heap->reserved_region());
 562   _ref_processor =
 563     new ReferenceProcessor(&_span_based_discoverer,
 564                            ParallelGCThreads,   // mt processing degree
 565                            ParallelGCThreads,   // mt discovery degree
 566                            false,               // concurrent_discovery
 567                            &_is_alive_closure); // non-header is alive closure
 568 
 569   _counters = new CollectorCounters("Parallel full collection pauses", 1);
 570 
 571   // Initialize static fields in ParCompactionManager.
 572   ParCompactionManager::initialize(mark_bitmap());
 573 }
 574 
 575 bool PSParallelCompact::initialize_aux_data() {
 576   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 577   MemRegion mr = heap->reserved_region();
 578   assert(mr.byte_size() != 0, "heap should be reserved");
 579 
 580   initialize_space_info();
 581 
 582   if (!_mark_bitmap.initialize(mr)) {
 583     vm_shutdown_during_initialization(
 584       err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
 585       "garbage collection for the requested " SIZE_FORMAT "KB heap.",
 586       _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
 587     return false;
 588   }
 589 
 590   if (!_summary_data.initialize(mr)) {
 591     vm_shutdown_during_initialization(
 592       err_msg("Unable to allocate " SIZE_FORMAT "KB summary data for parallel "
 593       "garbage collection for the requested " SIZE_FORMAT "KB heap.",
 594       _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
 595     return false;
 596   }
 597 
 598   return true;
 599 }
 600 
 601 void PSParallelCompact::initialize_space_info()
 602 {
 603   memset(&_space_info, 0, sizeof(_space_info));
 604 
 605   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 606   PSYoungGen* young_gen = heap->young_gen();
 607 
 608   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
 609   _space_info[eden_space_id].set_space(young_gen->eden_space());
 610   _space_info[from_space_id].set_space(young_gen->from_space());
 611   _space_info[to_space_id].set_space(young_gen->to_space());
 612 
 613   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
 614 }
 615 
 616 void
 617 PSParallelCompact::clear_data_covering_space(SpaceId id)
 618 {
 619   // At this point, top is the value before GC, new_top() is the value that will
 620   // be set at the end of GC.  The marking bitmap is cleared to top; nothing
 621   // should be marked above top.  The summary data is cleared to the larger of
 622   // top & new_top.
 623   MutableSpace* const space = _space_info[id].space();
 624   HeapWord* const bot = space->bottom();
 625   HeapWord* const top = space->top();
 626   HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
 627 
 628   _mark_bitmap.clear_range(bot, top);
 629 
 630   const size_t beg_region = _summary_data.addr_to_region_idx(bot);
 631   const size_t end_region =
 632     _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
 633   _summary_data.clear_range(beg_region, end_region);
 634 
 635   // Clear the data used to 'split' regions.
 636   SplitInfo& split_info = _space_info[id].split_info();
 637   if (split_info.is_valid()) {
 638     split_info.clear();
 639   }
 640   DEBUG_ONLY(split_info.verify_clear();)
 641 }
 642 
 643 void PSParallelCompact::pre_compact()
 644 {
 645   // Update the from & to space pointers in space_info, since they are swapped
 646   // at each young gen gc.  Do the update unconditionally (even though a
 647   // promotion failure does not swap spaces) because an unknown number of young
 648   // collections will have swapped the spaces an unknown number of times.
 649   GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
 650   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 651   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
 652   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
 653 
 654   // Increment the invocation count
 655   heap->increment_total_collections(true);
 656 
 657   CodeCache::on_gc_marking_cycle_start();
 658 
 659   heap->print_heap_before_gc();
 660   heap->trace_heap_before_gc(&_gc_tracer);
 661 
 662   // Fill in TLABs
 663   heap->ensure_parsability(true);  // retire TLABs
 664 
 665   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
 666     Universe::verify("Before GC");
 667   }
 668 
 669   DEBUG_ONLY(mark_bitmap()->verify_clear();)
 670   DEBUG_ONLY(summary_data().verify_clear();)
 671 }
 672 
 673 void PSParallelCompact::post_compact()
 674 {
 675   GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
 676   ParCompactionManager::remove_all_shadow_regions();
 677 
 678   CodeCache::on_gc_marking_cycle_finish();
 679   CodeCache::arm_all_nmethods();
 680 
 681   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
 682     // Clear the marking bitmap, summary data and split info.
 683     clear_data_covering_space(SpaceId(id));
 684     {
 685       MutableSpace* space = _space_info[id].space();
 686       HeapWord* top = space->top();
 687       HeapWord* new_top = _space_info[id].new_top();
 688       if (ZapUnusedHeapArea && new_top < top) {
 689         space->mangle_region(MemRegion(new_top, top));
 690       }
 691       // Update top().  Must be done after clearing the bitmap and summary data.
 692       space->set_top(new_top);
 693     }
 694   }
 695 
 696   ParCompactionManager::flush_all_string_dedup_requests();
 697 
 698   MutableSpace* const eden_space = _space_info[eden_space_id].space();
 699   MutableSpace* const from_space = _space_info[from_space_id].space();
 700   MutableSpace* const to_space   = _space_info[to_space_id].space();
 701 
 702   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 703   bool eden_empty = eden_space->is_empty();
 704 
 705   // Update heap occupancy information which is used as input to the soft ref
 706   // clearing policy at the next gc.
 707   Universe::heap()->update_capacity_and_used_at_gc();
 708 
 709   bool young_gen_empty = eden_empty && from_space->is_empty() &&
 710     to_space->is_empty();
 711 
 712   PSCardTable* ct = heap->card_table();
 713   MemRegion old_mr = heap->old_gen()->committed();
 714   if (young_gen_empty) {
 715     ct->clear_MemRegion(old_mr);
 716   } else {
 717     ct->dirty_MemRegion(old_mr);
 718   }
 719 
 720   {
 721     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
 722     GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
 723     ClassLoaderDataGraph::purge(true /* at_safepoint */);
 724     DEBUG_ONLY(MetaspaceUtils::verify();)
 725   }
 726 
 727   // Need to clear claim bits for the next mark.
 728   ClassLoaderDataGraph::clear_claimed_marks();
 729 
 730   heap->prune_scavengable_nmethods();
 731 
 732 #if COMPILER2_OR_JVMCI
 733   DerivedPointerTable::update_pointers();
 734 #endif
 735 
 736   // Signal that we have completed a visit to all live objects.
 737   Universe::heap()->record_whole_heap_examined_timestamp();
 738 }
 739 
 740 HeapWord* PSParallelCompact::compute_dense_prefix_for_old_space(MutableSpace* old_space,
 741                                                                 HeapWord* full_region_prefix_end) {
 742   const size_t region_size = ParallelCompactData::RegionSize;
 743   const ParallelCompactData& sd = summary_data();
 744 
 745   // Iteration starts with the region *after* the full-region-prefix-end.
 746   const RegionData* const start_region = sd.addr_to_region_ptr(full_region_prefix_end);
 747   // If the final region is not full, iteration stops before that region,
 748   // because fill_dense_prefix_end() assumes that prefix_end <= top.
 749   const RegionData* const end_region = sd.addr_to_region_ptr(old_space->top());
 750   assert(start_region <= end_region, "inv");
 751 
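       // Walk forward from the full-region prefix, admitting regions into the
       // dense prefix as long as the accumulated dead space stays within
       // MarkSweepDeadRatio percent of old-gen capacity.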
 752   size_t max_waste = old_space->capacity_in_words() * (MarkSweepDeadRatio / 100.0);
 753   const RegionData* cur_region = start_region;
 754   for (/* empty */; cur_region < end_region; ++cur_region) {
 755     assert(region_size >= cur_region->data_size(), "inv");
 756     size_t dead_size = region_size - cur_region->data_size();
 757     if (max_waste < dead_size) {
 758       break;
 759     }
 760     max_waste -= dead_size;
 761   }
 762 
 763   HeapWord* const prefix_end = sd.region_to_addr(cur_region);
 764   assert(sd.is_region_aligned(prefix_end), "postcondition");
 765   assert(prefix_end >= full_region_prefix_end, "in-range");
 766   assert(prefix_end <= old_space->top(), "in-range");
 767   return prefix_end;
 768 }
 769 
 770 void PSParallelCompact::fill_dense_prefix_end(SpaceId id) {
 771   // Comparing two sizes to decide if filling is required:
 772   //
 773   // The size of the filler (min-obj-size) is 2 heap words with the default
 774   // MinObjAlignment, since both markword and klass take 1 heap word.
 775   //
 776   // The size of the gap (if any) right before dense-prefix-end is
 777   // MinObjAlignment.
 778   //
 779   // Need to fill in the gap only if it's smaller than min-obj-size, and the
 780   // filler obj will extend to next region.
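       // For example, on LP64 with MinObjAlignment == 1, a single dead word just
       // before dense-prefix-end cannot hold a filler object by itself (min fill
       // size is 2 words), so the filler written below straddles the region
       // boundary and is recorded as a one-word partial object of the next region.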
 781 
 782   // Note: If min-fill-size decreases to 1, this whole method becomes redundant.
 783   assert(CollectedHeap::min_fill_size() >= 2, "inv");
 784 #ifndef _LP64
 785   // On 32-bit systems, each heap word is 4 bytes, so MinObjAlignment == 2.
 786   // The gap is always equal to min-fill-size, so nothing to do.
 787   return;
 788 #endif
 789   if (MinObjAlignment > 1) {
 790     return;
 791   }
 792   assert(CollectedHeap::min_fill_size() == 2, "inv");
 793   HeapWord* const dense_prefix_end = dense_prefix(id);
 794   assert(_summary_data.is_region_aligned(dense_prefix_end), "precondition");
 795   assert(dense_prefix_end <= space(id)->top(), "precondition");
 796   if (dense_prefix_end == space(id)->top()) {
 797     // Must not have single-word gap right before prefix-end/top.
 798     return;
 799   }
 800   RegionData* const region_after_dense_prefix = _summary_data.addr_to_region_ptr(dense_prefix_end);
 801 
 802   if (region_after_dense_prefix->partial_obj_size() != 0 ||
 803       _mark_bitmap.is_marked(dense_prefix_end)) {
 804     // The region after the dense prefix starts with live bytes.
 805     return;
 806   }
 807 
 808   HeapWord* block_start = start_array(id)->block_start_reaching_into_card(dense_prefix_end);
 809   if (block_start == dense_prefix_end - 1) {
 810     assert(!_mark_bitmap.is_marked(block_start), "inv");
 811     // There is exactly one heap word gap right before the dense prefix end, so we need a filler object.
 812     // The filler object will extend into region_after_dense_prefix.
 813     const size_t obj_len = 2; // min-fill-size
 814     HeapWord* const obj_beg = dense_prefix_end - 1;
 815     CollectedHeap::fill_with_object(obj_beg, obj_len);
 816     _mark_bitmap.mark_obj(obj_beg);
 817     _summary_data.addr_to_region_ptr(obj_beg)->add_live_obj(1);
 818     region_after_dense_prefix->set_partial_obj_size(1);
 819     region_after_dense_prefix->set_partial_obj_addr(obj_beg);
 820     assert(start_array(id) != nullptr, "sanity");
 821     start_array(id)->update_for_block(obj_beg, obj_beg + obj_len);
 822   }
 823 }
 824 
 825 bool PSParallelCompact::check_maximum_compaction(size_t total_live_words,
 826                                                  MutableSpace* const old_space,
 827                                                  HeapWord* full_region_prefix_end) {
 828 
 829   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 830 
 831   // Check System.GC
 832   bool is_max_on_system_gc = UseMaximumCompactionOnSystemGC
 833                           && GCCause::is_user_requested_gc(heap->gc_cause());
 834 
 835   // Check whether the total live data exceeds old-gen capacity.
 836   const bool is_old_gen_overflowing = (total_live_words > old_space->capacity_in_words());
 837 
 838   // JVM flags
 839   const uint total_invocations = heap->total_full_collections();
 840   assert(total_invocations >= _maximum_compaction_gc_num, "sanity");
 841   const size_t gcs_since_max = total_invocations - _maximum_compaction_gc_num;
 842   const bool is_interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
 843 
 844   // Check whether every complete region in old-gen is full of live objects.
 845   const bool is_region_full =
 846     full_region_prefix_end >= _summary_data.region_align_down(old_space->top());
 847 
 848   if (is_max_on_system_gc || is_old_gen_overflowing || is_interval_ended || is_region_full) {
 849     _maximum_compaction_gc_num = total_invocations;
 850     return true;
 851   }
 852 
 853   return false;
 854 }
 855 
 856 void PSParallelCompact::summary_phase()
 857 {
 858   GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
 859 
 860   MutableSpace* const old_space = _space_info[old_space_id].space();
 861   {
 862     size_t total_live_words = 0;
 863     HeapWord* full_region_prefix_end = nullptr;
 864     {
 865       // old-gen
 866       size_t live_words = _summary_data.live_words_in_space(old_space,
 867                                                             &full_region_prefix_end);
 868       total_live_words += live_words;
 869     }
 870     // young-gen
 871     for (uint i = eden_space_id; i < last_space_id; ++i) {
 872       const MutableSpace* space = _space_info[i].space();
 873       size_t live_words = _summary_data.live_words_in_space(space);
 874       total_live_words += live_words;
 875       _space_info[i].set_new_top(space->bottom() + live_words);
 876       _space_info[i].set_dense_prefix(space->bottom());
 877     }
 878 
 879     bool maximum_compaction = check_maximum_compaction(total_live_words,
 880                                                        old_space,
 881                                                        full_region_prefix_end);
 882     HeapWord* dense_prefix_end =
 883       maximum_compaction ? full_region_prefix_end
 884                          : compute_dense_prefix_for_old_space(old_space,
 885                                                               full_region_prefix_end);
 886     SpaceId id = old_space_id;
 887     _space_info[id].set_dense_prefix(dense_prefix_end);
 888 
 889     if (dense_prefix_end != old_space->bottom()) {
 890       fill_dense_prefix_end(id);
 891       _summary_data.summarize_dense_prefix(old_space->bottom(), dense_prefix_end);
 892     }
 893     _summary_data.summarize(_space_info[id].split_info(),
 894                             dense_prefix_end, old_space->top(), nullptr,
 895                             dense_prefix_end, old_space->end(),
 896                             _space_info[id].new_top_addr());
 897   }
 898 
 899   // Summarize the remaining spaces in the young gen.  The initial target space
 900   // is the old gen.  If a space does not fit entirely into the target, then the
 901   // remainder is compacted into the space itself and that space becomes the new
 902   // target.
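       // For example, if eden's live data does not all fit in the space left in
       // old-gen, the part that fits is summarized into old-gen, the remainder is
       // summarized into eden itself, and eden then becomes the target for the
       // from- and to-spaces.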
 903   SpaceId dst_space_id = old_space_id;
 904   HeapWord* dst_space_end = old_space->end();
 905   HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
 906   for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
 907     const MutableSpace* space = _space_info[id].space();
 908     const size_t live = pointer_delta(_space_info[id].new_top(),
 909                                       space->bottom());
 910     const size_t available = pointer_delta(dst_space_end, *new_top_addr);
 911 
 912     if (live > 0 && live <= available) {
 913       // All the live data will fit.
 914       bool done = _summary_data.summarize(_space_info[id].split_info(),
 915                                           space->bottom(), space->top(),
 916                                           nullptr,
 917                                           *new_top_addr, dst_space_end,
 918                                           new_top_addr);
 919       assert(done, "space must fit into old gen");
 920 
 921       // Reset the new_top value for the space.
 922       _space_info[id].set_new_top(space->bottom());
 923     } else if (live > 0) {
 924       // Attempt to fit part of the source space into the target space.
 925       HeapWord* next_src_addr = nullptr;
 926       bool done = _summary_data.summarize(_space_info[id].split_info(),
 927                                           space->bottom(), space->top(),
 928                                           &next_src_addr,
 929                                           *new_top_addr, dst_space_end,
 930                                           new_top_addr);
 931       assert(!done, "space should not fit into old gen");
 932       assert(next_src_addr != nullptr, "sanity");
 933 
 934       // The source space becomes the new target, so the remainder is compacted
 935       // within the space itself.
 936       dst_space_id = SpaceId(id);
 937       dst_space_end = space->end();
 938       new_top_addr = _space_info[id].new_top_addr();
 939       done = _summary_data.summarize(_space_info[id].split_info(),
 940                                      next_src_addr, space->top(),
 941                                      nullptr,
 942                                      space->bottom(), dst_space_end,
 943                                      new_top_addr);
 944       assert(done, "space must fit when compacted into itself");
 945       assert(*new_top_addr <= space->top(), "usage should not grow");
 946     }
 947   }
 948 }
 949 
 950 // This method should contain all heap-specific policy for invoking a full
 951 // collection.  invoke_no_policy() will only attempt to compact the heap; it
 952 // will do nothing further.  If we need to bail out for policy reasons, scavenge
 953 // before full gc, or any other specialized behavior, it needs to be added here.
 954 //
 955 // Note that this method should only be called from the vm_thread while at a
 956 // safepoint.
 957 //
 958 // Note that the all_soft_refs_clear flag in the soft ref policy
 959 // may be true because this method can be called without intervening
 960 // activity.  For example, when heap space is tight and full measures
 961 // are being taken to free space.
 962 bool PSParallelCompact::invoke(bool clear_all_soft_refs) {
 963   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 964   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
 965          "should be in vm thread");
 966 
 967   SvcGCMarker sgcm(SvcGCMarker::FULL);
 968   IsSTWGCActiveMark mark;
 969 
 970   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 971   clear_all_soft_refs = clear_all_soft_refs
 972                      || heap->soft_ref_policy()->should_clear_all_soft_refs();
 973 
 974   return PSParallelCompact::invoke_no_policy(clear_all_soft_refs);
 975 }
 976 
 977 // This method contains no policy. You should probably
 978 // be calling invoke() instead.
 979 bool PSParallelCompact::invoke_no_policy(bool clear_all_soft_refs) {
 980   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 981   assert(ref_processor() != nullptr, "Sanity");
 982 
 983   if (GCLocker::check_active_before_gc()) {
 984     return false;
 985   }
 986 
 987   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 988 
 989   GCIdMark gc_id_mark;
 990   _gc_timer.register_gc_start();
 991   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
 992 
 993   GCCause::Cause gc_cause = heap->gc_cause();
 994   PSYoungGen* young_gen = heap->young_gen();
 995   PSOldGen* old_gen = heap->old_gen();
 996   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 997 
 998   // The scope of casr should end after code that can change
 999   // SoftRefPolicy::_should_clear_all_soft_refs.
1000   ClearedAllSoftRefs casr(clear_all_soft_refs,
1001                           heap->soft_ref_policy());
1002 
1003   // Make sure data structures are sane, make the heap parsable, and do other
1004   // miscellaneous bookkeeping.
1005   pre_compact();
1006 
1007   const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
1008 
1009   {
1010     const uint active_workers =
1011       WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
1012                                         ParallelScavengeHeap::heap()->workers().active_workers(),
1013                                         Threads::number_of_non_daemon_threads());
1014     ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);
1015 
1016     GCTraceCPUTime tcpu(&_gc_tracer);
1017     GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);
1018 
1019     heap->pre_full_gc_dump(&_gc_timer);
1020 
1021     TraceCollectorStats tcs(counters());
1022     TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC");
1023 
1024     if (log_is_enabled(Debug, gc, heap, exit)) {
1025       accumulated_time()->start();
1026     }
1027 
1028     // Let the size policy know we're starting
1029     size_policy->major_collection_begin();
1030 
1031 #if COMPILER2_OR_JVMCI
1032     DerivedPointerTable::clear();
1033 #endif
1034 
1035     ref_processor()->start_discovery(clear_all_soft_refs);
1036 
1037     ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
1038                               false /* unregister_nmethods_during_purge */,
1039                               false /* lock_nmethod_free_separately */);
1040 
1041     marking_phase(&_gc_tracer);
1042 
1043     summary_phase();
1044 
1045 #if COMPILER2_OR_JVMCI
1046     assert(DerivedPointerTable::is_active(), "Sanity");
1047     DerivedPointerTable::set_active(false);
1048 #endif
1049 
1050     forward_to_new_addr();
1051 
1052     adjust_pointers();
1053 
1054     compact();
1055 
1056     ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());
1057 
1058     ParCompactionManager::verify_all_region_stack_empty();
1059 
1060     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
1061     // done before resizing.
1062     post_compact();
1063 
1064     // Let the size policy know we're done
1065     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1066 
1067     if (UseAdaptiveSizePolicy) {
1068       log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
1069       log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
1070                           old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
1071 
1072       // Don't check if the size_policy is ready here.  Let
1073       // the size_policy check that internally.
1074       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1075           AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
1076         // Swap the survivor spaces if from_space is empty. The
1077         // resize_young_gen() called below is normally used after
1078         // a successful young GC and swapping of survivor spaces;
1079         // otherwise, it will fail to resize the young gen with
1080         // the current implementation.
1081         if (young_gen->from_space()->is_empty()) {
1082           young_gen->from_space()->clear(SpaceDecorator::Mangle);
1083           young_gen->swap_spaces();
1084         }
1085 
1086         // Calculate optimal free space amounts
1087         assert(young_gen->max_gen_size() >
1088           young_gen->from_space()->capacity_in_bytes() +
1089           young_gen->to_space()->capacity_in_bytes(),
1090           "Sizes of space in young gen are out-of-bounds");
1091 
1092         size_t young_live = young_gen->used_in_bytes();
1093         size_t eden_live = young_gen->eden_space()->used_in_bytes();
1094         size_t old_live = old_gen->used_in_bytes();
1095         size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
1096         size_t max_old_gen_size = old_gen->max_gen_size();
1097         size_t max_eden_size = young_gen->max_gen_size() -
1098           young_gen->from_space()->capacity_in_bytes() -
1099           young_gen->to_space()->capacity_in_bytes();
1100 
1101         // Used for diagnostics
1102         size_policy->clear_generation_free_space_flags();
1103 
1104         size_policy->compute_generations_free_space(young_live,
1105                                                     eden_live,
1106                                                     old_live,
1107                                                     cur_eden,
1108                                                     max_old_gen_size,
1109                                                     max_eden_size,
1110                                                     true /* full gc*/);
1111 
1112         size_policy->check_gc_overhead_limit(eden_live,
1113                                              max_old_gen_size,
1114                                              max_eden_size,
1115                                              true /* full gc*/,
1116                                              gc_cause,
1117                                              heap->soft_ref_policy());
1118 
1119         size_policy->decay_supplemental_growth(true /* full gc*/);
1120 
1121         heap->resize_old_gen(
1122           size_policy->calculated_old_free_size_in_bytes());
1123 
1124         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1125                                size_policy->calculated_survivor_size_in_bytes());
1126       }
1127 
1128       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
1129     }
1130 
1131     if (UsePerfData) {
1132       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1133       counters->update_counters();
1134       counters->update_old_capacity(old_gen->capacity_in_bytes());
1135       counters->update_young_capacity(young_gen->capacity_in_bytes());
1136     }
1137 
1138     heap->resize_all_tlabs();
1139 
1140     // Resize the metaspace capacity after a collection
1141     MetaspaceGC::compute_new_size();
1142 
1143     if (log_is_enabled(Debug, gc, heap, exit)) {
1144       accumulated_time()->stop();
1145     }
1146 
1147     heap->print_heap_change(pre_gc_values);
1148 
1149     // Track memory usage and detect low memory
1150     MemoryService::track_memory_usage();
1151     heap->update_counters();
1152 
1153     heap->post_full_gc_dump(&_gc_timer);
1154   }
1155 
1156   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1157     Universe::verify("After GC");
1158   }
1159 
1160   heap->print_heap_after_gc();
1161   heap->trace_heap_after_gc(&_gc_tracer);
1162 
1163   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
1164 
1165   _gc_timer.register_gc_end();
1166 
1167   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1168   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1169 
1170   return true;
1171 }
1172 
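     // Thread closure run by the marking workers: scans a thread's roots (its
     // stack and the nmethods it is executing) with the worker's mark-and-push
     // closure, then drains that worker's marking stacks.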
1173 class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
1174 private:
1175   uint _worker_id;
1176 
1177 public:
1178   PCAddThreadRootsMarkingTaskClosure(uint worker_id) : _worker_id(worker_id) { }
1179   void do_thread(Thread* thread) {
1180     assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1181 
1182     ResourceMark rm;
1183 
1184     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(_worker_id);
1185 
1186     MarkingNMethodClosure mark_and_push_in_blobs(&cm->_mark_and_push_closure,
1187                                                  !NMethodToOopClosure::FixRelocations,
1188                                                  true /* keepalive nmethods */);
1189 
1190     thread->oops_do(&cm->_mark_and_push_closure, &mark_and_push_in_blobs);
1191 
1192     // Do the real work
1193     cm->follow_marking_stacks();
1194   }
1195 };
1196 
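     // Work-stealing loop for the marking phase: once a worker's own queues are
     // empty it repeatedly steals object-array chunks or individual objects from
     // other workers, processes them, and drains its local stacks, until the
     // terminator detects that all marking work is done.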
1197 void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
1198   assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1199 
1200   ParCompactionManager* cm =
1201     ParCompactionManager::gc_thread_compaction_manager(worker_id);
1202 
1203   do {
1204     oop obj = nullptr;
1205     ObjArrayTask task;
1206     if (ParCompactionManager::steal_objarray(worker_id,  task)) {
1207       cm->follow_array((objArrayOop)task.obj(), task.index());
1208     } else if (ParCompactionManager::steal(worker_id, obj)) {
1209       cm->follow_contents(obj);
1210     }
1211     cm->follow_marking_stacks();
1212   } while (!terminator.offer_termination());
1213 }
1214 
1215 class MarkFromRootsTask : public WorkerTask {
1216   StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
1217   OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
1218   TaskTerminator _terminator;
1219   uint _active_workers;
1220 
1221 public:
1222   MarkFromRootsTask(uint active_workers) :
1223       WorkerTask("MarkFromRootsTask"),
1224       _strong_roots_scope(active_workers),
1225       _terminator(active_workers, ParCompactionManager::oop_task_queues()),
1226       _active_workers(active_workers) {}
1227 
1228   virtual void work(uint worker_id) {
1229     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1230     cm->create_marking_stats_cache();
1231     {
1232       CLDToOopClosure cld_closure(&cm->_mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
1233       ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
1234 
1235       // Do the real work
1236       cm->follow_marking_stacks();
1237     }
1238 
1239     {
1240       PCAddThreadRootsMarkingTaskClosure closure(worker_id);
1241       Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &closure);
1242     }
1243 
1244     // Mark from OopStorages
1245     {
1246       _oop_storage_set_par_state.oops_do(&cm->_mark_and_push_closure);
1247       // Do the real work
1248       cm->follow_marking_stacks();
1249     }
1250 
1251     if (_active_workers > 1) {
1252       steal_marking_work(_terminator, worker_id);
1253     }
1254   }
1255 };
1256 
1257 class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
1258   TaskTerminator _terminator;
1259 
1260 public:
1261   ParallelCompactRefProcProxyTask(uint max_workers)
1262     : RefProcProxyTask("ParallelCompactRefProcProxyTask", max_workers),
1263       _terminator(_max_workers, ParCompactionManager::oop_task_queues()) {}
1264 
1265   void work(uint worker_id) override {
1266     assert(worker_id < _max_workers, "sanity");
1267     ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
1268     BarrierEnqueueDiscoveredFieldClosure enqueue;
1269     ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
1270     _rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &cm->_mark_and_push_closure, &enqueue, &complete_gc);
1271   }
1272 
1273   void prepare_run_task_hook() override {
1274     _terminator.reset_for_reuse(_queue_count);
1275   }
1276 };
1277 
1278 static void flush_marking_stats_cache(const uint num_workers) {
1279   for (uint i = 0; i < num_workers; ++i) {
1280     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(i);
1281     cm->flush_and_destroy_marking_stats_cache();
1282   }
1283 }
1284 
1285 void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
1286   // Recursively traverse all live objects and mark them
1287   GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
1288 
1289   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
1290 
1291   ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
1292   {
1293     GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
1294 
1295     MarkFromRootsTask task(active_gc_threads);
1296     ParallelScavengeHeap::heap()->workers().run_task(&task);
1297   }
1298 
1299   // Process reference objects found during marking
1300   {
1301     GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
1302 
1303     ReferenceProcessorStats stats;
1304     ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());
1305 
1306     ref_processor()->set_active_mt_degree(active_gc_threads);
1307     ParallelCompactRefProcProxyTask task(ref_processor()->max_num_queues());
1308     stats = ref_processor()->process_discovered_references(task, pt);
1309 
1310     gc_tracer->report_gc_reference_stats(stats);
1311     pt.print_all_references();
1312   }
1313 
1314   {
1315     GCTraceTime(Debug, gc, phases) tm("Flush Marking Stats", &_gc_timer);
1316 
1317     flush_marking_stats_cache(active_gc_threads);
1318   }
1319 
1320   // This is the point where the entire marking should have completed.
1321   ParCompactionManager::verify_all_marking_stack_empty();
1322 
1323   {
1324     GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
1325     WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
1326                                 is_alive_closure(),
1327                                 &do_nothing_cl,
1328                                 1);
1329   }
1330 
1331   {
1332     GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);
1333 
1334     ClassUnloadingContext* ctx = ClassUnloadingContext::context();
1335 
1336     bool unloading_occurred;
1337     {
1338       CodeCache::UnlinkingScope scope(is_alive_closure());
1339 
1340       // Follow system dictionary roots and unload classes.
1341       unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);
1342 
1343       // Unload nmethods.
1344       CodeCache::do_unloading(unloading_occurred);
1345     }
1346 
1347     {
1348       GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
1349       // Release unloaded nmethods' memory.
1350       ctx->purge_nmethods();
1351     }
1352     {
1353       GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
1354       ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
1355     }
1356     {
1357       GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
1358       ctx->free_nmethods();
1359     }
1360 
1361     // Prune dead klasses from subklass/sibling/implementor lists.
1362     Klass::clean_weak_klass_links(unloading_occurred);
1363 
1364     // Clean JVMCI metadata handles.
1365     JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
1366   }
1367 
1368   {
1369     GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
1370     _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
1371   }
1372 #if TASKQUEUE_STATS
1373   ParCompactionManager::oop_task_queues()->print_and_reset_taskqueue_stats("Oop Queue");
1374   ParCompactionManager::_objarray_task_queues->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
1375 #endif
1376 }
1377 
1378 template<typename Func>
1379 void PSParallelCompact::adjust_in_space_helper(SpaceId id, volatile uint* claim_counter, Func&& on_stripe) {
1380   MutableSpace* sp = PSParallelCompact::space(id);
1381   HeapWord* const bottom = sp->bottom();
1382   HeapWord* const top = sp->top();
1383   if (bottom == top) {
1384     return;
1385   }
1386 
1387   const uint num_regions_per_stripe = 2;
1388   const size_t region_size = ParallelCompactData::RegionSize;
1389   const size_t stripe_size = num_regions_per_stripe * region_size;
1390 
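       // Workers claim stripes of num_regions_per_stripe regions each via an atomic counter
       // until the claimed stripe starts at or beyond the top of the space.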
1391   while (true) {
1392     uint counter = Atomic::fetch_then_add(claim_counter, num_regions_per_stripe);
1393     HeapWord* cur_stripe = bottom + counter * region_size;
1394     if (cur_stripe >= top) {
1395       break;
1396     }
1397     HeapWord* stripe_end = MIN2(cur_stripe + stripe_size, top);
1398     on_stripe(cur_stripe, stripe_end);
1399   }
1400 }
1401 
1402 void PSParallelCompact::adjust_in_old_space(volatile uint* claim_counter) {
1403   // Regions in old-space shouldn't be split.
1404   assert(!_space_info[old_space_id].split_info().is_valid(), "inv");
1405 
1406   auto scan_obj_with_limit = [&] (HeapWord* obj_start, HeapWord* left, HeapWord* right) {
1407     assert(mark_bitmap()->is_marked(obj_start), "inv");
1408     oop obj = cast_to_oop(obj_start);
1409     return obj->oop_iterate_size(&pc_adjust_pointer_closure, MemRegion(left, right));
1410   };
1411 
1412   adjust_in_space_helper(old_space_id, claim_counter, [&] (HeapWord* stripe_start, HeapWord* stripe_end) {
1413     assert(_summary_data.is_region_aligned(stripe_start), "inv");
1414     RegionData* cur_region = _summary_data.addr_to_region_ptr(stripe_start);
1415     HeapWord* obj_start;
1416     if (cur_region->partial_obj_size() != 0) {
1417       obj_start = cur_region->partial_obj_addr();
1418       obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
1419     } else {
1420       obj_start = stripe_start;
1421     }
1422 
1423     while (obj_start < stripe_end) {
1424       obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
1425       if (obj_start >= stripe_end) {
1426         break;
1427       }
1428       obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
1429     }
1430   });
1431 }
1432 
1433 void PSParallelCompact::adjust_in_young_space(SpaceId id, volatile uint* claim_counter) {
1434   adjust_in_space_helper(id, claim_counter, [](HeapWord* stripe_start, HeapWord* stripe_end) {
1435     HeapWord* obj_start = stripe_start;
1436     while (obj_start < stripe_end) {
1437       obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
1438       if (obj_start >= stripe_end) {
1439         break;
1440       }
1441       oop obj = cast_to_oop(obj_start);
1442       obj_start += obj->oop_iterate_size(&pc_adjust_pointer_closure);
1443     }
1444   });
1445 }
1446 
1447 void PSParallelCompact::adjust_pointers_in_spaces(uint worker_id, volatile uint* claim_counters) {
1448   auto start_time = Ticks::now();
1449   adjust_in_old_space(&claim_counters[0]);
1450   for (uint id = eden_space_id; id < last_space_id; ++id) {
1451     adjust_in_young_space(SpaceId(id), &claim_counters[id]);
1452   }
1453   log_trace(gc, phases)("adjust_pointers_in_spaces worker %u: %.3f ms", worker_id, (Ticks::now() - start_time).seconds() * 1000);
1454 }
1455 
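     // Updates (adjusts) all pointers to refer to objects' new (forwarded) locations:
     // pointers in the heap spaces, thread stacks, OopStorages, class loader data, weak
     // roots and the code cache.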
1456 class PSAdjustTask final : public WorkerTask {
1457   SubTasksDone                               _sub_tasks;
1458   WeakProcessor::Task                        _weak_proc_task;
1459   OopStorageSetStrongParState<false, false>  _oop_storage_iter;
1460   uint                                       _nworkers;
1461   volatile uint _claim_counters[PSParallelCompact::last_space_id] = {};
1462 
1463   enum PSAdjustSubTask {
1464     PSAdjustSubTask_code_cache,
1465 
1466     PSAdjustSubTask_num_elements
1467   };
1468 
1469 public:
1470   PSAdjustTask(uint nworkers) :
1471     WorkerTask("PSAdjust task"),
1472     _sub_tasks(PSAdjustSubTask_num_elements),
1473     _weak_proc_task(nworkers),
1474     _nworkers(nworkers) {
1475 
1476     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
1477     if (nworkers > 1) {
1478       Threads::change_thread_claim_token();
1479     }
1480   }
1481 
1482   ~PSAdjustTask() {
1483     Threads::assert_all_threads_claimed();
1484   }
1485 
1486   void work(uint worker_id) {
1487     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1488     cm->preserved_marks()->adjust_during_full_gc();
1489     {
1490       // adjust pointers in all spaces
1491       PSParallelCompact::adjust_pointers_in_spaces(worker_id, _claim_counters);
1492     }
1493     {
1494       ResourceMark rm;
1495       Threads::possibly_parallel_oops_do(_nworkers > 1, &pc_adjust_pointer_closure, nullptr);
1496     }
1497     _oop_storage_iter.oops_do(&pc_adjust_pointer_closure);
1498     {
1499       CLDToOopClosure cld_closure(&pc_adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
1500       ClassLoaderDataGraph::cld_do(&cld_closure);
1501     }
1502     {
1503       AlwaysTrueClosure always_alive;
1504       _weak_proc_task.work(worker_id, &always_alive, &pc_adjust_pointer_closure);
1505     }
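         // Only the first worker to claim this sub-task adjusts the pointers embedded in nmethods.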
1506     if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
1507       NMethodToOopClosure adjust_code(&pc_adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
1508       CodeCache::nmethods_do(&adjust_code);
1509     }
1510     _sub_tasks.all_tasks_claimed();
1511   }
1512 };
1513 
1514 void PSParallelCompact::adjust_pointers() {
1515   // Adjust the pointers to reflect the new locations
1516   GCTraceTime(Info, gc, phases) tm("Adjust Pointers", &_gc_timer);
1517   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1518   PSAdjustTask task(nworkers);
1519   ParallelScavengeHeap::heap()->workers().run_task(&task);
1520 }
1521 
1522 // Split [start, end) evenly for a number of workers and return the
1523 // range for worker_id.
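     // For example (hypothetical numbers): splitting 10 regions over 3 workers yields
     // per-worker counts of 4, 3 and 3, i.e. worker 0 gets [start, start + 4),
     // worker 1 gets [start + 4, start + 7) and worker 2 gets [start + 7, start + 10);
     // the single remainder region goes to worker 0.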
1524 static void split_regions_for_worker(size_t start, size_t end,
1525                                      uint worker_id, uint num_workers,
1526                                      size_t* worker_start, size_t* worker_end) {
1527   assert(start < end, "precondition");
1528   assert(num_workers > 0, "precondition");
1529   assert(worker_id < num_workers, "precondition");
1530 
1531   size_t num_regions = end - start;
1532   size_t num_regions_per_worker = num_regions / num_workers;
1533   size_t remainder = num_regions % num_workers;
1534   // The first few workers will get one extra.
1535   *worker_start = start + worker_id * num_regions_per_worker
1536                   + MIN2(checked_cast<size_t>(worker_id), remainder);
1537   *worker_end = *worker_start + num_regions_per_worker
1538                 + (worker_id < remainder ? 1 : 0);
1539 }
1540 
1541 void PSParallelCompact::forward_to_new_addr() {
1542   GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1543   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1544 
1545   struct ForwardTask final : public WorkerTask {
1546     uint _num_workers;
1547 
1548     explicit ForwardTask(uint num_workers) :
1549       WorkerTask("PSForward task"),
1550       _num_workers(num_workers) {}
1551 
1552     void work(uint worker_id) override {
1553       ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1554       for (uint id = old_space_id; id < last_space_id; ++id) {
1555         MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1556         HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1557         HeapWord* top = sp->top();
1558 
1559         if (dense_prefix_addr == top) {
1560           continue;
1561         }
1562 
1563         size_t dense_prefix_region = _summary_data.addr_to_region_idx(dense_prefix_addr);
1564         size_t top_region = _summary_data.addr_to_region_idx(_summary_data.region_align_up(top));
1565         size_t start_region;
1566         size_t end_region;
1567         split_regions_for_worker(dense_prefix_region, top_region,
1568                                  worker_id, _num_workers,
1569                                  &start_region, &end_region);
1570         for (size_t cur_region = start_region; cur_region < end_region; ++cur_region) {
1571           RegionData* region_ptr = _summary_data.region(cur_region);
1572           size_t live_words = region_ptr->partial_obj_size();
1573 
1574           if (live_words == ParallelCompactData::RegionSize) {
1575             // No object starts in this region; it is fully covered by the partial object.
1576             continue;
1577           }
1578 
1579           HeapWord* region_start = _summary_data.region_to_addr(cur_region);
1580           HeapWord* region_end = region_start + ParallelCompactData::RegionSize;
1581 
1582           HeapWord* cur_addr = region_start + live_words;
1583 
1584           HeapWord* destination = region_ptr->destination();
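               // Each marked object's new address is the region's destination plus the
               // number of live words that precede it within this region.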
1585           while (cur_addr < region_end) {
1586             cur_addr = mark_bitmap()->find_obj_beg(cur_addr, region_end);
1587             if (cur_addr >= region_end) {
1588               break;
1589             }
1590             assert(mark_bitmap()->is_marked(cur_addr), "inv");
1591             HeapWord* new_addr = destination + live_words;
1592             oop obj = cast_to_oop(cur_addr);
1593             if (new_addr != cur_addr) {
1594               cm->preserved_marks()->push_if_necessary(obj, obj->mark());
1595               obj->forward_to(cast_to_oop(new_addr));
1596             }
1597             size_t obj_size = obj->size();
1598             live_words += obj_size;
1599             cur_addr += obj_size;
1600           }
1601         }
1602       }
1603     }
1604   } task(nworkers);
1605 
1606   ParallelScavengeHeap::heap()->workers().run_task(&task);
1607   debug_only(verify_forward();)
1608 }
1609 
1610 #ifdef ASSERT
1611 void PSParallelCompact::verify_forward() {
1612   HeapWord* old_dense_prefix_addr = dense_prefix(SpaceId(old_space_id));
1613   RegionData* old_region = _summary_data.region(_summary_data.addr_to_region_idx(old_dense_prefix_addr));
1614   HeapWord* bump_ptr = old_region->partial_obj_size() != 0
1615                        ? old_dense_prefix_addr + old_region->partial_obj_size()
1616                        : old_dense_prefix_addr;
1617   SpaceId bump_ptr_space = old_space_id;
1618 
1619   for (uint id = old_space_id; id < last_space_id; ++id) {
1620     MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1621     HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1622     HeapWord* top = sp->top();
1623     HeapWord* cur_addr = dense_prefix_addr;
1624 
1625     while (cur_addr < top) {
1626       cur_addr = mark_bitmap()->find_obj_beg(cur_addr, top);
1627       if (cur_addr >= top) {
1628         break;
1629       }
1630       assert(mark_bitmap()->is_marked(cur_addr), "inv");
1631       // Move to the space containing cur_addr
1632       if (bump_ptr == _space_info[bump_ptr_space].new_top()) {
1633         bump_ptr = space(space_id(cur_addr))->bottom();
1634         bump_ptr_space = space_id(bump_ptr);
1635       }
1636       oop obj = cast_to_oop(cur_addr);
1637       if (cur_addr != bump_ptr) {
1638         assert(obj->forwardee() == cast_to_oop(bump_ptr), "inv");
1639       }
1640       bump_ptr += obj->size();
1641       cur_addr += obj->size();
1642     }
1643   }
1644 }
1645 #endif
1646 
1647 // Helper class to print 8 region numbers per line and then print the total at the end.
1648 class FillableRegionLogger : public StackObj {
1649 private:
1650   Log(gc, compaction) log;
1651   static const int LineLength = 8;
1652   size_t _regions[LineLength];
1653   int _next_index;
1654   bool _enabled;
1655   size_t _total_regions;
1656 public:
1657   FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
1658   ~FillableRegionLogger() {
1659     log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
1660   }
1661 
1662   void print_line() {
1663     if (!_enabled || _next_index == 0) {
1664       return;
1665     }
1666     FormatBuffer<> line("Fillable: ");
1667     for (int i = 0; i < _next_index; i++) {
1668       line.append(" " SIZE_FORMAT_W(7), _regions[i]);
1669     }
1670     log.trace("%s", line.buffer());
1671     _next_index = 0;
1672   }
1673 
1674   void handle(size_t region) {
1675     if (!_enabled) {
1676       return;
1677     }
1678     _regions[_next_index++] = region;
1679     if (_next_index == LineLength) {
1680       print_line();
1681     }
1682     _total_regions++;
1683   }
1684 };
1685 
1686 void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
1687 {
1688   GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
1689 
1690   // worker_id selects the worker that receives the next claimed region (round-robin, starting at 0).
1691   uint worker_id = 0;
1692 
1693   // Find all regions that are available (can be filled immediately) and
1694   // distribute them to the thread stacks.  The iteration is done in reverse
1695   // order (high to low) so the regions will be removed in ascending order.
1696 
1697   const ParallelCompactData& sd = PSParallelCompact::summary_data();
1698 
1699   // id + 1 is used to test for termination so that an unsigned type can be
1700   // used with old_space_id == 0.
1701   FillableRegionLogger region_logger;
1702   for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
1703     SpaceInfo* const space_info = _space_info + id;
1704     HeapWord* const new_top = space_info->new_top();
1705 
1706     const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
1707     const size_t end_region =
1708       sd.addr_to_region_idx(sd.region_align_up(new_top));
1709 
1710     for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
1711       if (sd.region(cur)->claim_unsafe()) {
1712         ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1713         bool result = sd.region(cur)->mark_normal();
1714         assert(result, "Must succeed at this point.");
1715         cm->region_stack()->push(cur);
1716         region_logger.handle(cur);
1717         // Assign regions to tasks in round-robin fashion.
1718         if (++worker_id == parallel_gc_threads) {
1719           worker_id = 0;
1720         }
1721       }
1722     }
1723     region_logger.print_line();
1724   }
1725 }
1726 
1727 static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
1728   assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1729 
1730   ParCompactionManager* cm =
1731     ParCompactionManager::gc_thread_compaction_manager(worker_id);
1732 
1733   // Drain the stacks that have been preloaded with regions
1734   // that are ready to fill.
1735 
1736   cm->drain_region_stacks();
1737 
1738   guarantee(cm->region_stack()->is_empty(), "Not empty");
1739 
1740   size_t region_index = 0;
1741 
1742   while (true) {
1743     if (ParCompactionManager::steal(worker_id, region_index)) {
1744       PSParallelCompact::fill_and_update_region(cm, region_index);
1745       cm->drain_region_stacks();
1746     } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
1747       // Fill and update an unavailable region with the help of a shadow region
1748       PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
1749       cm->drain_region_stacks();
1750     } else {
1751       if (terminator->offer_termination()) {
1752         break;
1753       }
1754       // Go around again.
1755     }
1756   }
1757 }
1758 
1759 class FillDensePrefixAndCompactionTask: public WorkerTask {
1760   uint _num_workers;
1761   TaskTerminator _terminator;
1762 
1763 public:
1764   FillDensePrefixAndCompactionTask(uint active_workers) :
1765       WorkerTask("FillDensePrefixAndCompactionTask"),
1766       _num_workers(active_workers),
1767       _terminator(active_workers, ParCompactionManager::region_task_queues()) {
1768   }
1769 
1770   virtual void work(uint worker_id) {
1771     {
1772       auto start = Ticks::now();
1773       PSParallelCompact::fill_dead_objs_in_dense_prefix(worker_id, _num_workers);
1774       log_trace(gc, phases)("Fill dense prefix by worker %u: %.3f ms", worker_id, (Ticks::now() - start).seconds() * 1000);
1775     }
1776     compaction_with_stealing_work(&_terminator, worker_id);
1777   }
1778 };
1779 
1780 void PSParallelCompact::fill_range_in_dense_prefix(HeapWord* start, HeapWord* end) {
1781 #ifdef ASSERT
1782   {
1783     assert(start < end, "precondition");
1784     assert(mark_bitmap()->find_obj_beg(start, end) == end, "precondition");
1785     HeapWord* bottom = _space_info[old_space_id].space()->bottom();
1786     if (start != bottom) {
1787       HeapWord* obj_start = mark_bitmap()->find_obj_beg_reverse(bottom, start);
1788       HeapWord* after_obj = obj_start + cast_to_oop(obj_start)->size();
1789       assert(after_obj == start, "precondition");
1790     }
1791   }
1792 #endif
1793 
1794   CollectedHeap::fill_with_objects(start, pointer_delta(end, start));
1795   HeapWord* addr = start;
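       // Walk the filler objects just created and record each block in the old-space start array.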
1796   do {
1797     size_t size = cast_to_oop(addr)->size();
1798     start_array(old_space_id)->update_for_block(addr, addr + size);
1799     addr += size;
1800   } while (addr < end);
1801 }
1802 
1803 void PSParallelCompact::fill_dead_objs_in_dense_prefix(uint worker_id, uint num_workers) {
1804   ParMarkBitMap* bitmap = mark_bitmap();
1805 
1806   HeapWord* const bottom = _space_info[old_space_id].space()->bottom();
1807   HeapWord* const prefix_end = dense_prefix(old_space_id);
1808 
1809   if (bottom == prefix_end) {
1810     return;
1811   }
1812 
1813   size_t bottom_region = _summary_data.addr_to_region_idx(bottom);
1814   size_t prefix_end_region = _summary_data.addr_to_region_idx(prefix_end);
1815 
1816   size_t start_region;
1817   size_t end_region;
1818   split_regions_for_worker(bottom_region, prefix_end_region,
1819                            worker_id, num_workers,
1820                            &start_region, &end_region);
1821 
1822   if (start_region == end_region) {
1823     return;
1824   }
1825 
1826   HeapWord* const start_addr = _summary_data.region_to_addr(start_region);
1827   HeapWord* const end_addr = _summary_data.region_to_addr(end_region);
1828 
1829   // Skip live partial obj (if any) from previous region.
1830   HeapWord* cur_addr;
1831   RegionData* start_region_ptr = _summary_data.region(start_region);
1832   if (start_region_ptr->partial_obj_size() != 0) {
1833     HeapWord* partial_obj_start = start_region_ptr->partial_obj_addr();
1834     assert(bitmap->is_marked(partial_obj_start), "inv");
1835     cur_addr = partial_obj_start + cast_to_oop(partial_obj_start)->size();
1836   } else {
1837     cur_addr = start_addr;
1838   }
1839 
1840   // end_addr is inclusive to handle regions starting with dead space.
1841   while (cur_addr <= end_addr) {
1842     // Use prefix_end to handle trailing obj in each worker region-chunk.
1843     HeapWord* live_start = bitmap->find_obj_beg(cur_addr, prefix_end);
1844     if (cur_addr != live_start) {
1845       // Only worker 0 handles preceding dead space (other workers' leading dead space
1846       // is filled by the preceding worker as its trailing dead space).
1846       if (cur_addr != start_addr || worker_id == 0) {
1847         fill_range_in_dense_prefix(cur_addr, live_start);
1848       }
1849     }
1850     if (live_start >= end_addr) {
1851       break;
1852     }
1853     assert(bitmap->is_marked(live_start), "inv");
1854     cur_addr = live_start + cast_to_oop(live_start)->size();
1855   }
1856 }
1857 
1858 void PSParallelCompact::compact() {
1859   GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
1860 
1861   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
1862 
1863   initialize_shadow_regions(active_gc_threads);
1864   prepare_region_draining_tasks(active_gc_threads);
1865 
1866   {
1867     GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
1868 
1869     FillDensePrefixAndCompactionTask task(active_gc_threads);
1870     ParallelScavengeHeap::heap()->workers().run_task(&task);
1871 
1872 #ifdef  ASSERT
1873     verify_filler_in_dense_prefix();
1874 
1875     // Verify that all regions have been processed.
1876     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1877       verify_complete(SpaceId(id));
1878     }
1879 #endif
1880   }
1881 }
1882 
1883 #ifdef  ASSERT
1884 void PSParallelCompact::verify_filler_in_dense_prefix() {
1885   HeapWord* bottom = _space_info[old_space_id].space()->bottom();
1886   HeapWord* dense_prefix_end = dense_prefix(old_space_id);
1887   HeapWord* cur_addr = bottom;
1888   while (cur_addr < dense_prefix_end) {
1889     oop obj = cast_to_oop(cur_addr);
1890     oopDesc::verify(obj);
1891     if (!mark_bitmap()->is_marked(cur_addr)) {
1892       Klass* k = cast_to_oop(cur_addr)->klass_without_asserts();
1893       assert(k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass(), "inv");
1894     }
1895     cur_addr += obj->size();
1896   }
1897 }
1898 
1899 void PSParallelCompact::verify_complete(SpaceId space_id) {
1900   // All Regions served as compaction targets, from dense_prefix() to
1901   // new_top(), should be marked as filled and all Regions between new_top()
1902   // and top() should be available (i.e., should have been emptied).
1903   ParallelCompactData& sd = summary_data();
1904   SpaceInfo si = _space_info[space_id];
1905   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
1906   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
1907   const size_t beg_region = sd.addr_to_region_idx(si.dense_prefix());
1908   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
1909   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
1910 
1911   size_t cur_region;
1912   for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
1913     const RegionData* const c = sd.region(cur_region);
1914     if (!c->completed()) {
1915       log_warning(gc)("region " SIZE_FORMAT " not filled: destination_count=%u",
1916                       cur_region, c->destination_count());
1917     }
1918   }
1919 
1920   for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
1921     const RegionData* const c = sd.region(cur_region);
1922     if (!c->available()) {
1923       log_warning(gc)("region " SIZE_FORMAT " not empty: destination_count=%u",
1924                       cur_region, c->destination_count());
1925     }
1926   }
1927 }
1928 #endif  // #ifdef ASSERT
1929 
1930 // Return the SpaceId for the space containing addr.  If addr is not in the
1931 // heap, last_space_id is returned.  In debug mode it expects the address to be
1932 // in the heap and asserts such.
1933 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
1934   assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
1935 
1936   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1937     if (_space_info[id].space()->contains(addr)) {
1938       return SpaceId(id);
1939     }
1940   }
1941 
1942   assert(false, "no space contains the addr");
1943   return last_space_id;
1944 }
1945 
1946 // Skip over count live words starting from beg, and return the address of the
1947 // next live word.  Unless marked, the word corresponding to beg is assumed to
1948 // be dead.  Callers must either ensure beg does not correspond to the middle of
1949 // an object, or account for those live words in some other way.  Callers must
1950 // also ensure that there are enough live words in the range [beg, end) to skip.
1951 HeapWord*
1952 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
1953 {
1954   assert(count > 0, "sanity");
1955 
1956   ParMarkBitMap* m = mark_bitmap();
1957   HeapWord* cur_addr = beg;
1958   while (true) {
1959     cur_addr = m->find_obj_beg(cur_addr, end);
1960     assert(cur_addr < end, "inv");
1961     size_t obj_size = cast_to_oop(cur_addr)->size();
1962     // Strictly greater-than
1963     if (obj_size > count) {
1964       return cur_addr + count;
1965     }
1966     count -= obj_size;
1967     cur_addr += obj_size;
1968   }
1969 }
1970 
1971 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
1972                                             SpaceId src_space_id,
1973                                             size_t src_region_idx)
1974 {
1975   assert(summary_data().is_region_aligned(dest_addr), "not aligned");
1976 
1977   const SplitInfo& split_info = _space_info[src_space_id].split_info();
1978   if (split_info.dest_region_addr() == dest_addr) {
1979     // The partial object ending at the split point contains the first word to
1980     // be copied to dest_addr.
1981     return split_info.first_src_addr();
1982   }
1983 
1984   const ParallelCompactData& sd = summary_data();
1985   ParMarkBitMap* const bitmap = mark_bitmap();
1986   const size_t RegionSize = ParallelCompactData::RegionSize;
1987 
1988   assert(sd.is_region_aligned(dest_addr), "not aligned");
1989   const RegionData* const src_region_ptr = sd.region(src_region_idx);
1990   const size_t partial_obj_size = src_region_ptr->partial_obj_size();
1991   HeapWord* const src_region_destination = src_region_ptr->destination();
1992 
1993   assert(dest_addr >= src_region_destination, "wrong src region");
1994   assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
1995 
1996   HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
1997   HeapWord* const src_region_end = src_region_beg + RegionSize;
1998 
1999   HeapWord* addr = src_region_beg;
2000   if (dest_addr == src_region_destination) {
2001     // Return the first live word in the source region.
2002     if (partial_obj_size == 0) {
2003       addr = bitmap->find_obj_beg(addr, src_region_end);
2004       assert(addr < src_region_end, "no objects start in src region");
2005     }
2006     return addr;
2007   }
2008 
2009   // Must skip some live data.
2010   size_t words_to_skip = dest_addr - src_region_destination;
2011   assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
2012 
2013   if (partial_obj_size >= words_to_skip) {
2014     // All the live words to skip are part of the partial object.
2015     addr += words_to_skip;
2016     if (partial_obj_size == words_to_skip) {
2017       // Find the first live word past the partial object.
2018       addr = bitmap->find_obj_beg(addr, src_region_end);
2019       assert(addr < src_region_end, "wrong src region");
2020     }
2021     return addr;
2022   }
2023 
2024   // Skip over the partial object (if any).
2025   if (partial_obj_size != 0) {
2026     words_to_skip -= partial_obj_size;
2027     addr += partial_obj_size;
2028   }
2029 
2030   // Skip over live words due to objects that start in the region.
2031   addr = skip_live_words(addr, src_region_end, words_to_skip);
2032   assert(addr < src_region_end, "wrong src region");
2033   return addr;
2034 }
2035 
2036 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2037                                                      SpaceId src_space_id,
2038                                                      size_t beg_region,
2039                                                      HeapWord* end_addr)
2040 {
2041   ParallelCompactData& sd = summary_data();
2042 
2043 #ifdef ASSERT
2044   MutableSpace* const src_space = _space_info[src_space_id].space();
2045   HeapWord* const beg_addr = sd.region_to_addr(beg_region);
2046   assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
2047          "src_space_id does not match beg_addr");
2048   assert(src_space->contains(end_addr) || end_addr == src_space->end(),
2049          "src_space_id does not match end_addr");
2050 #endif // #ifdef ASSERT
2051 
2052   RegionData* const beg = sd.region(beg_region);
2053   RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
2054 
2055   // Regions up to new_top() are enqueued if they become available.
2056   HeapWord* const new_top = _space_info[src_space_id].new_top();
2057   RegionData* const enqueue_end =
2058     sd.addr_to_region_ptr(sd.region_align_up(new_top));
2059 
2060   for (RegionData* cur = beg; cur < end; ++cur) {
2061     assert(cur->data_size() > 0, "region must have live data");
2062     cur->decrement_destination_count();
2063     if (cur < enqueue_end && cur->available() && cur->claim()) {
2064       if (cur->mark_normal()) {
2065         cm->push_region(sd.region(cur));
2066       } else if (cur->mark_copied()) {
2067         // Try to copy the content of the shadow region back to its corresponding
2068         // heap region if the shadow region is filled. Otherwise, the GC thread
2069         // that fills the shadow region will copy the data back (see
2070         // MoveAndUpdateShadowClosure::complete_region).
2071         copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur));
2072         ParCompactionManager::push_shadow_region_mt_safe(cur->shadow_region());
2073         cur->set_completed();
2074       }
2075     }
2076   }
2077 }
2078 
2079 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
2080                                           SpaceId& src_space_id,
2081                                           HeapWord*& src_space_top,
2082                                           HeapWord* end_addr)
2083 {
2084   typedef ParallelCompactData::RegionData RegionData;
2085 
2086   ParallelCompactData& sd = PSParallelCompact::summary_data();
2087   const size_t region_size = ParallelCompactData::RegionSize;
2088 
2089   size_t src_region_idx = 0;
2090 
2091   // Skip empty regions (if any) up to the top of the space.
2092   HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
2093   RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
2094   HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
2095   const RegionData* const top_region_ptr =
2096     sd.addr_to_region_ptr(top_aligned_up);
2097   while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
2098     ++src_region_ptr;
2099   }
2100 
2101   if (src_region_ptr < top_region_ptr) {
2102     // The next source region is in the current space.  Update src_region_idx
2103     // and the source address to match src_region_ptr.
2104     src_region_idx = sd.region(src_region_ptr);
2105     HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
2106     if (src_region_addr > closure.source()) {
2107       closure.set_source(src_region_addr);
2108     }
2109     return src_region_idx;
2110   }
2111 
2112   // Switch to a new source space and find the first non-empty region.
2113   unsigned int space_id = src_space_id + 1;
2114   assert(space_id < last_space_id, "not enough spaces");
2115 
2116   HeapWord* const destination = closure.destination();
2117 
2118   do {
2119     MutableSpace* space = _space_info[space_id].space();
2120     HeapWord* const bottom = space->bottom();
2121     const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
2122 
2123     // Iterate over the spaces that do not compact into themselves.
2124     if (bottom_cp->destination() != bottom) {
2125       HeapWord* const top_aligned_up = sd.region_align_up(space->top());
2126       const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
2127 
2128       for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
2129         if (src_cp->live_obj_size() > 0) {
2130           // Found it.
2131           assert(src_cp->destination() == destination,
2132                  "first live obj in the space must match the destination");
2133           assert(src_cp->partial_obj_size() == 0,
2134                  "a space cannot begin with a partial obj");
2135 
2136           src_space_id = SpaceId(space_id);
2137           src_space_top = space->top();
2138           const size_t src_region_idx = sd.region(src_cp);
2139           closure.set_source(sd.region_to_addr(src_region_idx));
2140           return src_region_idx;
2141         } else {
2142           assert(src_cp->data_size() == 0, "sanity");
2143         }
2144       }
2145     }
2146   } while (++space_id < last_space_id);
2147 
2148   assert(false, "no source region was found");
2149   return 0;
2150 }
2151 
2152 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2153   ParallelCompactData& sd = summary_data();
2154   assert(sd.is_region_aligned(region_start_addr), "precondition");
2155 
2156   // Use per-region partial_obj_size to locate the end of the object that extends into the region starting at region_start_addr.
2157   SplitInfo& split_info = _space_info[space_id(region_start_addr)].split_info();
2158   size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2159   size_t end_region_idx = sd.region_count();
2160   size_t accumulated_size = 0;
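     // Accumulate partial_obj_size over consecutive regions that are completely covered by
     // the object; stop at the first region that is not fully covered, or at the split point.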
2161   for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2162     if (split_info.is_split(region_idx)) {
2163       accumulated_size += split_info.partial_obj_size();
2164       break;
2165     }
2166     size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2167     accumulated_size += cur_partial_obj_size;
2168     if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2169       break;
2170     }
2171   }
2172   return region_start_addr + accumulated_size;
2173 }
2174 
2175 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2176 {
2177   ParMarkBitMap* const bitmap = mark_bitmap();
2178   ParallelCompactData& sd = summary_data();
2179   RegionData* const region_ptr = sd.region(region_idx);
2180 
2181   // Get the source region and related info.
2182   size_t src_region_idx = region_ptr->source_region();
2183   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2184   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2185   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2186 
2187   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2188 
2189   // Adjust src_region_idx to prepare for decrementing destination counts (the
2190   // destination count is not decremented when a region is copied to itself).
2191   if (src_region_idx == region_idx) {
2192     src_region_idx += 1;
2193   }
2194 
2195   if (bitmap->is_unmarked(closure.source())) {
2196     // The first source word is in the middle of an object; copy the remainder
2197     // of the object or as much as will fit.  The fact that pointer updates were
2198     // deferred will be noted when the object header is processed.
2199     HeapWord* const old_src_addr = closure.source();
2200     {
2201       HeapWord* region_start = sd.region_align_down(closure.source());
2202       HeapWord* obj_start = bitmap->find_obj_beg_reverse(region_start, closure.source());
2203       HeapWord* obj_end;
2204       if (bitmap->is_marked(obj_start)) {
2205         HeapWord* next_region_start = region_start + ParallelCompactData::RegionSize;
2206         HeapWord* partial_obj_start = (next_region_start >= src_space_top)
2207                                       ? nullptr
2208                                       : sd.addr_to_region_ptr(next_region_start)->partial_obj_addr();
2209         if (partial_obj_start == obj_start) {
2210           // This obj extends into the next region.
2211           obj_end = partial_obj_end(next_region_start);
2212         } else {
2213           // Completely contained in this region; safe to use size().
2214           obj_end = obj_start + cast_to_oop(obj_start)->size();
2215         }
2216       } else {
2217         // This obj starts in a previous region and extends into the current one.
2218         obj_end = partial_obj_end(region_start);
2219       }
2220       size_t partial_obj_size = pointer_delta(obj_end, closure.source());
2221       closure.copy_partial_obj(partial_obj_size);
2222     }
2223 
2224     if (closure.is_full()) {
2225       decrement_destination_counts(cm, src_space_id, src_region_idx,
2226                                    closure.source());
2227       closure.complete_region(dest_addr, region_ptr);
2228       return;
2229     }
2230 
2231     HeapWord* const end_addr = sd.region_align_down(closure.source());
2232     if (sd.region_align_down(old_src_addr) != end_addr) {
2233       // The partial object was copied from more than one source region.
2234       decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2235 
2236       // Move to the next source region, possibly switching spaces as well.  All
2237       // args except end_addr may be modified.
2238       src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2239                                        end_addr);
2240     }
2241   }
2242 
2243   do {
2244     HeapWord* cur_addr = closure.source();
2245     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2246                                     src_space_top);
2247     HeapWord* partial_obj_start = (end_addr == src_space_top)
2248                                 ? nullptr
2249                                 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2250     // Apply the closure to objects inside [cur_addr, end_addr).
2251     do {
2252       cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2253       if (cur_addr == end_addr) {
2254         break;
2255       }
2256       size_t obj_size;
2257       if (partial_obj_start == cur_addr) {
2258         obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2259       } else {
2260         // This obj doesn't extend into next region; size() is safe to use.
2261         obj_size = cast_to_oop(cur_addr)->size();
2262       }
2263       closure.do_addr(cur_addr, obj_size);
2264       cur_addr += obj_size;
2265     } while (cur_addr < end_addr && !closure.is_full());
2266 
2267     if (closure.is_full()) {
2268       decrement_destination_counts(cm, src_space_id, src_region_idx,
2269                                    closure.source());
2270       closure.complete_region(dest_addr, region_ptr);
2271       return;
2272     }
2273 
2274     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2275 
2276     // Move to the next source region, possibly switching spaces as well.  All
2277     // args except end_addr may be modified.
2278     src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2279                                      end_addr);
2280   } while (true);
2281 }
2282 
2283 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2284 {
2285   MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2286   fill_region(cm, cl, region_idx);
2287 }
2288 
2289 void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx)
2290 {
2291   // Get a shadow region first
2292   ParallelCompactData& sd = summary_data();
2293   RegionData* const region_ptr = sd.region(region_idx);
2294   size_t shadow_region = ParCompactionManager::pop_shadow_region_mt_safe(region_ptr);
2295   // The InvalidShadow return value indicates the corresponding heap region is available,
2296   // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
2297   // MoveAndUpdateShadowClosure to fill the acquired shadow region.
2298   if (shadow_region == ParCompactionManager::InvalidShadow) {
2299     MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2300     region_ptr->shadow_to_normal();
2301     return fill_region(cm, cl, region_idx);
2302   } else {
2303     MoveAndUpdateShadowClosure cl(mark_bitmap(), region_idx, shadow_region);
2304     return fill_region(cm, cl, region_idx);
2305   }
2306 }
2307 
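     // Copy one region's worth of words from the filled shadow region back to its
     // corresponding heap region.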
2308 void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr)
2309 {
2310   Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
2311 }
2312 
2313 bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_t &region_idx)
2314 {
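     // Starting from this worker's next shadow-region candidate and striding by the number
     // of active GC threads, try to claim a not-yet-available old-space region (below
     // new_top) so that it can be filled via a shadow region.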
2315   size_t next = cm->next_shadow_region();
2316   ParallelCompactData& sd = summary_data();
2317   size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
2318   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
2319 
2320   while (next < old_new_top) {
2321     if (sd.region(next)->mark_shadow()) {
2322       region_idx = next;
2323       return true;
2324     }
2325     next = cm->move_next_shadow_region_by(active_gc_threads);
2326   }
2327 
2328   return false;
2329 }
2330 
2331 // The shadow region is an optimization to address region dependencies in full GC. The basic
2332 // idea is to make more regions available by temporarily storing their live objects in empty
2333 // shadow regions, resolving dependencies between them and the destination regions. Therefore,
2334 // GC threads need not wait for destination regions to become available before processing sources.
2335 //
2336 // A typical workflow would be:
2337 // After draining its own stack and failing to steal from others, a GC worker would pick an
2338 // unavailable region (destination count > 0) and get a shadow region. Then the worker fills
2339 // the shadow region by copying live objects from source regions of the unavailable one. Once
2340 // the unavailable region becomes available, the data in the shadow region will be copied back.
2341 // Shadow regions are empty regions in the to-space and regions between top and end of other spaces.
2342 void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
2343 {
2344   const ParallelCompactData& sd = PSParallelCompact::summary_data();
2345 
2346   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2347     SpaceInfo* const space_info = _space_info + id;
2348     MutableSpace* const space = space_info->space();
2349 
2350     const size_t beg_region =
2351       sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
2352     const size_t end_region =
2353       sd.addr_to_region_idx(sd.region_align_down(space->end()));
2354 
2355     for (size_t cur = beg_region; cur < end_region; ++cur) {
2356       ParCompactionManager::push_shadow_region(cur);
2357     }
2358   }
2359 
2360   size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
2361   for (uint i = 0; i < parallel_gc_threads; i++) {
2362     ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
2363     cm->set_next_shadow_region(beg_region + i);
2364   }
2365 }
2366 
2367 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2368 {
2369   size_t words = MIN2(partial_obj_size, words_remaining());
2370 
2371   // This test is necessary; if omitted, the pointer updates to a partial object
2372   // that crosses the dense prefix boundary could be overwritten.
2373   if (source() != copy_destination()) {
2374     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2375     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2376   }
2377   update_state(words);
2378 }
2379 
2380 void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2381   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
2382   region_ptr->set_completed();
2383 }
2384 
2385 void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
2386   assert(destination() != nullptr, "sanity");
2387   _source = addr;
2388 
2389   // The start_array must be updated even if the object is not moving.
2390   if (_start_array != nullptr) {
2391     _start_array->update_for_block(destination(), destination() + words);
2392   }
2393 
2394   // Avoid overflowing the space remaining in the destination.
2395   words = MIN2(words, words_remaining());
2396   assert(words > 0, "inv");
2397 
2398   if (copy_destination() != source()) {
2399     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2400     assert(source() != destination(), "inv");
2401     assert(cast_to_oop(source())->is_forwarded(), "inv");
2402     assert(cast_to_oop(source())->forwardee() == cast_to_oop(destination()), "inv");
2403     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2404     cast_to_oop(copy_destination())->init_mark();
2405   }
2406 
2407   update_state(words);
2408 }
2409 
2410 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2411   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2412   // Record the shadow region index
2413   region_ptr->set_shadow_region(_shadow);
2414   // Mark the shadow region as filled to indicate the data is ready to be
2415   // copied back
2416   region_ptr->mark_filled();
2417   // Try to copy the content of the shadow region back to its corresponding
2418   // heap region if available; the GC thread that decreases the destination
2419   // count to zero will do the copying otherwise (see
2420   // PSParallelCompact::decrement_destination_counts).
2421   if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2422     region_ptr->set_completed();
2423     PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2424     ParCompactionManager::push_shadow_region_mt_safe(_shadow);
2425   }
2426 }
2427