/*
 * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psStringDedup.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/access.inline.hpp"
#include "oops/flatArrayKlass.inline.hpp"
#include "oops/instanceClassLoaderKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

#include <math.h>

// All sizes are in HeapWords.
const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
static_assert(ParallelCompactData::RegionSize >= BitsPerWord, "region-start bit word-aligned");
const size_t ParallelCompactData::RegionSizeBytes =
  RegionSize << LogHeapWordSize;
const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;
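
// Illustrative arithmetic (assuming 64-bit HeapWords, i.e. LogHeapWordSize
// == 3): RegionSize is 2^16 words, so RegionSizeBytes is 512 KiB.  Masking an
// address with RegionAddrMask yields the start of its region, while masking
// with RegionAddrOffsetMask yields the byte offset within that region.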

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_shift = 27;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::los_mask = ~dc_mask;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
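
// Illustrative reading of the constants above (region_sz_t is a 32-bit type,
// as the ~0U arithmetic implies): the bits at and above dc_shift hold the
// destination count / claim state, while the low 27 bits (los_mask) hold the
// live-obj size.  E.g. a claimed region with live_obj_size 1000 reads as
// (dc_claimed | 1000) and becomes (dc_completed | 1000) once it is filled.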

PSParallelCompact::SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];

SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
ReferenceProcessor* PSParallelCompact::_ref_processor = nullptr;

void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
                       HeapWord* destination)
{
  assert(src_region_idx != 0, "invalid src_region_idx");
  assert(partial_obj_size != 0, "invalid partial_obj_size argument");
  assert(destination != nullptr, "invalid destination argument");

  _src_region_idx = src_region_idx;
  _partial_obj_size = partial_obj_size;
  _destination = destination;

  // These fields may not be updated below, so make sure they're clear.
  assert(_dest_region_addr == nullptr, "should have been cleared");
  assert(_first_src_addr == nullptr, "should have been cleared");

  // Determine the number of destination regions for the partial object.
  HeapWord* const last_word = destination + partial_obj_size - 1;
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  HeapWord* const beg_region_addr = sd.region_align_down(destination);
  HeapWord* const end_region_addr = sd.region_align_down(last_word);

  if (beg_region_addr == end_region_addr) {
    // One destination region.
    _destination_count = 1;
    if (end_region_addr == destination) {
      // The destination falls on a region boundary, thus the first word of the
      // partial object will be the first word copied to the destination region.
      _dest_region_addr = end_region_addr;
      _first_src_addr = sd.region_to_addr(src_region_idx);
    }
  } else {
    // Two destination regions.  When copied, the partial object will cross a
    // destination region boundary, so a word somewhere within the partial
    // object will be the first word copied to the second destination region.
    _destination_count = 2;
    _dest_region_addr = end_region_addr;
    const size_t ofs = pointer_delta(end_region_addr, destination);
    assert(ofs < _partial_obj_size, "sanity");
    _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
  }
}
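
// Illustrative example of record(): with RegionSize == 0x10000 words, a
// partial object of 0x3000 words whose destination starts 0x2000 words before
// a region boundary crosses that boundary, so _destination_count == 2,
// _dest_region_addr is the start of the second destination region, and
// _first_src_addr is 0x2000 words into the source region -- the first source
// word copied to that second region.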

void SplitInfo::clear()
{
  _src_region_idx = 0;
  _partial_obj_size = 0;
  _destination = nullptr;
  _destination_count = 0;
  _dest_region_addr = nullptr;
  _first_src_addr = nullptr;
  assert(!is_valid(), "sanity");
}

#ifdef  ASSERT
void SplitInfo::verify_clear()
{
  assert(_src_region_idx == 0, "not clear");
  assert(_partial_obj_size == 0, "not clear");
  assert(_destination == nullptr, "not clear");
  assert(_destination_count == 0, "not clear");
  assert(_dest_region_addr == nullptr, "not clear");
  assert(_first_src_addr == nullptr, "not clear");
}
#endif  // #ifdef ASSERT


void PSParallelCompact::print_on_error(outputStream* st) {
  _mark_bitmap.print_on_error(st);
}

ParallelCompactData::ParallelCompactData() :
  _heap_start(nullptr),
  DEBUG_ONLY(_heap_end(nullptr) COMMA)
  _region_vspace(nullptr),
  _reserved_byte_size(0),
  _region_data(nullptr),
  _region_count(0) {}

bool ParallelCompactData::initialize(MemRegion reserved_heap)
{
  _heap_start = reserved_heap.start();
  const size_t heap_size = reserved_heap.word_size();
  DEBUG_ONLY(_heap_end = _heap_start + heap_size;)

  assert(region_align_down(_heap_start) == _heap_start,
         "region start not aligned");

  return initialize_region_data(heap_size);
}

PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
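  // E.g. (illustrative): raw_bytes of 3M with 2M large pages reserves 4M,
  // while with 4K pages it reserves raw_bytes rounded up to the allocation
  // granularity.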

  const size_t rs_align = page_sz == os::vm_page_size() ? 0 :
    MAX2(page_sz, granularity);
  ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
  os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
                       rs.size(), page_sz);

  MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);

  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
  if (vspace != nullptr) {
    if (vspace->expand_by(_reserved_byte_size)) {
      return vspace;
    }
    delete vspace;
    // Release memory reserved in the space.
    rs.release();
  }

  return nullptr;
}

bool ParallelCompactData::initialize_region_data(size_t heap_size)
{
  assert(is_aligned(heap_size, RegionSize), "precondition");

  const size_t count = heap_size >> Log2RegionSize;
  _region_vspace = create_vspace(count, sizeof(RegionData));
  if (_region_vspace != nullptr) {
    _region_data = (RegionData*)_region_vspace->reserved_low_addr();
    _region_count = count;
    return true;
  }
  return false;
}

void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
  assert(beg_region <= _region_count, "beg_region out of range");
  assert(end_region <= _region_count, "end_region out of range");

  const size_t region_cnt = end_region - beg_region;
  memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
}

void
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
{
  assert(is_region_aligned(beg), "not RegionSize aligned");
  assert(is_region_aligned(end), "not RegionSize aligned");

  size_t cur_region = addr_to_region_idx(beg);
  const size_t end_region = addr_to_region_idx(end);
  HeapWord* addr = beg;
  while (cur_region < end_region) {
    _region_data[cur_region].set_destination(addr);
    _region_data[cur_region].set_destination_count(0);
    _region_data[cur_region].set_source_region(cur_region);

    // Update live_obj_size so the region appears completely full.
    size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
    _region_data[cur_region].set_live_obj_size(live_size);

    ++cur_region;
    addr += RegionSize;
  }
}

// Find the point at which a space can be split and, if necessary, record the
// split point.
//
// If the current src region (which overflowed the destination space) doesn't
// have a partial object, the split point is at the beginning of the current src
// region (an "easy" split, no extra bookkeeping required).
//
// If the current src region has a partial object, the split point is in the
// region where that partial object starts (call it the split_region).  If
// split_region has a partial object, then the split point is just after that
// partial object (a "hard" split where we have to record the split data and
// zero the partial_obj_size field).  With a "hard" split, we know that the
// partial_obj ends within split_region because the partial object that caused
// the overflow starts in split_region.  If split_region doesn't have a partial
// obj, then the split is at the beginning of split_region (another "easy"
// split).
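//
// A compact illustration of the two cases (using region indices):
//
//   easy:  the overflowing src region has no partial object; split at the
//          start of that region, and no SplitInfo is recorded.
//   hard:  the overflowing object starts in region R, and R itself begins
//          with a partial object of N words; split N words into R, record
//          (R, N, destination) in SplitInfo, and zero R's partial_obj_size.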
HeapWord*
ParallelCompactData::summarize_split_space(size_t src_region,
                                           SplitInfo& split_info,
                                           HeapWord* destination,
                                           HeapWord* target_end,
                                           HeapWord** target_next)
{
  assert(destination <= target_end, "sanity");
  assert(destination + _region_data[src_region].data_size() > target_end,
    "region should not fit into target space");
  assert(is_region_aligned(target_end), "sanity");

  size_t split_region = src_region;
  HeapWord* split_destination = destination;
  size_t partial_obj_size = _region_data[src_region].partial_obj_size();

  if (destination + partial_obj_size > target_end) {
    // The split point is just after the partial object (if any) in the
    // src_region that contains the start of the object that overflowed the
    // destination space.
    //
    // Find the start of the "overflow" object and set split_region to the
    // region containing it.
    HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
    split_region = addr_to_region_idx(overflow_obj);

    // Clear the source_region field of all destination regions whose first word
    // came from data after the split point (a non-null source_region field
    // implies a region must be filled).
    //
    // An alternative to the simple loop below:  clear during post_compact(),
    // which uses memcpy instead of individual stores, and is easy to
    // parallelize.  (The downside is that it clears the entire RegionData
    // object as opposed to just one field.)
    //
    // post_compact() would have to clear the summary data up to the highest
    // address that was written during the summary phase, which would be
    //
    //         max(top, max(new_top, clear_top))
    //
    // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
    // to target_end.
    const RegionData* const sr = region(split_region);
    const size_t beg_idx =
      addr_to_region_idx(region_align_up(sr->destination() +
                                         sr->partial_obj_size()));
    const size_t end_idx = addr_to_region_idx(target_end);

    log_develop_trace(gc, compaction)("split:  clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
    for (size_t idx = beg_idx; idx < end_idx; ++idx) {
      _region_data[idx].set_source_region(0);
    }

    // Set split_destination and partial_obj_size to reflect the split region.
    split_destination = sr->destination();
    partial_obj_size = sr->partial_obj_size();
  }

  // The split is recorded only if a partial object extends onto the region.
  if (partial_obj_size != 0) {
    _region_data[split_region].set_partial_obj_size(0);
    split_info.record(split_region, partial_obj_size, split_destination);
  }

  // Setup the continuation addresses.
  *target_next = split_destination + partial_obj_size;
  HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;

  if (log_develop_is_enabled(Trace, gc, compaction)) {
    const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
    log_develop_trace(gc, compaction)("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
                                      split_type, p2i(source_next), split_region, partial_obj_size);
    log_develop_trace(gc, compaction)("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
                                      split_type, p2i(split_destination),
                                      addr_to_region_idx(split_destination),
                                      p2i(*target_next));

    if (partial_obj_size != 0) {
      HeapWord* const po_beg = split_info.destination();
      HeapWord* const po_end = po_beg + split_info.partial_obj_size();
      log_develop_trace(gc, compaction)("%s split:  po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
                                        split_type,
                                        p2i(po_beg), addr_to_region_idx(po_beg),
                                        p2i(po_end), addr_to_region_idx(po_end));
    }
  }

  return source_next;
}

size_t ParallelCompactData::live_words_in_space(const MutableSpace* space,
                                                HeapWord** full_region_prefix_end) {
  size_t cur_region = addr_to_region_idx(space->bottom());
  const size_t end_region = addr_to_region_idx(region_align_up(space->top()));
  size_t live_words = 0;
  if (full_region_prefix_end == nullptr) {
    for (/* empty */; cur_region < end_region; ++cur_region) {
      live_words += _region_data[cur_region].data_size();
    }
  } else {
    bool first_set = false;
    for (/* empty */; cur_region < end_region; ++cur_region) {
      size_t live_words_in_region = _region_data[cur_region].data_size();
      if (!first_set && live_words_in_region < RegionSize) {
        *full_region_prefix_end = region_to_addr(cur_region);
        first_set = true;
      }
      live_words += live_words_in_region;
    }
    if (!first_set) {
      // All regions are full of live objs.
      assert(is_region_aligned(space->top()), "inv");
      *full_region_prefix_end = space->top();
    }
    assert(*full_region_prefix_end != nullptr, "postcondition");
    assert(is_region_aligned(*full_region_prefix_end), "inv");
    assert(*full_region_prefix_end >= space->bottom(), "in-range");
    assert(*full_region_prefix_end <= space->top(), "in-range");
  }
  return live_words;
}
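
// Illustrative use of live_words_in_space(): for a space whose regions have
// data_size() == { RegionSize, RegionSize, 100, 0 }, the return value is
// 2 * RegionSize + 100 and *full_region_prefix_end (when requested) points at
// the start of the third region -- the first region not completely live.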

bool ParallelCompactData::summarize(SplitInfo& split_info,
                                    HeapWord* source_beg, HeapWord* source_end,
                                    HeapWord** source_next,
                                    HeapWord* target_beg, HeapWord* target_end,
                                    HeapWord** target_next)
{
  HeapWord* const source_next_val = source_next == nullptr ? nullptr : *source_next;
  log_develop_trace(gc, compaction)(
      "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
      " tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
      p2i(source_beg), p2i(source_end), p2i(source_next_val),
      p2i(target_beg), p2i(target_end), p2i(*target_next));

  size_t cur_region = addr_to_region_idx(source_beg);
  const size_t end_region = addr_to_region_idx(region_align_up(source_end));

  HeapWord *dest_addr = target_beg;
  while (cur_region < end_region) {
    // The destination must be set even if the region has no data.
    _region_data[cur_region].set_destination(dest_addr);

    size_t words = _region_data[cur_region].data_size();
    if (words > 0) {
      // If cur_region does not fit entirely into the target space, find a point
      // at which the source space can be 'split' so that part is copied to the
      // target space and the rest is copied elsewhere.
      if (dest_addr + words > target_end) {
        assert(source_next != nullptr, "source_next is null when splitting");
        *source_next = summarize_split_space(cur_region, split_info, dest_addr,
                                             target_end, target_next);
        return false;
      }

      // Compute the destination_count for cur_region, and if necessary, update
      // source_region for a destination region.  The source_region field is
      // updated if cur_region is the first (left-most) region to be copied to a
      // destination region.
      //
      // The destination_count calculation is a bit subtle.  A region that has
      // data that compacts into itself does not count itself as a destination.
      // This maintains the invariant that a zero count means the region is
      // available and can be claimed and then filled.
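      //
      // Illustrative: data that compacts entirely into its own region leaves
      // destination_count at 0, so the region can be claimed immediately;
      // data that straddles two other destination regions yields a count of
      // 2, and the region becomes reusable only after both destinations are
      // filled.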
      uint destination_count = 0;
      if (split_info.is_split(cur_region)) {
        // The current region has been split:  the partial object will be copied
        // to one destination space and the remaining data will be copied to
        // another destination space.  Adjust the initial destination_count and,
        // if necessary, set the source_region field if the partial object will
        // cross a destination region boundary.
        destination_count = split_info.destination_count();
        if (destination_count == 2) {
          size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
          _region_data[dest_idx].set_source_region(cur_region);
        }
      }

      HeapWord* const last_addr = dest_addr + words - 1;
      const size_t dest_region_1 = addr_to_region_idx(dest_addr);
      const size_t dest_region_2 = addr_to_region_idx(last_addr);

      // Initially assume that the destination regions will be the same and
      // adjust the value below if necessary.  Under this assumption, if
      // cur_region == dest_region_2, then cur_region will be compacted
      // completely into itself.
      destination_count += cur_region == dest_region_2 ? 0 : 1;
      if (dest_region_1 != dest_region_2) {
        // Destination regions differ; adjust destination_count.
        destination_count += 1;
        // Data from cur_region will be copied to the start of dest_region_2.
        _region_data[dest_region_2].set_source_region(cur_region);
      } else if (is_region_aligned(dest_addr)) {
        // Data from cur_region will be copied to the start of the destination
        // region.
        _region_data[dest_region_1].set_source_region(cur_region);
      }

      _region_data[cur_region].set_destination_count(destination_count);
      dest_addr += words;
    }

    ++cur_region;
  }

  *target_next = dest_addr;
  return true;
}

#ifdef ASSERT
void ParallelCompactData::verify_clear()
{
  const size_t* const beg = (const size_t*) _region_vspace->committed_low_addr();
  const size_t* const end = (const size_t*) _region_vspace->committed_high_addr();
  for (const size_t* p = beg; p < end; ++p) {
    assert(*p == 0, "not zero");
  }
}
#endif  // #ifdef ASSERT

STWGCTimer          PSParallelCompact::_gc_timer;
ParallelOldTracer   PSParallelCompact::_gc_tracer;
elapsedTimer        PSParallelCompact::_accumulated_time;
unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
CollectorCounters*  PSParallelCompact::_counters = nullptr;
ParMarkBitMap       PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

class PCAdjustPointerClosure: public BasicOopIterateClosure {
  template <typename T>
  void do_oop_work(T* p) { PSParallelCompact::adjust_pointer(p); }

public:
  virtual void do_oop(oop* p)                { do_oop_work(p); }
  virtual void do_oop(narrowOop* p)          { do_oop_work(p); }

  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
};

static PCAdjustPointerClosure pc_adjust_pointer_closure;

bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _span_based_discoverer.set_span(heap->reserved_region());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,   // mt processing degree
                           ParallelGCThreads,   // mt discovery degree
                           false,               // concurrent_discovery
                           &_is_alive_closure); // non-header is alive closure

  _counters = new CollectorCounters("Parallel full collection pauses", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}

bool PSParallelCompact::initialize_aux_data() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();
  assert(mr.byte_size() != 0, "heap should be reserved");

  initialize_space_info();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
      "garbage collection for the requested " SIZE_FORMAT "KB heap.",
      _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  if (!_summary_data.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate " SIZE_FORMAT "KB card tables for parallel "
      "garbage collection for the requested " SIZE_FORMAT "KB heap.",
      _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  return true;
}

void PSParallelCompact::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  _space_info[old_space_id].set_space(heap->old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
}

void
PSParallelCompact::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that will
  // be set at the end of GC.  The marking bitmap is cleared to top; nothing
  // should be marked above top.  The summary data is cleared to the larger of
  // top & new_top.
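  //
  // Illustrative: if this space compacted from top == 0x90000 down to
  // new_top == 0x40000, the bitmap is cleared up to 0x90000; if instead the
  // space received data so that new_top > top, max_top below ensures the
  // summary data is cleared through region_align_up(new_top) as well.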
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();
  HeapWord* const max_top = MAX2(top, _space_info[id].new_top());

  _mark_bitmap.clear_range(bot, top);

  const size_t beg_region = _summary_data.addr_to_region_idx(bot);
  const size_t end_region =
    _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
  _summary_data.clear_range(beg_region, end_region);

  // Clear the data used to 'split' regions.
  SplitInfo& split_info = _space_info[id].split_info();
  if (split_info.is_valid()) {
    split_info.clear();
  }
  DEBUG_ONLY(split_info.verify_clear();)
}

void PSParallelCompact::pre_compact()
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc.  Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of young
  // collections will have swapped the spaces an unknown number of times.
  GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  // Increment the invocation count
  heap->increment_total_collections(true);

  CodeCache::on_gc_marking_cycle_start();

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)
}

void PSParallelCompact::post_compact()
{
  GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
  ParCompactionManager::remove_all_shadow_regions();

  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap, summary data and split info.
    clear_data_covering_space(SpaceId(id));
    {
      MutableSpace* space = _space_info[id].space();
      HeapWord* top = space->top();
      HeapWord* new_top = _space_info[id].new_top();
      if (ZapUnusedHeapArea && new_top < top) {
        space->mangle_region(MemRegion(new_top, top));
      }
      // Update top().  Must be done after clearing the bitmap and summary data.
      space->set_top(new_top);
    }
  }

  ParCompactionManager::flush_all_string_dedup_requests();

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  bool eden_empty = eden_space->is_empty();

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
    to_space->is_empty();

  PSCardTable* ct = heap->card_table();
  MemRegion old_mr = heap->old_gen()->committed();
  if (young_gen_empty) {
    ct->clear_MemRegion(old_mr);
  } else {
    ct->dirty_MemRegion(old_mr);
  }

  {
    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
    DEBUG_ONLY(MetaspaceUtils::verify();)
  }

  // Need to clear claim bits for the next mark.
  ClassLoaderDataGraph::clear_claimed_marks();

  heap->prune_scavengable_nmethods();

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();
}

HeapWord* PSParallelCompact::compute_dense_prefix_for_old_space(MutableSpace* old_space,
                                                                HeapWord* full_region_prefix_end) {
  const size_t region_size = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();

  // Iteration starts with the region *after* the full-region-prefix-end.
  const RegionData* const start_region = sd.addr_to_region_ptr(full_region_prefix_end);
  // If final region is not full, iteration stops before that region,
  // because fill_dense_prefix_end assumes that prefix_end <= top.
  const RegionData* const end_region = sd.addr_to_region_ptr(old_space->top());
  assert(start_region <= end_region, "inv");

  size_t max_waste = old_space->capacity_in_words() * (MarkSweepDeadRatio / 100.0);
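  // E.g. (illustrative): with MarkSweepDeadRatio == 5 and an old space of
  // 1G words, the loop below tolerates up to ~51M words of dead space inside
  // the dense prefix before it stops growing the prefix.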
  const RegionData* cur_region = start_region;
  for (/* empty */; cur_region < end_region; ++cur_region) {
    assert(region_size >= cur_region->data_size(), "inv");
    size_t dead_size = region_size - cur_region->data_size();
    if (max_waste < dead_size) {
      break;
    }
    max_waste -= dead_size;
  }

  HeapWord* const prefix_end = sd.region_to_addr(cur_region);
  assert(sd.is_region_aligned(prefix_end), "postcondition");
  assert(prefix_end >= full_region_prefix_end, "in-range");
  assert(prefix_end <= old_space->top(), "in-range");
  return prefix_end;
}

void PSParallelCompact::fill_dense_prefix_end(SpaceId id) {
  // Comparing two sizes to decide if filling is required:
  //
  // The size of the filler (min-obj-size) is 2 heap words with the default
  // MinObjAlignment, since both markword and klass take 1 heap word.
  //
  // The size of the gap (if any) right before dense-prefix-end is
  // MinObjAlignment.
  //
  // Need to fill in the gap only if it's smaller than min-obj-size, and the
  // filler obj will extend to next region.

  // Note: If min-fill-size decreases to 1, this whole method becomes redundant.
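
  // Illustrative layout (64-bit, MinObjAlignment == 1): if the last live
  // object in the prefix ends exactly one word before dense_prefix_end, that
  // one-word gap cannot hold the 2-word filler, so the filler written below
  // starts at dense_prefix_end - 1 and spills one word into the following
  // region, which is why that region's partial_obj_size becomes 1.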
  assert(CollectedHeap::min_fill_size() >= 2, "inv");
#ifndef _LP64
  // In 32-bit system, each heap word is 4 bytes, so MinObjAlignment == 2.
  // The gap is always equal to min-fill-size, so nothing to do.
  return;
#endif
  if (MinObjAlignment > 1) {
    return;
  }
  assert(CollectedHeap::min_fill_size() == 2, "inv");
  HeapWord* const dense_prefix_end = dense_prefix(id);
  assert(_summary_data.is_region_aligned(dense_prefix_end), "precondition");
  assert(dense_prefix_end <= space(id)->top(), "precondition");
  if (dense_prefix_end == space(id)->top()) {
    // Must not have single-word gap right before prefix-end/top.
    return;
  }
  RegionData* const region_after_dense_prefix = _summary_data.addr_to_region_ptr(dense_prefix_end);

  if (region_after_dense_prefix->partial_obj_size() != 0 ||
      _mark_bitmap.is_marked(dense_prefix_end)) {
    // The region after the dense prefix starts with live bytes.
    return;
  }

  HeapWord* block_start = start_array(id)->block_start_reaching_into_card(dense_prefix_end);
  if (block_start == dense_prefix_end - 1) {
    assert(!_mark_bitmap.is_marked(block_start), "inv");
    // There is exactly one heap word gap right before the dense prefix end, so we need a filler object.
    // The filler object will extend into region_after_dense_prefix.
    const size_t obj_len = 2; // min-fill-size
    HeapWord* const obj_beg = dense_prefix_end - 1;
    CollectedHeap::fill_with_object(obj_beg, obj_len);
    _mark_bitmap.mark_obj(obj_beg);
    _summary_data.addr_to_region_ptr(obj_beg)->add_live_obj(1);
    region_after_dense_prefix->set_partial_obj_size(1);
    region_after_dense_prefix->set_partial_obj_addr(obj_beg);
    assert(start_array(id) != nullptr, "sanity");
    start_array(id)->update_for_block(obj_beg, obj_beg + obj_len);
  }
}

bool PSParallelCompact::check_maximum_compaction(size_t total_live_words,
                                                 MutableSpace* const old_space,
                                                 HeapWord* full_region_prefix_end) {

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Check System.GC
  bool is_max_on_system_gc = UseMaximumCompactionOnSystemGC
                          && GCCause::is_user_requested_gc(heap->gc_cause());

  // Check if the total live data is larger than old-gen capacity.
  const bool is_old_gen_overflowing = (total_live_words > old_space->capacity_in_words());

  // JVM flags
  const uint total_invocations = heap->total_full_collections();
  assert(total_invocations >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations - _maximum_compaction_gc_num;
  const bool is_interval_ended = gcs_since_max > HeapMaximumCompactionInterval;

  // If all regions in old-gen are full
  const bool is_region_full =
    full_region_prefix_end >= _summary_data.region_align_down(old_space->top());

  if (is_max_on_system_gc || is_old_gen_overflowing || is_interval_ended || is_region_full) {
    _maximum_compaction_gc_num = total_invocations;
    return true;
  }

  return false;
}

void PSParallelCompact::summary_phase()
{
  GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);

  MutableSpace* const old_space = _space_info[old_space_id].space();
  {
    size_t total_live_words = 0;
    HeapWord* full_region_prefix_end = nullptr;
    {
      // old-gen
      size_t live_words = _summary_data.live_words_in_space(old_space,
                                                            &full_region_prefix_end);
      total_live_words += live_words;
    }
    // young-gen
    for (uint i = eden_space_id; i < last_space_id; ++i) {
      const MutableSpace* space = _space_info[i].space();
      size_t live_words = _summary_data.live_words_in_space(space);
      total_live_words += live_words;
      _space_info[i].set_new_top(space->bottom() + live_words);
      _space_info[i].set_dense_prefix(space->bottom());
    }

    bool maximum_compaction = check_maximum_compaction(total_live_words,
                                                       old_space,
                                                       full_region_prefix_end);
    HeapWord* dense_prefix_end =
      maximum_compaction ? full_region_prefix_end
                         : compute_dense_prefix_for_old_space(old_space,
                                                              full_region_prefix_end);
    SpaceId id = old_space_id;
    _space_info[id].set_dense_prefix(dense_prefix_end);

    if (dense_prefix_end != old_space->bottom()) {
      fill_dense_prefix_end(id);
      _summary_data.summarize_dense_prefix(old_space->bottom(), dense_prefix_end);
    }
    _summary_data.summarize(_space_info[id].split_info(),
                            dense_prefix_end, old_space->top(), nullptr,
                            dense_prefix_end, old_space->end(),
                            _space_info[id].new_top_addr());
  }

  // Summarize the remaining spaces in the young gen.  The initial target space
  // is the old gen.  If a space does not fit entirely into the target, then the
  // remainder is compacted into the space itself and that space becomes the new
  // target.
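  //
  // Illustrative walk of the loop below: if eden's live data fits in old gen
  // but from-space's does not, eden is summarized into old gen and its
  // new_top reset to bottom; from-space is then split -- the part that fits
  // goes to old gen, the remainder compacts into from-space itself, and
  // from-space becomes the target space for to-space.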
  SpaceId dst_space_id = old_space_id;
  HeapWord* dst_space_end = old_space->end();
  HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
  for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    const size_t live = pointer_delta(_space_info[id].new_top(),
                                      space->bottom());
    const size_t available = pointer_delta(dst_space_end, *new_top_addr);

    if (live > 0 && live <= available) {
      // All the live data will fit.
      bool done = _summary_data.summarize(_space_info[id].split_info(),
                                          space->bottom(), space->top(),
                                          nullptr,
                                          *new_top_addr, dst_space_end,
                                          new_top_addr);
      assert(done, "space must fit into old gen");

      // Reset the new_top value for the space.
      _space_info[id].set_new_top(space->bottom());
    } else if (live > 0) {
      // Attempt to fit part of the source space into the target space.
      HeapWord* next_src_addr = nullptr;
      bool done = _summary_data.summarize(_space_info[id].split_info(),
                                          space->bottom(), space->top(),
                                          &next_src_addr,
                                          *new_top_addr, dst_space_end,
                                          new_top_addr);
      assert(!done, "space should not fit into old gen");
      assert(next_src_addr != nullptr, "sanity");

      // The source space becomes the new target, so the remainder is compacted
      // within the space itself.
      dst_space_id = SpaceId(id);
      dst_space_end = space->end();
      new_top_addr = _space_info[id].new_top_addr();
      done = _summary_data.summarize(_space_info[id].split_info(),
                                     next_src_addr, space->top(),
                                     nullptr,
                                     space->bottom(), dst_space_end,
                                     new_top_addr);
      assert(done, "space must fit when compacted into itself");
      assert(*new_top_addr <= space->top(), "usage should not grow");
    }
  }
}

// This method should contain all heap-specific policy for invoking a full
// collection.  invoke_no_policy() will only attempt to compact the heap; it
// will do nothing further.  Any policy-based bailout, scavenge before full
// gc, or other specialized behavior needs to be added here.
//
// Note that this method should only be called from the vm_thread while at a
// safepoint.
//
// Note that the all_soft_refs_clear flag in the soft ref policy
// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.
bool PSParallelCompact::invoke(bool clear_all_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(),
         "should be in vm thread");

  SvcGCMarker sgcm(SvcGCMarker::FULL);
  IsSTWGCActiveMark mark;

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  clear_all_soft_refs = clear_all_soft_refs
                     || heap->soft_ref_policy()->should_clear_all_soft_refs();

  return PSParallelCompact::invoke_no_policy(clear_all_soft_refs);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSParallelCompact::invoke_no_policy(bool clear_all_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != nullptr, "Sanity");

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  GCIdMark gc_id_mark;
  _gc_timer.register_gc_start();
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  GCCause::Cause gc_cause = heap->gc_cause();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // SoftRefPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_soft_refs,
                          heap->soft_ref_policy());

  // Make sure data structures are sane, make the heap parsable, and do other
  // miscellaneous bookkeeping.
  pre_compact();

  const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

  {
    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);

    heap->pre_full_gc_dump(&_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->start_discovery(clear_all_soft_refs);

    ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                              false /* unregister_nmethods_during_purge */,
                              false /* lock_nmethod_free_separately */);

    marking_phase(&_gc_tracer);

    summary_phase();

#if COMPILER2_OR_JVMCI
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    forward_to_new_addr();

    adjust_pointers();

    compact();

    ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());

    ParCompactionManager::verify_all_region_stack_empty();

    // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
    // done before resizing.
    post_compact();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {
      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
      log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_gen_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_gen_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->soft_ref_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(
          size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }

      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    if (UsePerfData) {
      PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
      counters->update_counters();
      counters->update_old_capacity(old_gen->capacity_in_bytes());
      counters->update_young_capacity(young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // Resize the metaspace capacity after a collection
    MetaspaceGC::compute_new_size();

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(&_gc_timer);
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return true;
}

class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
private:
  uint _worker_id;

public:
  PCAddThreadRootsMarkingTaskClosure(uint worker_id) : _worker_id(worker_id) { }
  void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    ResourceMark rm;

    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(_worker_id);

    MarkingNMethodClosure mark_and_push_in_blobs(&cm->_mark_and_push_closure,
                                                 !NMethodToOopClosure::FixRelocations,
                                                 true /* keepalive nmethods */);

    thread->oops_do(&cm->_mark_and_push_closure, &mark_and_push_in_blobs);

    // Do the real work
    cm->follow_marking_stacks();
  }
};

void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(worker_id);

  do {
    oop obj = nullptr;
    ObjArrayTask task;
    if (ParCompactionManager::steal_objarray(worker_id, task)) {
      cm->follow_array((objArrayOop)task.obj(), task.index());
    } else if (ParCompactionManager::steal(worker_id, obj)) {
      cm->follow_contents(obj);
    }
    cm->follow_marking_stacks();
  } while (!terminator.offer_termination());
}
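
// Note on steal_marking_work() above (illustrative): a worker first drains
// its own marking stack, then tries to steal a partially-scanned object-array
// slice or a plain object from other workers' queues, and only returns once
// the TaskTerminator observes that every worker is idle.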

class MarkFromRootsTask : public WorkerTask {
  StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
  TaskTerminator _terminator;
  uint _active_workers;

public:
  MarkFromRootsTask(uint active_workers) :
      WorkerTask("MarkFromRootsTask"),
      _strong_roots_scope(active_workers),
      _terminator(active_workers, ParCompactionManager::oop_task_queues()),
      _active_workers(active_workers) {}

  virtual void work(uint worker_id) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
    cm->create_marking_stats_cache();
    {
      CLDToOopClosure cld_closure(&cm->_mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
      ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);

      // Do the real work
      cm->follow_marking_stacks();
    }

    {
      PCAddThreadRootsMarkingTaskClosure closure(worker_id);
      Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &closure);
    }

    // Mark from OopStorages
    {
      _oop_storage_set_par_state.oops_do(&cm->_mark_and_push_closure);
      // Do the real work
      cm->follow_marking_stacks();
    }

    if (_active_workers > 1) {
      steal_marking_work(_terminator, worker_id);
    }
  }
};

class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  ParallelCompactRefProcProxyTask(uint max_workers)
    : RefProcProxyTask("ParallelCompactRefProcProxyTask", max_workers),
      _terminator(_max_workers, ParCompactionManager::oop_task_queues()) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
    _rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &cm->_mark_and_push_closure, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

static void flush_marking_stats_cache(const uint num_workers) {
  for (uint i = 0; i < num_workers; ++i) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(i);
    cm->flush_and_destroy_marking_stats_cache();
  }
}

void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);

  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
  {
    GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);

    MarkFromRootsTask task(active_gc_threads);
    ParallelScavengeHeap::heap()->workers().run_task(&task);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

    ReferenceProcessorStats stats;
    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());

    ref_processor()->set_active_mt_degree(active_gc_threads);
    ParallelCompactRefProcProxyTask task(ref_processor()->max_num_queues());
    stats = ref_processor()->process_discovered_references(task, pt);

    gc_tracer->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  {
    GCTraceTime(Debug, gc, phases) tm("Flush Marking Stats", &_gc_timer);

    flush_marking_stats_cache(active_gc_threads);
  }

  // This is the point where the entire marking should have completed.
  ParCompactionManager::verify_all_marking_stack_empty();

  {
    GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
    WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
                                is_alive_closure(),
                                &do_nothing_cl,
                                1);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(is_alive_closure());

      // Follow system dictionary roots and unload classes.
      unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethod's memory.
      ctx->purge_nmethods();
1353     }
1354     {
1355       GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
1356       ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
1357     }
1358     {
1359       GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
1360       ctx->free_nmethods();
1361     }
1362 
1363     // Prune dead klasses from subklass/sibling/implementor lists.
1364     Klass::clean_weak_klass_links(unloading_occurred);
1365 
1366     // Clean JVMCI metadata handles.
1367     JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
1368   }
1369 
1370   {
1371     GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
1372     _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
1373   }
1374 #if TASKQUEUE_STATS
1375   ParCompactionManager::oop_task_queues()->print_and_reset_taskqueue_stats("Oop Queue");
1376   ParCompactionManager::_objarray_task_queues->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
1377 #endif
1378 }
1379 
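     // Stripe-based parallelism for the adjust phase: workers claim fixed-size
     // stripes (two regions each) with an atomic fetch-then-add on a per-space
     // counter, so every stripe is processed exactly once. For example, the
     // worker that reads counter value 4 gets the stripe
     // [bottom + 4 * RegionSize, bottom + 6 * RegionSize), clamped to top.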
1380 template<typename Func>
1381 void PSParallelCompact::adjust_in_space_helper(SpaceId id, volatile uint* claim_counter, Func&& on_stripe) {
1382   MutableSpace* sp = PSParallelCompact::space(id);
1383   HeapWord* const bottom = sp->bottom();
1384   HeapWord* const top = sp->top();
1385   if (bottom == top) {
1386     return;
1387   }
1388 
1389   const uint num_regions_per_stripe = 2;
1390   const size_t region_size = ParallelCompactData::RegionSize;
1391   const size_t stripe_size = num_regions_per_stripe * region_size;
1392 
1393   while (true) {
1394     uint counter = Atomic::fetch_then_add(claim_counter, num_regions_per_stripe);
1395     HeapWord* cur_stripe = bottom + counter * region_size;
1396     if (cur_stripe >= top) {
1397       break;
1398     }
1399     HeapWord* stripe_end = MIN2(cur_stripe + stripe_size, top);
1400     on_stripe(cur_stripe, stripe_end);
1401   }
1402 }
1403 
1404 void PSParallelCompact::adjust_in_old_space(volatile uint* claim_counter) {
1405   // Regions in old-space shouldn't be split.
1406   assert(!_space_info[old_space_id].split_info().is_valid(), "inv");
1407 
1408   auto scan_obj_with_limit = [&] (HeapWord* obj_start, HeapWord* left, HeapWord* right) {
1409     assert(mark_bitmap()->is_marked(obj_start), "inv");
1410     oop obj = cast_to_oop(obj_start);
1411     return obj->oop_iterate_size(&pc_adjust_pointer_closure, MemRegion(left, right));
1412   };
1413 
1414   adjust_in_space_helper(old_space_id, claim_counter, [&] (HeapWord* stripe_start, HeapWord* stripe_end) {
1415     assert(_summary_data.is_region_aligned(stripe_start), "inv");
1416     RegionData* cur_region = _summary_data.addr_to_region_ptr(stripe_start);
1417     HeapWord* obj_start;
1418     if (cur_region->partial_obj_size() != 0) {
1419       obj_start = cur_region->partial_obj_addr();
1420       obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
1421     } else {
1422       obj_start = stripe_start;
1423     }
1424 
1425     while (obj_start < stripe_end) {
1426       obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
1427       if (obj_start >= stripe_end) {
1428         break;
1429       }
1430       obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
1431     }
1432   });
1433 }
1434 
1435 void PSParallelCompact::adjust_in_young_space(SpaceId id, volatile uint* claim_counter) {
1436   adjust_in_space_helper(id, claim_counter, [](HeapWord* stripe_start, HeapWord* stripe_end) {
1437     HeapWord* obj_start = stripe_start;
1438     while (obj_start < stripe_end) {
1439       obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
1440       if (obj_start >= stripe_end) {
1441         break;
1442       }
1443       oop obj = cast_to_oop(obj_start);
1444       obj_start += obj->oop_iterate_size(&pc_adjust_pointer_closure);
1445     }
1446   });
1447 }
1448 
1449 void PSParallelCompact::adjust_pointers_in_spaces(uint worker_id, volatile uint* claim_counters) {
1450   auto start_time = Ticks::now();
1451   adjust_in_old_space(&claim_counters[0]);
1452   for (uint id = eden_space_id; id < last_space_id; ++id) {
1453     adjust_in_young_space(SpaceId(id), &claim_counters[id]);
1454   }
1455   log_trace(gc, phases)("adjust_pointers_in_spaces worker %u: %.3f ms", worker_id, (Ticks::now() - start_time).seconds() * 1000);
1456 }
1457 
1458 class PSAdjustTask final : public WorkerTask {
1459   SubTasksDone                               _sub_tasks;
1460   WeakProcessor::Task                        _weak_proc_task;
1461   OopStorageSetStrongParState<false, false>  _oop_storage_iter;
1462   uint                                       _nworkers;
1463   volatile uint _claim_counters[PSParallelCompact::last_space_id] = {};
1464 
1465   enum PSAdjustSubTask {
1466     PSAdjustSubTask_code_cache,
1467 
1468     PSAdjustSubTask_num_elements
1469   };
1470 
1471 public:
1472   PSAdjustTask(uint nworkers) :
1473     WorkerTask("PSAdjust task"),
1474     _sub_tasks(PSAdjustSubTask_num_elements),
1475     _weak_proc_task(nworkers),
1476     _nworkers(nworkers) {
1477 
1478     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
1479     if (nworkers > 1) {
1480       Threads::change_thread_claim_token();
1481     }
1482   }
1483 
1484   ~PSAdjustTask() {
1485     Threads::assert_all_threads_claimed();
1486   }
1487 
1488   void work(uint worker_id) {
1489     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1490     cm->preserved_marks()->adjust_during_full_gc();
1491     {
1492       // adjust pointers in all spaces
1493       PSParallelCompact::adjust_pointers_in_spaces(worker_id, _claim_counters);
1494     }
1495     {
1496       ResourceMark rm;
1497       Threads::possibly_parallel_oops_do(_nworkers > 1, &pc_adjust_pointer_closure, nullptr);
1498     }
1499     _oop_storage_iter.oops_do(&pc_adjust_pointer_closure);
1500     {
1501       CLDToOopClosure cld_closure(&pc_adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
1502       ClassLoaderDataGraph::cld_do(&cld_closure);
1503     }
1504     {
1505       AlwaysTrueClosure always_alive;
1506       _weak_proc_task.work(worker_id, &always_alive, &pc_adjust_pointer_closure);
1507     }
1508     if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
1509       NMethodToOopClosure adjust_code(&pc_adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
1510       CodeCache::nmethods_do(&adjust_code);
1511     }
1512     _sub_tasks.all_tasks_claimed();
1513   }
1514 };
1515 
1516 void PSParallelCompact::adjust_pointers() {
1517   // Adjust the pointers to reflect the new locations
1518   GCTraceTime(Info, gc, phases) tm("Adjust Pointers", &_gc_timer);
1519   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1520   PSAdjustTask task(nworkers);
1521   ParallelScavengeHeap::heap()->workers().run_task(&task);
1522 }
1523 
1524 // Split [start, end) evenly for a number of workers and return the
1525 // range for worker_id.
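     // Illustrative example: start=0, end=10, num_workers=3 gives
     // num_regions_per_worker=3 and remainder=1, so the resulting ranges are
     //   worker 0: [0, 4)   (one of the first 'remainder' workers, one extra)
     //   worker 1: [4, 7)
     //   worker 2: [7, 10)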
1526 static void split_regions_for_worker(size_t start, size_t end,
1527                                      uint worker_id, uint num_workers,
1528                                      size_t* worker_start, size_t* worker_end) {
1529   assert(start < end, "precondition");
1530   assert(num_workers > 0, "precondition");
1531   assert(worker_id < num_workers, "precondition");
1532 
1533   size_t num_regions = end - start;
1534   size_t num_regions_per_worker = num_regions / num_workers;
1535   size_t remainder = num_regions % num_workers;
1536   // The first few workers will get one extra.
1537   *worker_start = start + worker_id * num_regions_per_worker
1538                   + MIN2(checked_cast<size_t>(worker_id), remainder);
1539   *worker_end = *worker_start + num_regions_per_worker
1540                 + (worker_id < remainder ? 1 : 0);
1541 }
1542 
1543 void PSParallelCompact::forward_to_new_addr() {
1544   GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1545   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1546 
1547   struct ForwardTask final : public WorkerTask {
1548     uint _num_workers;
1549 
1550     explicit ForwardTask(uint num_workers) :
1551       WorkerTask("PSForward task"),
1552       _num_workers(num_workers) {}
1553 
1554     void work(uint worker_id) override {
1555       ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1556       for (uint id = old_space_id; id < last_space_id; ++id) {
1557         MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1558         HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1559         HeapWord* top = sp->top();
1560 
1561         if (dense_prefix_addr == top) {
1562           continue;
1563         }
1564 
1565         size_t dense_prefix_region = _summary_data.addr_to_region_idx(dense_prefix_addr);
1566         size_t top_region = _summary_data.addr_to_region_idx(_summary_data.region_align_up(top));
1567         size_t start_region;
1568         size_t end_region;
1569         split_regions_for_worker(dense_prefix_region, top_region,
1570                                  worker_id, _num_workers,
1571                                  &start_region, &end_region);
1572         for (size_t cur_region = start_region; cur_region < end_region; ++cur_region) {
1573           RegionData* region_ptr = _summary_data.region(cur_region);
1574           size_t live_words = region_ptr->partial_obj_size();
1575 
1576           if (live_words == ParallelCompactData::RegionSize) {
1577             // No obj-start
1578             continue;
1579           }
1580 
1581           HeapWord* region_start = _summary_data.region_to_addr(cur_region);
1582           HeapWord* region_end = region_start + ParallelCompactData::RegionSize;
1583 
1584           HeapWord* cur_addr = region_start + live_words;
1585 
1586           HeapWord* destination = region_ptr->destination();
1587           while (cur_addr < region_end) {
1588             cur_addr = mark_bitmap()->find_obj_beg(cur_addr, region_end);
1589             if (cur_addr >= region_end) {
1590               break;
1591             }
1592             assert(mark_bitmap()->is_marked(cur_addr), "inv");
1593             HeapWord* new_addr = destination + live_words;
1594             oop obj = cast_to_oop(cur_addr);
1595             if (new_addr != cur_addr) {
1596               cm->preserved_marks()->push_if_necessary(obj, obj->mark());
1597               obj->forward_to(cast_to_oop(new_addr));
1598             }
1599             size_t obj_size = obj->size();
1600             live_words += obj_size;
1601             cur_addr += obj_size;
1602           }
1603         }
1604       }
1605     }
1606   } task(nworkers);
1607 
1608   ParallelScavengeHeap::heap()->workers().run_task(&task);
1609   debug_only(verify_forward();)
1610 }
1611 
1612 #ifdef ASSERT
1613 void PSParallelCompact::verify_forward() {
1614   HeapWord* old_dense_prefix_addr = dense_prefix(SpaceId(old_space_id));
1615   RegionData* old_region = _summary_data.region(_summary_data.addr_to_region_idx(old_dense_prefix_addr));
1616   HeapWord* bump_ptr = old_region->partial_obj_size() != 0
1617                        ? old_dense_prefix_addr + old_region->partial_obj_size()
1618                        : old_dense_prefix_addr;
1619   SpaceId bump_ptr_space = old_space_id;
1620 
1621   for (uint id = old_space_id; id < last_space_id; ++id) {
1622     MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1623     HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1624     HeapWord* top = sp->top();
1625     HeapWord* cur_addr = dense_prefix_addr;
1626 
1627     while (cur_addr < top) {
1628       cur_addr = mark_bitmap()->find_obj_beg(cur_addr, top);
1629       if (cur_addr >= top) {
1630         break;
1631       }
1632       assert(mark_bitmap()->is_marked(cur_addr), "inv");
1633       // Move to the space containing cur_addr
1634       if (bump_ptr == _space_info[bump_ptr_space].new_top()) {
1635         bump_ptr = space(space_id(cur_addr))->bottom();
1636         bump_ptr_space = space_id(bump_ptr);
1637       }
1638       oop obj = cast_to_oop(cur_addr);
1639       if (cur_addr != bump_ptr) {
1640         assert(obj->forwardee() == cast_to_oop(bump_ptr), "inv");
1641       }
1642       bump_ptr += obj->size();
1643       cur_addr += obj->size();
1644     }
1645   }
1646 }
1647 #endif
1648 
1649 // Helper class to print 8 region numbers per line and then print the total at the end.
1650 class FillableRegionLogger : public StackObj {
1651 private:
1652   Log(gc, compaction) log;
1653   static const int LineLength = 8;
1654   size_t _regions[LineLength];
1655   int _next_index;
1656   bool _enabled;
1657   size_t _total_regions;
1658 public:
1659   FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
1660   ~FillableRegionLogger() {
1661     log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
1662   }
1663 
1664   void print_line() {
1665     if (!_enabled || _next_index == 0) {
1666       return;
1667     }
1668     FormatBuffer<> line("Fillable: ");
1669     for (int i = 0; i < _next_index; i++) {
1670       line.append(" " SIZE_FORMAT_W(7), _regions[i]);
1671     }
1672     log.trace("%s", line.buffer());
1673     _next_index = 0;
1674   }
1675 
1676   void handle(size_t region) {
1677     if (!_enabled) {
1678       return;
1679     }
1680     _regions[_next_index++] = region;
1681     if (_next_index == LineLength) {
1682       print_line();
1683     }
1684     _total_regions++;
1685   }
1686 };
1687 
1688 void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
1689 {
1690   GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
1691 
1692   // Regions are assigned to worker stacks in round-robin order.
1693   uint worker_id = 0;
1694 
1695   // Find all regions that are available (can be filled immediately) and
1696   // distribute them to the thread stacks.  The iteration is done in reverse
1697   // order (high to low) so the regions will be removed in ascending order.
1698 
1699   const ParallelCompactData& sd = PSParallelCompact::summary_data();
1700 
1701   // id + 1 is used to test loop termination, so an unsigned type can
1702   // be used even though old_space_id == 0.
1703   FillableRegionLogger region_logger;
1704   for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
1705     SpaceInfo* const space_info = _space_info + id;
1706     HeapWord* const new_top = space_info->new_top();
1707 
1708     const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
1709     const size_t end_region =
1710       sd.addr_to_region_idx(sd.region_align_up(new_top));
1711 
1712     for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
1713       if (sd.region(cur)->claim_unsafe()) {
1714         ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1715         bool result = sd.region(cur)->mark_normal();
1716         assert(result, "Must succeed at this point.");
1717         cm->region_stack()->push(cur);
1718         region_logger.handle(cur);
1719         // Assign regions to tasks in round-robin fashion.
1720         if (++worker_id == parallel_gc_threads) {
1721           worker_id = 0;
1722         }
1723       }
1724     }
1725     region_logger.print_line();
1726   }
1727 }
1728 
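     // Work-stealing loop for the compaction phase: a worker first drains the
     // region stack preloaded by prepare_region_draining_tasks(), then
     // alternates between stealing ready regions from other workers and
     // claiming unavailable regions to fill via shadow regions, until the
     // TaskTerminator signals completion.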
1729 static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
1730   assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1731 
1732   ParCompactionManager* cm =
1733     ParCompactionManager::gc_thread_compaction_manager(worker_id);
1734 
1735   // Drain the stacks that have been preloaded with regions
1736   // that are ready to fill.
1737 
1738   cm->drain_region_stacks();
1739 
1740   guarantee(cm->region_stack()->is_empty(), "Not empty");
1741 
1742   size_t region_index = 0;
1743 
1744   while (true) {
1745     if (ParCompactionManager::steal(worker_id, region_index)) {
1746       PSParallelCompact::fill_and_update_region(cm, region_index);
1747       cm->drain_region_stacks();
1748     } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
1749       // Fill and update an unavailable region with the help of a shadow region
1750       PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
1751       cm->drain_region_stacks();
1752     } else {
1753       if (terminator->offer_termination()) {
1754         break;
1755       }
1756       // Go around again.
1757     }
1758   }
1759 }
1760 
1761 class FillDensePrefixAndCompactionTask: public WorkerTask {
1762   uint _num_workers;
1763   TaskTerminator _terminator;
1764 
1765 public:
1766   FillDensePrefixAndCompactionTask(uint active_workers) :
1767       WorkerTask("FillDensePrefixAndCompactionTask"),
1768       _num_workers(active_workers),
1769       _terminator(active_workers, ParCompactionManager::region_task_queues()) {
1770   }
1771 
1772   virtual void work(uint worker_id) {
1773     {
1774       auto start = Ticks::now();
1775       PSParallelCompact::fill_dead_objs_in_dense_prefix(worker_id, _num_workers);
1776       log_trace(gc, phases)("Fill dense prefix by worker %u: %.3f ms", worker_id, (Ticks::now() - start).seconds() * 1000);
1777     }
1778     compaction_with_stealing_work(&_terminator, worker_id);
1779   }
1780 };
1781 
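     // Overwrite the dead range [start, end) in the dense prefix with filler
     // objects and record each filler block in the object start array.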
1782 void PSParallelCompact::fill_range_in_dense_prefix(HeapWord* start, HeapWord* end) {
1783 #ifdef ASSERT
1784   {
1785     assert(start < end, "precondition");
1786     assert(mark_bitmap()->find_obj_beg(start, end) == end, "precondition");
1787     HeapWord* bottom = _space_info[old_space_id].space()->bottom();
1788     if (start != bottom) {
1789       HeapWord* obj_start = mark_bitmap()->find_obj_beg_reverse(bottom, start);
1790       HeapWord* after_obj = obj_start + cast_to_oop(obj_start)->size();
1791       assert(after_obj == start, "precondition");
1792     }
1793   }
1794 #endif
1795 
1796   CollectedHeap::fill_with_objects(start, pointer_delta(end, start));
1797   HeapWord* addr = start;
1798   do {
1799     size_t size = cast_to_oop(addr)->size();
1800     start_array(old_space_id)->update_for_block(addr, addr + size);
1801     addr += size;
1802   } while (addr < end);
1803 }
1804 
1805 void PSParallelCompact::fill_dead_objs_in_dense_prefix(uint worker_id, uint num_workers) {
1806   ParMarkBitMap* bitmap = mark_bitmap();
1807 
1808   HeapWord* const bottom = _space_info[old_space_id].space()->bottom();
1809   HeapWord* const prefix_end = dense_prefix(old_space_id);
1810 
1811   if (bottom == prefix_end) {
1812     return;
1813   }
1814 
1815   size_t bottom_region = _summary_data.addr_to_region_idx(bottom);
1816   size_t prefix_end_region = _summary_data.addr_to_region_idx(prefix_end);
1817 
1818   size_t start_region;
1819   size_t end_region;
1820   split_regions_for_worker(bottom_region, prefix_end_region,
1821                            worker_id, num_workers,
1822                            &start_region, &end_region);
1823 
1824   if (start_region == end_region) {
1825     return;
1826   }
1827 
1828   HeapWord* const start_addr = _summary_data.region_to_addr(start_region);
1829   HeapWord* const end_addr = _summary_data.region_to_addr(end_region);
1830 
1831   // Skip live partial obj (if any) from previous region.
1832   HeapWord* cur_addr;
1833   RegionData* start_region_ptr = _summary_data.region(start_region);
1834   if (start_region_ptr->partial_obj_size() != 0) {
1835     HeapWord* partial_obj_start = start_region_ptr->partial_obj_addr();
1836     assert(bitmap->is_marked(partial_obj_start), "inv");
1837     cur_addr = partial_obj_start + cast_to_oop(partial_obj_start)->size();
1838   } else {
1839     cur_addr = start_addr;
1840   }
1841 
1842   // end_addr is inclusive to handle regions starting with dead space.
1843   while (cur_addr <= end_addr) {
1844     // Use prefix_end to handle trailing obj in each worker region-chunk.
1845     HeapWord* live_start = bitmap->find_obj_beg(cur_addr, prefix_end);
1846     if (cur_addr != live_start) {
1847       // Only worker 0 handles the preceding dead space.
1848       if (cur_addr != start_addr || worker_id == 0) {
1849         fill_range_in_dense_prefix(cur_addr, live_start);
1850       }
1851     }
1852     if (live_start >= end_addr) {
1853       break;
1854     }
1855     assert(bitmap->is_marked(live_start), "inv");
1856     cur_addr = live_start + cast_to_oop(live_start)->size();
1857   }
1858 }
1859 
1860 void PSParallelCompact::compact() {
1861   GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
1862 
1863   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
1864 
1865   initialize_shadow_regions(active_gc_threads);
1866   prepare_region_draining_tasks(active_gc_threads);
1867 
1868   {
1869     GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
1870 
1871     FillDensePrefixAndCompactionTask task(active_gc_threads);
1872     ParallelScavengeHeap::heap()->workers().run_task(&task);
1873 
1874 #ifdef  ASSERT
1875     verify_filler_in_dense_prefix();
1876 
1877     // Verify that all regions have been processed.
1878     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1879       verify_complete(SpaceId(id));
1880     }
1881 #endif
1882   }
1883 }
1884 
1885 #ifdef  ASSERT
1886 void PSParallelCompact::verify_filler_in_dense_prefix() {
1887   HeapWord* bottom = _space_info[old_space_id].space()->bottom();
1888   HeapWord* dense_prefix_end = dense_prefix(old_space_id);
1889   HeapWord* cur_addr = bottom;
1890   while (cur_addr < dense_prefix_end) {
1891     oop obj = cast_to_oop(cur_addr);
1892     oopDesc::verify(obj);
1893     if (!mark_bitmap()->is_marked(cur_addr)) {
1894       Klass* k = cast_to_oop(cur_addr)->klass_without_asserts();
1895       assert(k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass(), "inv");
1896     }
1897     cur_addr += obj->size();
1898   }
1899 }
1900 
1901 void PSParallelCompact::verify_complete(SpaceId space_id) {
1902   // All Regions served as compaction targets, from dense_prefix() to
1903   // new_top(), should be marked as filled and all Regions between new_top()
1904   // and top() should be available (i.e., should have been emptied).
1905   ParallelCompactData& sd = summary_data();
1906   SpaceInfo si = _space_info[space_id];
1907   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
1908   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
1909   const size_t beg_region = sd.addr_to_region_idx(si.dense_prefix());
1910   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
1911   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
1912 
1913   size_t cur_region;
1914   for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
1915     const RegionData* const c = sd.region(cur_region);
1916     assert(c->completed(), "region %zu not filled: destination_count=%u",
1917            cur_region, c->destination_count());
1918   }
1919 
1920   for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
1921     const RegionData* const c = sd.region(cur_region);
1922     assert(c->available(), "region %zu not empty: destination_count=%u",
1923            cur_region, c->destination_count());
1924   }
1925 }
1926 #endif  // #ifdef ASSERT
1927 
1928 // Return the SpaceId for the space containing addr.  If addr is not in the
1929 // heap, last_space_id is returned.  In debug mode it expects the address to be
1930 // in the heap and asserts such.
1931 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
1932   assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
1933 
1934   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1935     if (_space_info[id].space()->contains(addr)) {
1936       return SpaceId(id);
1937     }
1938   }
1939 
1940   assert(false, "no space contains the addr");
1941   return last_space_id;
1942 }
1943 
1944 // Skip over count live words starting from beg, and return the address of the
1945 // next live word.  Unless marked, the word corresponding to beg is assumed to
1946 // be dead.  Callers must either ensure beg does not correspond to the middle of
1947 // an object, or account for those live words in some other way.  Callers must
1948 // also ensure that there are enough live words in the range [beg, end) to skip.
1949 HeapWord*
1950 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
1951 {
1952   assert(count > 0, "sanity");
1953 
1954   ParMarkBitMap* m = mark_bitmap();
1955   HeapWord* cur_addr = beg;
1956   while (true) {
1957     cur_addr = m->find_obj_beg(cur_addr, end);
1958     assert(cur_addr < end, "inv");
1959     size_t obj_size = cast_to_oop(cur_addr)->size();
1960     // Strictly greater-than
1961     if (obj_size > count) {
1962       return cur_addr + count;
1963     }
1964     count -= obj_size;
1965     cur_addr += obj_size;
1966   }
1967 }
1968 
1969 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
1970                                             SpaceId src_space_id,
1971                                             size_t src_region_idx)
1972 {
1973   assert(summary_data().is_region_aligned(dest_addr), "not aligned");
1974 
1975   const SplitInfo& split_info = _space_info[src_space_id].split_info();
1976   if (split_info.dest_region_addr() == dest_addr) {
1977     // The partial object ending at the split point contains the first word to
1978     // be copied to dest_addr.
1979     return split_info.first_src_addr();
1980   }
1981 
1982   const ParallelCompactData& sd = summary_data();
1983   ParMarkBitMap* const bitmap = mark_bitmap();
1984   const size_t RegionSize = ParallelCompactData::RegionSize;
1985 
1986   assert(sd.is_region_aligned(dest_addr), "not aligned");
1987   const RegionData* const src_region_ptr = sd.region(src_region_idx);
1988   const size_t partial_obj_size = src_region_ptr->partial_obj_size();
1989   HeapWord* const src_region_destination = src_region_ptr->destination();
1990 
1991   assert(dest_addr >= src_region_destination, "wrong src region");
1992   assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
1993 
1994   HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
1995   HeapWord* const src_region_end = src_region_beg + RegionSize;
1996 
1997   HeapWord* addr = src_region_beg;
1998   if (dest_addr == src_region_destination) {
1999     // Return the first live word in the source region.
2000     if (partial_obj_size == 0) {
2001       addr = bitmap->find_obj_beg(addr, src_region_end);
2002       assert(addr < src_region_end, "no objects start in src region");
2003     }
2004     return addr;
2005   }
2006 
2007   // Must skip some live data.
2008   size_t words_to_skip = dest_addr - src_region_destination;
2009   assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
2010 
2011   if (partial_obj_size >= words_to_skip) {
2012     // All the live words to skip are part of the partial object.
2013     addr += words_to_skip;
2014     if (partial_obj_size == words_to_skip) {
2015       // Find the first live word past the partial object.
2016       addr = bitmap->find_obj_beg(addr, src_region_end);
2017       assert(addr < src_region_end, "wrong src region");
2018     }
2019     return addr;
2020   }
2021 
2022   // Skip over the partial object (if any).
2023   if (partial_obj_size != 0) {
2024     words_to_skip -= partial_obj_size;
2025     addr += partial_obj_size;
2026   }
2027 
2028   // Skip over live words due to objects that start in the region.
2029   addr = skip_live_words(addr, src_region_end, words_to_skip);
2030   assert(addr < src_region_end, "wrong src region");
2031   return addr;
2032 }
2033 
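     // Decrement the destination count of each source region in
     // [beg_region, region containing end_addr). A region below new_top()
     // whose count drops to zero is claimed and either pushed onto this
     // worker's region stack, or, if a filled shadow region is attached to
     // it, has the shadow data copied back immediately.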
2034 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2035                                                      SpaceId src_space_id,
2036                                                      size_t beg_region,
2037                                                      HeapWord* end_addr)
2038 {
2039   ParallelCompactData& sd = summary_data();
2040 
2041 #ifdef ASSERT
2042   MutableSpace* const src_space = _space_info[src_space_id].space();
2043   HeapWord* const beg_addr = sd.region_to_addr(beg_region);
2044   assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
2045          "src_space_id does not match beg_addr");
2046   assert(src_space->contains(end_addr) || end_addr == src_space->end(),
2047          "src_space_id does not match end_addr");
2048 #endif // #ifdef ASSERT
2049 
2050   RegionData* const beg = sd.region(beg_region);
2051   RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
2052 
2053   // Regions up to new_top() are enqueued if they become available.
2054   HeapWord* const new_top = _space_info[src_space_id].new_top();
2055   RegionData* const enqueue_end =
2056     sd.addr_to_region_ptr(sd.region_align_up(new_top));
2057 
2058   for (RegionData* cur = beg; cur < end; ++cur) {
2059     assert(cur->data_size() > 0, "region must have live data");
2060     cur->decrement_destination_count();
2061     if (cur < enqueue_end && cur->available() && cur->claim()) {
2062       if (cur->mark_normal()) {
2063         cm->push_region(sd.region(cur));
2064       } else if (cur->mark_copied()) {
2065         // Try to copy the content of the shadow region back to its corresponding
2066         // heap region if the shadow region is filled. Otherwise, the GC thread
2067         // that fills the shadow region will copy the data back (see
2068         // MoveAndUpdateShadowClosure::complete_region).
2069         copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur));
2070         ParCompactionManager::push_shadow_region_mt_safe(cur->shadow_region());
2071         cur->set_completed();
2072       }
2073     }
2074   }
2075 }
2076 
2077 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
2078                                           SpaceId& src_space_id,
2079                                           HeapWord*& src_space_top,
2080                                           HeapWord* end_addr)
2081 {
2082   typedef ParallelCompactData::RegionData RegionData;
2083 
2084   ParallelCompactData& sd = PSParallelCompact::summary_data();
2085   const size_t region_size = ParallelCompactData::RegionSize;
2086 
2087   size_t src_region_idx = 0;
2088 
2089   // Skip empty regions (if any) up to the top of the space.
2090   HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
2091   RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
2092   HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
2093   const RegionData* const top_region_ptr =
2094     sd.addr_to_region_ptr(top_aligned_up);
2095   while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
2096     ++src_region_ptr;
2097   }
2098 
2099   if (src_region_ptr < top_region_ptr) {
2100     // The next source region is in the current space.  Update src_region_idx
2101     // and the source address to match src_region_ptr.
2102     src_region_idx = sd.region(src_region_ptr);
2103     HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
2104     if (src_region_addr > closure.source()) {
2105       closure.set_source(src_region_addr);
2106     }
2107     return src_region_idx;
2108   }
2109 
2110   // Switch to a new source space and find the first non-empty region.
2111   unsigned int space_id = src_space_id + 1;
2112   assert(space_id < last_space_id, "not enough spaces");
2113 
2114   HeapWord* const destination = closure.destination();
2115 
2116   do {
2117     MutableSpace* space = _space_info[space_id].space();
2118     HeapWord* const bottom = space->bottom();
2119     const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
2120 
2121     // Iterate over the spaces that do not compact into themselves.
2122     if (bottom_cp->destination() != bottom) {
2123       HeapWord* const top_aligned_up = sd.region_align_up(space->top());
2124       const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
2125 
2126       for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
2127         if (src_cp->live_obj_size() > 0) {
2128           // Found it.
2129           assert(src_cp->destination() == destination,
2130                  "first live obj in the space must match the destination");
2131           assert(src_cp->partial_obj_size() == 0,
2132                  "a space cannot begin with a partial obj");
2133 
2134           src_space_id = SpaceId(space_id);
2135           src_space_top = space->top();
2136           const size_t src_region_idx = sd.region(src_cp);
2137           closure.set_source(sd.region_to_addr(src_region_idx));
2138           return src_region_idx;
2139         } else {
2140           assert(src_cp->data_size() == 0, "sanity");
2141         }
2142       }
2143     }
2144   } while (++space_id < last_space_id);
2145 
2146   assert(false, "no source region was found");
2147   return 0;
2148 }
2149 
2150 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2151   ParallelCompactData& sd = summary_data();
2152   assert(sd.is_region_aligned(region_start_addr), "precondition");
2153 
2154   // Use per-region partial_obj_size to locate the end of the obj that extends into the region starting at region_start_addr.
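       // For example, if the partial obj fills this whole region
       // (partial_obj_size == RegionSize), the walk continues into following
       // regions, accumulating their partial sizes, until it reaches a region
       // where the obj ends (partial size < RegionSize) or a split point.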
2155   SplitInfo& split_info = _space_info[space_id(region_start_addr)].split_info();
2156   size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2157   size_t end_region_idx = sd.region_count();
2158   size_t accumulated_size = 0;
2159   for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2160     if (split_info.is_split(region_idx)) {
2161       accumulated_size += split_info.partial_obj_size();
2162       break;
2163     }
2164     size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2165     accumulated_size += cur_partial_obj_size;
2166     if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2167       break;
2168     }
2169   }
2170   return region_start_addr + accumulated_size;
2171 }
2172 
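     // Fill destination region 'region_idx' by copying live words from its
     // source region(s) in order, letting 'closure' update interior pointers
     // as objects move. Source regions whose live data has been fully
     // consumed get their destination counts decremented (see
     // decrement_destination_counts) so they can become targets themselves.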
2173 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2174 {
2175   ParMarkBitMap* const bitmap = mark_bitmap();
2176   ParallelCompactData& sd = summary_data();
2177   RegionData* const region_ptr = sd.region(region_idx);
2178 
2179   // Get the source region and related info.
2180   size_t src_region_idx = region_ptr->source_region();
2181   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2182   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2183   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2184 
2185   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2186 
2187   // Adjust src_region_idx to prepare for decrementing destination counts (the
2188   // destination count is not decremented when a region is copied to itself).
2189   if (src_region_idx == region_idx) {
2190     src_region_idx += 1;
2191   }
2192 
2193   if (bitmap->is_unmarked(closure.source())) {
2194     // The first source word is in the middle of an object; copy the remainder
2195     // of the object or as much as will fit.  The fact that pointer updates were
2196     // deferred will be noted when the object header is processed.
2197     HeapWord* const old_src_addr = closure.source();
2198     {
2199       HeapWord* region_start = sd.region_align_down(closure.source());
2200       HeapWord* obj_start = bitmap->find_obj_beg_reverse(region_start, closure.source());
2201       HeapWord* obj_end;
2202       if (bitmap->is_marked(obj_start)) {
2203         HeapWord* next_region_start = region_start + ParallelCompactData::RegionSize;
2204         HeapWord* partial_obj_start = (next_region_start >= src_space_top)
2205                                       ? nullptr
2206                                       : sd.addr_to_region_ptr(next_region_start)->partial_obj_addr();
2207         if (partial_obj_start == obj_start) {
2208           // This obj extends to next region.
2209           obj_end = partial_obj_end(next_region_start);
2210         } else {
2211           // Completely contained in this region; safe to use size().
2212           obj_end = obj_start + cast_to_oop(obj_start)->size();
2213         }
2214       } else {
2215         // This obj extends to current region.
2216         obj_end = partial_obj_end(region_start);
2217       }
2218       size_t partial_obj_size = pointer_delta(obj_end, closure.source());
2219       closure.copy_partial_obj(partial_obj_size);
2220     }
2221 
2222     if (closure.is_full()) {
2223       decrement_destination_counts(cm, src_space_id, src_region_idx,
2224                                    closure.source());
2225       closure.complete_region(dest_addr, region_ptr);
2226       return;
2227     }
2228 
2229     HeapWord* const end_addr = sd.region_align_down(closure.source());
2230     if (sd.region_align_down(old_src_addr) != end_addr) {
2231       // The partial object was copied from more than one source region.
2232       decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2233 
2234       // Move to the next source region, possibly switching spaces as well.  All
2235       // args except end_addr may be modified.
2236       src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2237                                        end_addr);
2238     }
2239   }
2240 
2241   do {
2242     HeapWord* cur_addr = closure.source();
2243     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2244                                     src_space_top);
2245     HeapWord* partial_obj_start = (end_addr == src_space_top)
2246                                 ? nullptr
2247                                 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2248     // apply closure on objs inside [cur_addr, end_addr)
2249     do {
2250       cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2251       if (cur_addr == end_addr) {
2252         break;
2253       }
2254       size_t obj_size;
2255       if (partial_obj_start == cur_addr) {
2256         obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2257       } else {
2258         // This obj doesn't extend into next region; size() is safe to use.
2259         obj_size = cast_to_oop(cur_addr)->size();
2260       }
2261       closure.do_addr(cur_addr, obj_size);
2262       cur_addr += obj_size;
2263     } while (cur_addr < end_addr && !closure.is_full());
2264 
2265     if (closure.is_full()) {
2266       decrement_destination_counts(cm, src_space_id, src_region_idx,
2267                                    closure.source());
2268       closure.complete_region(dest_addr, region_ptr);
2269       return;
2270     }
2271 
2272     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2273 
2274     // Move to the next source region, possibly switching spaces as well.  All
2275     // args except end_addr may be modified.
2276     src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2277                                      end_addr);
2278   } while (true);
2279 }
2280 
2281 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2282 {
2283   MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2284   fill_region(cm, cl, region_idx);
2285 }
2286 
2287 void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx)
2288 {
2289   // Get a shadow region first
2290   ParallelCompactData& sd = summary_data();
2291   RegionData* const region_ptr = sd.region(region_idx);
2292   size_t shadow_region = ParCompactionManager::pop_shadow_region_mt_safe(region_ptr);
2293   // The InvalidShadow return value indicates the corresponding heap region is available,
2294   // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
2295   // MoveAndUpdateShadowClosure to fill the acquired shadow region.
2296   if (shadow_region == ParCompactionManager::InvalidShadow) {
2297     MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2298     region_ptr->shadow_to_normal();
2299     return fill_region(cm, cl, region_idx);
2300   } else {
2301     MoveAndUpdateShadowClosure cl(mark_bitmap(), region_idx, shadow_region);
2302     return fill_region(cm, cl, region_idx);
2303   }
2304 }
2305 
2306 void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr)
2307 {
2308   Copy::aligned_conjoint_words(shadow_addr, region_addr, ParallelCompactData::RegionSize);
2309 }
2310 
2311 bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_t &region_idx)
2312 {
2313   size_t next = cm->next_shadow_region();
2314   ParallelCompactData& sd = summary_data();
2315   size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
2316   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
2317 
2318   while (next < old_new_top) {
2319     if (sd.region(next)->mark_shadow()) {
2320       region_idx = next;
2321       return true;
2322     }
2323     next = cm->move_next_shadow_region_by(active_gc_threads);
2324   }
2325 
2326   return false;
2327 }
2328 
2329 // The shadow region is an optimization to address region dependencies in full GC. The basic
2330 // idea is to make more regions available by temporarily storing their live objects in empty
2331 // shadow regions, resolving dependencies between them and the destination regions. Therefore,
2332 // GC threads need not wait for destination regions to be available before processing sources.
2333 //
2334 // A typical workflow would be:
2335 // After draining its own stack and failing to steal from others, a GC worker would pick an
2336 // unavailable region (destination count > 0) and get a shadow region. Then the worker fills
2337 // the shadow region by copying live objects from source regions of the unavailable one. Once
2338 // the unavailable region becomes available, the data in the shadow region will be copied back.
2339 // Shadow regions are empty regions in the to-space and regions between top and end of other spaces.
2340 void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
2341 {
2342   const ParallelCompactData& sd = PSParallelCompact::summary_data();
2343 
2344   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2345     SpaceInfo* const space_info = _space_info + id;
2346     MutableSpace* const space = space_info->space();
2347 
2348     const size_t beg_region =
2349       sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
2350     const size_t end_region =
2351       sd.addr_to_region_idx(sd.region_align_down(space->end()));
2352 
2353     for (size_t cur = beg_region; cur < end_region; ++cur) {
2354       ParCompactionManager::push_shadow_region(cur);
2355     }
2356   }
2357 
2358   size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
2359   for (uint i = 0; i < parallel_gc_threads; i++) {
2360     ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
2361     cm->set_next_shadow_region(beg_region + i);
2362   }
2363 }
2364 
2365 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2366 {
2367   size_t words = MIN2(partial_obj_size, words_remaining());
2368 
2369   // This test is necessary; if omitted, the pointer updates to a partial object
2370   // that crosses the dense prefix boundary could be overwritten.
2371   if (source() != copy_destination()) {
2372     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2373     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2374   }
2375   update_state(words);
2376 }
2377 
2378 void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2379   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
2380   region_ptr->set_completed();
2381 }
2382 
2383 void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
2384   assert(destination() != nullptr, "sanity");
2385   _source = addr;
2386 
2387   // The start_array must be updated even if the object is not moving.
2388   if (_start_array != nullptr) {
2389     _start_array->update_for_block(destination(), destination() + words);
2390   }
2391 
2392   // Avoid overflow
2393   words = MIN2(words, words_remaining());
2394   assert(words > 0, "inv");
2395 
2396   if (copy_destination() != source()) {
2397     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2398     assert(source() != destination(), "inv");
2399     assert(cast_to_oop(source())->is_forwarded(), "inv");
2400     assert(cast_to_oop(source())->forwardee() == cast_to_oop(destination()), "inv");
2401     // Read the klass before copying, since an overlapping copy might destroy it,
2402     // and in a partial copy the destination klass may not have been copied yet.
2403     Klass* klass = cast_to_oop(source())->klass();
2404     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2405     cast_to_oop(copy_destination())->set_mark(Klass::default_prototype_header(klass));
2406   }
2407 
2408   update_state(words);
2409 }
2410 
2411 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2412   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2413   // Record the shadow region index
2414   region_ptr->set_shadow_region(_shadow);
2415   // Mark the shadow region as filled to indicate the data is ready to be
2416   // copied back
2417   region_ptr->mark_filled();
2418   // Try to copy the content of the shadow region back to its corresponding
2419   // heap region if available; the GC thread that decreases the destination
2420   // count to zero will do the copying otherwise (see
2421   // PSParallelCompact::decrement_destination_counts).
2422   if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2423     region_ptr->set_completed();
2424     PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2425     ParCompactionManager::push_shadow_region_mt_safe(_shadow);
2426   }
2427 }
2428