/*
 * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psStringDedup.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/slidingForwarding.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/access.inline.hpp"
#include "oops/instanceClassLoaderKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

#include <math.h>

// All sizes are in HeapWords.
const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
static_assert(ParallelCompactData::RegionSize >= BitsPerWord, "region-start bit word-aligned");
const size_t ParallelCompactData::RegionSizeBytes =
  RegionSize << LogHeapWordSize;
const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;
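
// Illustrative arithmetic (not part of the original source): with
// Log2RegionSize == 16, RegionSize is 0x10000 (64K) words.  On a 64-bit VM,
// where LogHeapWordSize == 3, RegionSizeBytes is 0x10000 << 3 == 512KB, so:
//
//   RegionSizeOffsetMask == 0xffff     (word offset within a region)
//   RegionAddrOffsetMask == 0x7ffff    (byte offset within a region)
//   addr & RegionAddrMask              (region-aligned byte address)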

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_shift = 27;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::los_mask = ~dc_mask;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
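
// Illustrative layout (not part of the original source): region_sz_t is a
// 32-bit value whose top five bits (at dc_shift == 27) encode the destination
// count and claim state, while the low 27 bits hold the live-object size in
// words:
//
//   dc_mask    == 0xf8000000   dc_one       == 0x08000000
//   dc_claimed == 0x40000000   dc_completed == 0x60000000
//   los_mask   == 0x07ffffff
//
// A destination count of zero therefore marks a region as unclaimed and
// available to be filled.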

SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];

SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
ReferenceProcessor* PSParallelCompact::_ref_processor = nullptr;

void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
                       HeapWord* destination)
{
  assert(src_region_idx != 0, "invalid src_region_idx");
  assert(partial_obj_size != 0, "invalid partial_obj_size argument");
  assert(destination != nullptr, "invalid destination argument");

  _src_region_idx = src_region_idx;
  _partial_obj_size = partial_obj_size;
  _destination = destination;

  // These fields may not be updated below, so make sure they're clear.
  assert(_dest_region_addr == nullptr, "should have been cleared");
  assert(_first_src_addr == nullptr, "should have been cleared");

  // Determine the number of destination regions for the partial object.
  HeapWord* const last_word = destination + partial_obj_size - 1;
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  HeapWord* const beg_region_addr = sd.region_align_down(destination);
  HeapWord* const end_region_addr = sd.region_align_down(last_word);

  if (beg_region_addr == end_region_addr) {
    // One destination region.
    _destination_count = 1;
    if (end_region_addr == destination) {
      // The destination falls on a region boundary, thus the first word of the
      // partial object will be the first word copied to the destination region.
      _dest_region_addr = end_region_addr;
      _first_src_addr = sd.region_to_addr(src_region_idx);
    }
  } else {
    // Two destination regions.  When copied, the partial object will cross a
    // destination region boundary, so a word somewhere within the partial
    // object will be the first word copied to the second destination region.
    _destination_count = 2;
    _dest_region_addr = end_region_addr;
    const size_t ofs = pointer_delta(end_region_addr, destination);
    assert(ofs < _partial_obj_size, "sanity");
    _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
  }
}
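
// Worked example (illustrative, not part of the original source), using
// word-granularity addresses for simplicity and RegionSize == 0x10000 words:
// recording a partial object of 0x30 words at destination 0x1ffff0 gives
// last_word == 0x20001f, which straddles the region boundary at 0x200000.
// The result is _destination_count == 2, _dest_region_addr == 0x200000, and
// _first_src_addr == region_to_addr(src_region_idx) + 0x10, i.e. the word of
// the object that becomes the first word copied into the second destination
// region.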

void SplitInfo::clear()
{
  _src_region_idx = 0;
  _partial_obj_size = 0;
  _destination = nullptr;
  _destination_count = 0;
  _dest_region_addr = nullptr;
  _first_src_addr = nullptr;
  assert(!is_valid(), "sanity");
}

#ifdef  ASSERT
void SplitInfo::verify_clear()
{
  assert(_src_region_idx == 0, "not clear");
  assert(_partial_obj_size == 0, "not clear");
  assert(_destination == nullptr, "not clear");
  assert(_destination_count == 0, "not clear");
  assert(_dest_region_addr == nullptr, "not clear");
  assert(_first_src_addr == nullptr, "not clear");
}
#endif  // #ifdef ASSERT


void PSParallelCompact::print_on_error(outputStream* st) {
  _mark_bitmap.print_on_error(st);
}

ParallelCompactData::ParallelCompactData() :
  _heap_start(nullptr),
  DEBUG_ONLY(_heap_end(nullptr) COMMA)
  _region_vspace(nullptr),
  _reserved_byte_size(0),
  _region_data(nullptr),
  _region_count(0) {}

bool ParallelCompactData::initialize(MemRegion reserved_heap)
{
  _heap_start = reserved_heap.start();
  const size_t heap_size = reserved_heap.word_size();
  DEBUG_ONLY(_heap_end = _heap_start + heap_size;)

  assert(region_align_down(_heap_start) == _heap_start,
         "region start not aligned");

  return initialize_region_data(heap_size);
}

PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));

  const size_t rs_align = page_sz == os::vm_page_size() ? 0 :
    MAX2(page_sz, granularity);
  ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
  os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
                       rs.size(), page_sz);

  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
  if (vspace != nullptr) {
    if (vspace->expand_by(_reserved_byte_size)) {
      return vspace;
    }
    delete vspace;
    // Release memory reserved in the space.
    rs.release();
  }

  return nullptr;
}
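
// Sizing sketch (illustrative, not part of the original source): a 2GB heap
// has 2^28 words, i.e. 4096 regions of 64K words each.  Assuming a
// hypothetical sizeof(RegionData) of 40 bytes, raw_bytes would be 160KB,
// which is then rounded up to the larger of the chosen page size and the
// allocation granularity before being reserved and committed.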

bool ParallelCompactData::initialize_region_data(size_t heap_size)
{
  assert(is_aligned(heap_size, RegionSize), "precondition");

  const size_t count = heap_size >> Log2RegionSize;
  _region_vspace = create_vspace(count, sizeof(RegionData));
  if (_region_vspace != nullptr) {
    _region_data = (RegionData*)_region_vspace->reserved_low_addr();
    _region_count = count;
    return true;
  }
  return false;
}

void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
  assert(beg_region <= _region_count, "beg_region out of range");
  assert(end_region <= _region_count, "end_region out of range");

  const size_t region_cnt = end_region - beg_region;
  memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
}

void
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
{
  assert(is_region_aligned(beg), "not RegionSize aligned");
  assert(is_region_aligned(end), "not RegionSize aligned");

  size_t cur_region = addr_to_region_idx(beg);
  const size_t end_region = addr_to_region_idx(end);
  HeapWord* addr = beg;
  while (cur_region < end_region) {
    _region_data[cur_region].set_destination(addr);
    _region_data[cur_region].set_destination_count(0);
    _region_data[cur_region].set_source_region(cur_region);

    // Update live_obj_size so the region appears completely full.
    size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
    _region_data[cur_region].set_live_obj_size(live_size);

    ++cur_region;
    addr += RegionSize;
  }
}
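
// Illustrative note (not part of the original source): after this runs, every
// region in [beg, end) reports data_size() == RegionSize (its
// partial_obj_size() plus the adjusted live_obj_size()), so the dense prefix
// is treated as fully live and is never moved by the compaction phase.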

// Find the point at which a space can be split and, if necessary, record the
// split point.
//
// If the current src region (which overflowed the destination space) doesn't
// have a partial object, the split point is at the beginning of the current src
// region (an "easy" split, no extra bookkeeping required).
//
// If the current src region has a partial object, the split point is in the
// region where that partial object starts (call it the split_region).  If
// split_region has a partial object, then the split point is just after that
// partial object (a "hard" split where we have to record the split data and
// zero the partial_obj_size field).  With a "hard" split, we know that the
// partial_obj ends within split_region because the partial object that caused
// the overflow starts in split_region.  If split_region doesn't have a partial
// obj, then the split is at the beginning of split_region (another "easy"
// split).
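//
// Example (illustrative, not part of the original source): suppose the region
// that overflowed the destination space begins with 0x100 words of an object
// that starts two regions earlier; that earlier region is the split_region.
// If split_region itself starts with a partial object, the split is "hard":
// that object's size is recorded in the SplitInfo and split_region's
// partial_obj_size is zeroed.  If split_region has no partial object, the
// split is "easy" and falls at the start of split_region.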
HeapWord*
ParallelCompactData::summarize_split_space(size_t src_region,
                                           SplitInfo& split_info,
                                           HeapWord* destination,
                                           HeapWord* target_end,
                                           HeapWord** target_next)
{
  assert(destination <= target_end, "sanity");
  assert(destination + _region_data[src_region].data_size() > target_end,
    "region should not fit into target space");
  assert(is_region_aligned(target_end), "sanity");

  size_t split_region = src_region;
  HeapWord* split_destination = destination;
  size_t partial_obj_size = _region_data[src_region].partial_obj_size();

  if (destination + partial_obj_size > target_end) {
    // The split point is just after the partial object (if any) in the
    // src_region that contains the start of the object that overflowed the
    // destination space.
    //
    // Find the start of the "overflow" object and set split_region to the
    // region containing it.
    HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
    split_region = addr_to_region_idx(overflow_obj);

    // Clear the source_region field of all destination regions whose first word
    // came from data after the split point (a non-zero source_region field
    // implies a region must be filled).
    //
    // An alternative to the simple loop below:  clear during post_compact(),
    // which uses memcpy instead of individual stores, and is easy to
    // parallelize.  (The downside is that it clears the entire RegionData
    // object as opposed to just one field.)
    //
    // post_compact() would have to clear the summary data up to the highest
    // address that was written during the summary phase, which would be
    //
    //         max(top, max(new_top, clear_top))
    //
    // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
    // to target_end.
    const RegionData* const sr = region(split_region);
    const size_t beg_idx =
      addr_to_region_idx(region_align_up(sr->destination() +
                                         sr->partial_obj_size()));
    const size_t end_idx = addr_to_region_idx(target_end);

    log_develop_trace(gc, compaction)("split:  clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
    for (size_t idx = beg_idx; idx < end_idx; ++idx) {
      _region_data[idx].set_source_region(0);
    }

    // Set split_destination and partial_obj_size to reflect the split region.
    split_destination = sr->destination();
    partial_obj_size = sr->partial_obj_size();
  }

  // The split is recorded only if a partial object extends onto the region.
  if (partial_obj_size != 0) {
    _region_data[split_region].set_partial_obj_size(0);
    split_info.record(split_region, partial_obj_size, split_destination);
  }

  // Setup the continuation addresses.
  *target_next = split_destination + partial_obj_size;
  HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;

  if (log_develop_is_enabled(Trace, gc, compaction)) {
    const char* split_type = partial_obj_size == 0 ? "easy" : "hard";
    log_develop_trace(gc, compaction)("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
                                      split_type, p2i(source_next), split_region, partial_obj_size);
    log_develop_trace(gc, compaction)("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
                                      split_type, p2i(split_destination),
                                      addr_to_region_idx(split_destination),
                                      p2i(*target_next));

    if (partial_obj_size != 0) {
      HeapWord* const po_beg = split_info.destination();
      HeapWord* const po_end = po_beg + split_info.partial_obj_size();
      log_develop_trace(gc, compaction)("%s split:  po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
                                        split_type,
                                        p2i(po_beg), addr_to_region_idx(po_beg),
                                        p2i(po_end), addr_to_region_idx(po_end));
    }
  }

  return source_next;
}

size_t ParallelCompactData::live_words_in_space(const MutableSpace* space,
                                                HeapWord** full_region_prefix_end) {
  size_t cur_region = addr_to_region_idx(space->bottom());
  const size_t end_region = addr_to_region_idx(region_align_up(space->top()));
  size_t live_words = 0;
  if (full_region_prefix_end == nullptr) {
    for (/* empty */; cur_region < end_region; ++cur_region) {
      live_words += _region_data[cur_region].data_size();
    }
  } else {
    bool first_set = false;
    for (/* empty */; cur_region < end_region; ++cur_region) {
      size_t live_words_in_region = _region_data[cur_region].data_size();
      if (!first_set && live_words_in_region < RegionSize) {
        *full_region_prefix_end = region_to_addr(cur_region);
        first_set = true;
      }
      live_words += live_words_in_region;
    }
    if (!first_set) {
      // All regions are full of live objs.
      assert(is_region_aligned(space->top()), "inv");
      *full_region_prefix_end = space->top();
    }
    assert(*full_region_prefix_end != nullptr, "postcondition");
    assert(is_region_aligned(*full_region_prefix_end), "inv");
    assert(*full_region_prefix_end >= space->bottom(), "in-range");
    assert(*full_region_prefix_end <= space->top(), "in-range");
  }
  return live_words;
}

bool ParallelCompactData::summarize(SplitInfo& split_info,
                                    HeapWord* source_beg, HeapWord* source_end,
                                    HeapWord** source_next,
                                    HeapWord* target_beg, HeapWord* target_end,
                                    HeapWord** target_next)
{
  HeapWord* const source_next_val = source_next == nullptr ? nullptr : *source_next;
  log_develop_trace(gc, compaction)(
      "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
      " tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
      p2i(source_beg), p2i(source_end), p2i(source_next_val),
      p2i(target_beg), p2i(target_end), p2i(*target_next));

  size_t cur_region = addr_to_region_idx(source_beg);
  const size_t end_region = addr_to_region_idx(region_align_up(source_end));

  HeapWord* dest_addr = target_beg;
  while (cur_region < end_region) {
    // The destination must be set even if the region has no data.
    _region_data[cur_region].set_destination(dest_addr);

    size_t words = _region_data[cur_region].data_size();
    if (words > 0) {
      // If cur_region does not fit entirely into the target space, find a point
      // at which the source space can be 'split' so that part is copied to the
      // target space and the rest is copied elsewhere.
      if (dest_addr + words > target_end) {
        assert(source_next != nullptr, "source_next is null when splitting");
        *source_next = summarize_split_space(cur_region, split_info, dest_addr,
                                             target_end, target_next);
        return false;
      }

      // Compute the destination_count for cur_region, and if necessary, update
      // source_region for a destination region.  The source_region field is
      // updated if cur_region is the first (left-most) region to be copied to a
      // destination region.
      //
      // The destination_count calculation is a bit subtle.  A region that has
      // data that compacts into itself does not count itself as a destination.
      // This maintains the invariant that a zero count means the region is
      // available and can be claimed and then filled.
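      //
      // For example (illustrative): a region whose live data compacts entirely
      // into itself ends up with destination_count == 0 and can be claimed and
      // filled immediately, while a region whose data is copied into two other
      // destination regions ends up with destination_count == 2.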
      uint destination_count = 0;
      if (split_info.is_split(cur_region)) {
        // The current region has been split:  the partial object will be copied
        // to one destination space and the remaining data will be copied to
        // another destination space.  Adjust the initial destination_count and,
        // if necessary, set the source_region field if the partial object will
        // cross a destination region boundary.
        destination_count = split_info.destination_count();
        if (destination_count == 2) {
          size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
          _region_data[dest_idx].set_source_region(cur_region);
        }
      }

      HeapWord* const last_addr = dest_addr + words - 1;
      const size_t dest_region_1 = addr_to_region_idx(dest_addr);
      const size_t dest_region_2 = addr_to_region_idx(last_addr);

      // Initially assume that the destination regions will be the same and
      // adjust the value below if necessary.  Under this assumption, if
      // cur_region == dest_region_2, then cur_region will be compacted
      // completely into itself.
      destination_count += cur_region == dest_region_2 ? 0 : 1;
      if (dest_region_1 != dest_region_2) {
        // Destination regions differ; adjust destination_count.
        destination_count += 1;
        // Data from cur_region will be copied to the start of dest_region_2.
        _region_data[dest_region_2].set_source_region(cur_region);
      } else if (is_region_aligned(dest_addr)) {
        // Data from cur_region will be copied to the start of the destination
        // region.
        _region_data[dest_region_1].set_source_region(cur_region);
      }

      _region_data[cur_region].set_destination_count(destination_count);
      dest_addr += words;
    }

    ++cur_region;
  }

  *target_next = dest_addr;
  return true;
}

#ifdef ASSERT
void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
{
  const size_t* const beg = (const size_t*)vspace->committed_low_addr();
  const size_t* const end = (const size_t*)vspace->committed_high_addr();
  for (const size_t* p = beg; p < end; ++p) {
    assert(*p == 0, "not zero");
  }
}

void ParallelCompactData::verify_clear()
{
  verify_clear(_region_vspace);
}
#endif  // #ifdef ASSERT

STWGCTimer          PSParallelCompact::_gc_timer;
ParallelOldTracer   PSParallelCompact::_gc_tracer;
elapsedTimer        PSParallelCompact::_accumulated_time;
unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
CollectorCounters*  PSParallelCompact::_counters = nullptr;
ParMarkBitMap       PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

class PCAdjustPointerClosure: public BasicOopIterateClosure {
  template <typename T>
  void do_oop_work(T* p) { PSParallelCompact::adjust_pointer(p); }

public:
  virtual void do_oop(oop* p)                { do_oop_work(p); }
  virtual void do_oop(narrowOop* p)          { do_oop_work(p); }

  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
};

static PCAdjustPointerClosure pc_adjust_pointer_closure;

bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _span_based_discoverer.set_span(heap->reserved_region());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,   // mt processing degree
                           ParallelGCThreads,   // mt discovery degree
                           false,               // concurrent_discovery
                           &_is_alive_closure); // non-header is alive closure

  _counters = new CollectorCounters("Parallel full collection pauses", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}

bool PSParallelCompact::initialize_aux_data() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();
  assert(mr.byte_size() != 0, "heap should be reserved");

  initialize_space_info();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
      "garbage collection for the requested " SIZE_FORMAT "KB heap.",
      _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  if (!_summary_data.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate " SIZE_FORMAT "KB summary data for parallel "
      "garbage collection for the requested " SIZE_FORMAT "KB heap.",
      _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  return true;
}

void PSParallelCompact::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  _space_info[old_space_id].set_space(heap->old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
}

void
PSParallelCompact::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that will
  // be set at the end of GC.  The marking bitmap is cleared to top; nothing
  // should be marked above top.  The summary data is cleared to the larger of
  // top & new_top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();
  HeapWord* const max_top = MAX2(top, _space_info[id].new_top());

  _mark_bitmap.clear_range(bot, top);

  const size_t beg_region = _summary_data.addr_to_region_idx(bot);
  const size_t end_region =
    _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
  _summary_data.clear_range(beg_region, end_region);

  // Clear the data used to 'split' regions.
  SplitInfo& split_info = _space_info[id].split_info();
  if (split_info.is_valid()) {
    split_info.clear();
  }
  DEBUG_ONLY(split_info.verify_clear();)
}

void PSParallelCompact::pre_compact()
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc.  Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of young
  // collections will have swapped the spaces an unknown number of times.
  GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  // Increment the invocation count
  heap->increment_total_collections(true);

  CodeCache::on_gc_marking_cycle_start();

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)
}

void PSParallelCompact::post_compact()
{
  GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
  ParCompactionManager::remove_all_shadow_regions();

  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap, summary data and split info.
    clear_data_covering_space(SpaceId(id));
    {
      MutableSpace* space = _space_info[id].space();
      HeapWord* top = space->top();
      HeapWord* new_top = _space_info[id].new_top();
      if (ZapUnusedHeapArea && new_top < top) {
        space->mangle_region(MemRegion(new_top, top));
      }
      // Update top().  Must be done after clearing the bitmap and summary data.
      space->set_top(new_top);
    }
  }

  ParCompactionManager::flush_all_string_dedup_requests();

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  bool eden_empty = eden_space->is_empty();

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
    to_space->is_empty();

  PSCardTable* ct = heap->card_table();
  MemRegion old_mr = heap->old_gen()->committed();
  if (young_gen_empty) {
    ct->clear_MemRegion(old_mr);
  } else {
    ct->dirty_MemRegion(old_mr);
  }

  {
    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
    DEBUG_ONLY(MetaspaceUtils::verify();)
  }

  // Need to clear claim bits for the next mark.
  ClassLoaderDataGraph::clear_claimed_marks();

  heap->prune_scavengable_nmethods();

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();
}

HeapWord* PSParallelCompact::compute_dense_prefix_for_old_space(MutableSpace* old_space,
                                                                HeapWord* full_region_prefix_end) {
  const size_t region_size = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();

  // Iteration starts with the region *after* the full-region-prefix-end.
  const RegionData* const start_region = sd.addr_to_region_ptr(full_region_prefix_end);
  // If the final region is not full, iteration stops before that region,
  // because fill_dense_prefix_end assumes that prefix_end <= top.
  const RegionData* const end_region = sd.addr_to_region_ptr(old_space->top());
  assert(start_region <= end_region, "inv");

  size_t max_waste = old_space->capacity_in_words() * (MarkSweepDeadRatio / 100.0);
  const RegionData* cur_region = start_region;
  for (/* empty */; cur_region < end_region; ++cur_region) {
    assert(region_size >= cur_region->data_size(), "inv");
    size_t dead_size = region_size - cur_region->data_size();
    if (max_waste < dead_size) {
      break;
    }
    max_waste -= dead_size;
  }

  HeapWord* const prefix_end = sd.region_to_addr(cur_region);
  assert(sd.is_region_aligned(prefix_end), "postcondition");
  assert(prefix_end >= full_region_prefix_end, "in-range");
  assert(prefix_end <= old_space->top(), "in-range");
  return prefix_end;
}

void PSParallelCompact::fill_dense_prefix_end(SpaceId id) {
  // Comparing two sizes to decide if filling is required:
  //
  // The size of the filler (min-obj-size) is 2 heap words with the default
  // MinObjAlignment, since both markword and klass take 1 heap word.
  //
  // The size of the gap (if any) right before dense-prefix-end is
  // MinObjAlignment.
  //
  // Need to fill in the gap only if it's smaller than min-obj-size, and the
  // filler obj will extend to next region.

  // Note: If min-fill-size decreases to 1, this whole method becomes redundant.
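
  // Concrete case (illustrative note, not part of the original source): with
  // 8-byte heap words and MinObjAlignment == 1, a dead object can leave a gap
  // of exactly one word right before dense-prefix-end.  The two-word filler
  // needed to plug that gap then spills one word into the region after the
  // dense prefix, which is what the bookkeeping below accounts for.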
 788   if (UseCompactObjectHeaders) {
 789     // The gap is always equal to min-fill-size, so nothing to do.
 790     return;
 791   }
 792   assert(CollectedHeap::min_fill_size() >= 2, "inv");
 793 #ifndef _LP64
 794   // In 32-bit system, each heap word is 4 bytes, so MinObjAlignment == 2.
 795   // The gap is always equal to min-fill-size, so nothing to do.
 796   return;
 797 #endif
 798   if (MinObjAlignment > 1) {
 799     return;
 800   }
 801   assert(CollectedHeap::min_fill_size() == 2, "inv");
 802   HeapWord* const dense_prefix_end = dense_prefix(id);
 803   assert(_summary_data.is_region_aligned(dense_prefix_end), "precondition");
 804   assert(dense_prefix_end <= space(id)->top(), "precondition");
 805   if (dense_prefix_end == space(id)->top()) {
 806     // Must not have single-word gap right before prefix-end/top.
 807     return;
 808   }
 809   RegionData* const region_after_dense_prefix = _summary_data.addr_to_region_ptr(dense_prefix_end);
 810 
 811   if (region_after_dense_prefix->partial_obj_size() != 0 ||
 812       _mark_bitmap.is_marked(dense_prefix_end)) {
 813     // The region after the dense prefix starts with live bytes.
 814     return;
 815   }
 816 
 817   HeapWord* block_start = start_array(id)->block_start_reaching_into_card(dense_prefix_end);
 818   if (block_start == dense_prefix_end - 1) {
 819     assert(!_mark_bitmap.is_marked(block_start), "inv");
 820     // There is exactly one heap word gap right before the dense prefix end, so we need a filler object.
 821     // The filler object will extend into region_after_dense_prefix.
 822     const size_t obj_len = 2; // min-fill-size
 823     HeapWord* const obj_beg = dense_prefix_end - 1;
 824     CollectedHeap::fill_with_object(obj_beg, obj_len);
 825     _mark_bitmap.mark_obj(obj_beg);
 826     _summary_data.addr_to_region_ptr(obj_beg)->add_live_obj(1);
 827     region_after_dense_prefix->set_partial_obj_size(1);
 828     region_after_dense_prefix->set_partial_obj_addr(obj_beg);
 829     assert(start_array(id) != nullptr, "sanity");
 830     start_array(id)->update_for_block(obj_beg, obj_beg + obj_len);
 831   }
 832 }
 833 
 834 bool PSParallelCompact::reassess_maximum_compaction(bool maximum_compaction,
 835                                                     size_t total_live_words,
 836                                                     MutableSpace* const old_space,
 837                                                     HeapWord* full_region_prefix_end) {
 838   // Check if all live objs are larger than old-gen.
 839   const bool is_old_gen_overflowing = (total_live_words > old_space->capacity_in_words());
 840 
 841   // JVM flags
 842   const uint total_invocations = ParallelScavengeHeap::heap()->total_full_collections();
 843   assert(total_invocations >= _maximum_compaction_gc_num, "sanity");
 844   const size_t gcs_since_max = total_invocations - _maximum_compaction_gc_num;
 845   const bool is_interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
 846 
 847   // If all regions in old-gen are full
 848   const bool is_region_full =
 849     full_region_prefix_end >= _summary_data.region_align_down(old_space->top());
 850 
 851   if (maximum_compaction || is_old_gen_overflowing || is_interval_ended || is_region_full) {
 852     _maximum_compaction_gc_num = total_invocations;
 853     return true;
 854   }
 855 
 856   return false;
 857 }
 858 
 859 void PSParallelCompact::summary_phase(bool maximum_compaction)
 860 {
 861   GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
 862 
 863   MutableSpace* const old_space = _space_info[old_space_id].space();
 864   {
 865     size_t total_live_words = 0;
 866     HeapWord* full_region_prefix_end = nullptr;
 867     {
 868       // old-gen
 869       size_t live_words = _summary_data.live_words_in_space(old_space,
 870                                                             &full_region_prefix_end);
 871       total_live_words += live_words;
 872     }
 873     // young-gen
 874     for (uint i = eden_space_id; i < last_space_id; ++i) {
 875       const MutableSpace* space = _space_info[i].space();
 876       size_t live_words = _summary_data.live_words_in_space(space);
 877       total_live_words += live_words;
 878       _space_info[i].set_new_top(space->bottom() + live_words);
 879       _space_info[i].set_dense_prefix(space->bottom());
 880     }
 881 
 882     maximum_compaction = reassess_maximum_compaction(maximum_compaction,
 883                                                      total_live_words,
 884                                                      old_space,
 885                                                      full_region_prefix_end);
 886     HeapWord* dense_prefix_end =
 887       maximum_compaction ? full_region_prefix_end
 888                          : compute_dense_prefix_for_old_space(old_space,
 889                                                               full_region_prefix_end);
 890     SpaceId id = old_space_id;
 891     _space_info[id].set_dense_prefix(dense_prefix_end);
 892 
 893     if (dense_prefix_end != old_space->bottom()) {
 894       fill_dense_prefix_end(id);
 895       _summary_data.summarize_dense_prefix(old_space->bottom(), dense_prefix_end);
 896     }
 897     _summary_data.summarize(_space_info[id].split_info(),
 898                             dense_prefix_end, old_space->top(), nullptr,
 899                             dense_prefix_end, old_space->end(),
 900                             _space_info[id].new_top_addr());
 901   }
 902 
 903   // Summarize the remaining spaces in the young gen.  The initial target space
 904   // is the old gen.  If a space does not fit entirely into the target, then the
 905   // remainder is compacted into the space itself and that space becomes the new
 906   // target.
 907   SpaceId dst_space_id = old_space_id;
 908   HeapWord* dst_space_end = old_space->end();
 909   HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
 910   for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
 911     const MutableSpace* space = _space_info[id].space();
 912     const size_t live = pointer_delta(_space_info[id].new_top(),
 913                                       space->bottom());
 914     const size_t available = pointer_delta(dst_space_end, *new_top_addr);
 915 
 916     if (live > 0 && live <= available) {
 917       // All the live data will fit.
 918       bool done = _summary_data.summarize(_space_info[id].split_info(),
 919                                           space->bottom(), space->top(),
 920                                           nullptr,
 921                                           *new_top_addr, dst_space_end,
 922                                           new_top_addr);
 923       assert(done, "space must fit into old gen");
 924 
 925       // Reset the new_top value for the space.
 926       _space_info[id].set_new_top(space->bottom());
 927     } else if (live > 0) {
 928       // Attempt to fit part of the source space into the target space.
 929       HeapWord* next_src_addr = nullptr;
 930       bool done = _summary_data.summarize(_space_info[id].split_info(),
 931                                           space->bottom(), space->top(),
 932                                           &next_src_addr,
 933                                           *new_top_addr, dst_space_end,
 934                                           new_top_addr);
 935       assert(!done, "space should not fit into old gen");
 936       assert(next_src_addr != nullptr, "sanity");
 937 
 938       // The source space becomes the new target, so the remainder is compacted
 939       // within the space itself.
 940       dst_space_id = SpaceId(id);
 941       dst_space_end = space->end();
 942       new_top_addr = _space_info[id].new_top_addr();
 943       done = _summary_data.summarize(_space_info[id].split_info(),
 944                                      next_src_addr, space->top(),
 945                                      nullptr,
 946                                      space->bottom(), dst_space_end,
 947                                      new_top_addr);
 948       assert(done, "space must fit when compacted into itself");
 949       assert(*new_top_addr <= space->top(), "usage should not grow");
 950     }
 951   }
 952 }
 953 
 954 // This method should contain all heap-specific policy for invoking a full
 955 // collection.  invoke_no_policy() will only attempt to compact the heap; it
 956 // will do nothing further.  If we need to bail out for policy reasons, scavenge
 957 // before full gc, or any other specialized behavior, it needs to be added here.
 958 //
 959 // Note that this method should only be called from the vm_thread while at a
 960 // safepoint.
 961 //
 962 // Note that the all_soft_refs_clear flag in the soft ref policy
 963 // may be true because this method can be called without intervening
 964 // activity.  For example when the heap space is tight and full measure
 965 // are being taken to free space.
 966 bool PSParallelCompact::invoke(bool maximum_heap_compaction) {
 967   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 968   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
 969          "should be in vm thread");
 970 
 971   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 972   assert(!heap->is_stw_gc_active(), "not reentrant");
 973 
 974   IsSTWGCActiveMark mark;
 975 
 976   const bool clear_all_soft_refs =
 977     heap->soft_ref_policy()->should_clear_all_soft_refs();
 978 
 979   return PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
 980                                              maximum_heap_compaction);
 981 }
 982 
 983 // This method contains no policy. You should probably
 984 // be calling invoke() instead.
 985 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
 986   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 987   assert(ref_processor() != nullptr, "Sanity");
 988 
 989   if (GCLocker::check_active_before_gc()) {
 990     return false;
 991   }
 992 
 993   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 994 
 995   GCIdMark gc_id_mark;
 996   _gc_timer.register_gc_start();
 997   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
 998 
 999   GCCause::Cause gc_cause = heap->gc_cause();
1000   PSYoungGen* young_gen = heap->young_gen();
1001   PSOldGen* old_gen = heap->old_gen();
1002   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1003 
1004   // The scope of casr should end after code that can change
1005   // SoftRefPolicy::_should_clear_all_soft_refs.
1006   ClearedAllSoftRefs casr(maximum_heap_compaction,
1007                           heap->soft_ref_policy());
1008 
1009   // Make sure data structures are sane, make the heap parsable, and do other
1010   // miscellaneous bookkeeping.
1011   pre_compact();
1012 
1013   const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
1014 
1015   {
1016     const uint active_workers =
1017       WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
1018                                         ParallelScavengeHeap::heap()->workers().active_workers(),
1019                                         Threads::number_of_non_daemon_threads());
1020     ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);
1021 
1022     GCTraceCPUTime tcpu(&_gc_tracer);
1023     GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);
1024 
1025     heap->pre_full_gc_dump(&_gc_timer);
1026 
1027     TraceCollectorStats tcs(counters());
1028     TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC");
1029 
1030     if (log_is_enabled(Debug, gc, heap, exit)) {
1031       accumulated_time()->start();
1032     }
1033 
1034     // Let the size policy know we're starting
1035     size_policy->major_collection_begin();
1036 
1037 #if COMPILER2_OR_JVMCI
1038     DerivedPointerTable::clear();
1039 #endif
1040 
1041     ref_processor()->start_discovery(maximum_heap_compaction);
1042 
1043     ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
1044                               false /* unregister_nmethods_during_purge */,
1045                               false /* lock_nmethod_free_separately */);
1046 
1047     marking_phase(&_gc_tracer);
1048 
1049     bool max_on_system_gc = UseMaximumCompactionOnSystemGC
1050       && GCCause::is_user_requested_gc(gc_cause);
1051     summary_phase(maximum_heap_compaction || max_on_system_gc);
1052 
1053 #if COMPILER2_OR_JVMCI
1054     assert(DerivedPointerTable::is_active(), "Sanity");
1055     DerivedPointerTable::set_active(false);
1056 #endif
1057 
1058     SlidingForwarding::begin();
1059 
1060     forward_to_new_addr();
1061 
1062     adjust_pointers();
1063 
1064     compact();
1065 
1066     SlidingForwarding::end();
1067 
1068     ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());
1069 
1070     ParCompactionManager::verify_all_region_stack_empty();
1071 
1072     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
1073     // done before resizing.
1074     post_compact();
1075 
1076     // Let the size policy know we're done
1077     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1078 
1079     if (UseAdaptiveSizePolicy) {
1080       log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
1081       log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
1082                           old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
1083 
1084       // Don't check if the size_policy is ready here.  Let
1085       // the size_policy check that internally.
1086       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1087           AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
1088         // Swap the survivor spaces if from_space is empty. The
1089         // resize_young_gen() called below is normally used after
1090         // a successful young GC and swapping of survivor spaces;
1091         // otherwise, it will fail to resize the young gen with
1092         // the current implementation.
1093         if (young_gen->from_space()->is_empty()) {
1094           young_gen->from_space()->clear(SpaceDecorator::Mangle);
1095           young_gen->swap_spaces();
1096         }
1097 
1098         // Calculate optimal free space amounts
1099         assert(young_gen->max_gen_size() >
1100           young_gen->from_space()->capacity_in_bytes() +
1101           young_gen->to_space()->capacity_in_bytes(),
1102           "Sizes of space in young gen are out-of-bounds");
1103 
1104         size_t young_live = young_gen->used_in_bytes();
1105         size_t eden_live = young_gen->eden_space()->used_in_bytes();
1106         size_t old_live = old_gen->used_in_bytes();
1107         size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
1108         size_t max_old_gen_size = old_gen->max_gen_size();
1109         size_t max_eden_size = young_gen->max_gen_size() -
1110           young_gen->from_space()->capacity_in_bytes() -
1111           young_gen->to_space()->capacity_in_bytes();
1112 
1113         // Used for diagnostics
1114         size_policy->clear_generation_free_space_flags();
1115 
1116         size_policy->compute_generations_free_space(young_live,
1117                                                     eden_live,
1118                                                     old_live,
1119                                                     cur_eden,
1120                                                     max_old_gen_size,
1121                                                     max_eden_size,
1122                                                     true /* full gc*/);
1123 
1124         size_policy->check_gc_overhead_limit(eden_live,
1125                                              max_old_gen_size,
1126                                              max_eden_size,
1127                                              true /* full gc*/,
1128                                              gc_cause,
1129                                              heap->soft_ref_policy());
1130 
1131         size_policy->decay_supplemental_growth(true /* full gc*/);
1132 
1133         heap->resize_old_gen(
1134           size_policy->calculated_old_free_size_in_bytes());
1135 
1136         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1137                                size_policy->calculated_survivor_size_in_bytes());
1138       }
1139 
1140       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
1141     }
1142 
1143     if (UsePerfData) {
1144       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1145       counters->update_counters();
1146       counters->update_old_capacity(old_gen->capacity_in_bytes());
1147       counters->update_young_capacity(young_gen->capacity_in_bytes());
1148     }
1149 
1150     heap->resize_all_tlabs();
1151 
1152     // Resize the metaspace capacity after a collection
1153     MetaspaceGC::compute_new_size();
1154 
1155     if (log_is_enabled(Debug, gc, heap, exit)) {
1156       accumulated_time()->stop();
1157     }
1158 
1159     heap->print_heap_change(pre_gc_values);
1160 
1161     // Track memory usage and detect low memory
1162     MemoryService::track_memory_usage();
1163     heap->update_counters();
1164 
1165     heap->post_full_gc_dump(&_gc_timer);
1166   }
1167 
1168   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1169     Universe::verify("After GC");
1170   }
1171 
1172   heap->print_heap_after_gc();
1173   heap->trace_heap_after_gc(&_gc_tracer);
1174 
1175   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
1176 
1177   _gc_timer.register_gc_end();
1178 
1179   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1180   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1181 
1182   return true;
1183 }
1184 
1185 class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
1186 private:
1187   uint _worker_id;
1188 
1189 public:
1190   PCAddThreadRootsMarkingTaskClosure(uint worker_id) : _worker_id(worker_id) { }
1191   void do_thread(Thread* thread) {
1192     assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1193 
1194     ResourceMark rm;
1195 
1196     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(_worker_id);
1197 
1198     PCMarkAndPushClosure mark_and_push_closure(cm);
1199     MarkingNMethodClosure mark_and_push_in_blobs(&mark_and_push_closure, !NMethodToOopClosure::FixRelocations, true /* keepalive nmethods */);
1200 
1201     thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
1202 
1203     // Do the real work
1204     cm->follow_marking_stacks();
1205   }
1206 };
1207 
1208 void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
1209   assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1210 
1211   ParCompactionManager* cm =
1212     ParCompactionManager::gc_thread_compaction_manager(worker_id);
1213 
1214   do {
1215     oop obj = nullptr;
1216     ObjArrayTask task;
1217     if (ParCompactionManager::steal_objarray(worker_id,  task)) {
1218       cm->follow_array((objArrayOop)task.obj(), task.index());
1219     } else if (ParCompactionManager::steal(worker_id, obj)) {
1220       cm->follow_contents(obj);
1221     }
1222     cm->follow_marking_stacks();
1223   } while (!terminator.offer_termination());
1224 }
1225 
1226 class MarkFromRootsTask : public WorkerTask {
1227   StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
1228   OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
1229   TaskTerminator _terminator;
1230   uint _active_workers;
1231 
1232 public:
1233   MarkFromRootsTask(uint active_workers) :
1234       WorkerTask("MarkFromRootsTask"),
1235       _strong_roots_scope(active_workers),
1236       _terminator(active_workers, ParCompactionManager::oop_task_queues()),
1237       _active_workers(active_workers) {}
1238 
1239   virtual void work(uint worker_id) {
1240     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1241     cm->create_marking_stats_cache();
1242     PCMarkAndPushClosure mark_and_push_closure(cm);
1243 
1244     {
1245       CLDToOopClosure cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
1246       ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
1247 
1248       // Do the real work
1249       cm->follow_marking_stacks();
1250     }
1251 
1252     PCAddThreadRootsMarkingTaskClosure closure(worker_id);
1253     Threads::possibly_parallel_threads_do(true /* is_par */, &closure);
1254 
1255     // Mark from OopStorages
1256     {
1257       _oop_storage_set_par_state.oops_do(&mark_and_push_closure);
1258       // Do the real work
1259       cm->follow_marking_stacks();
1260     }
1261 
1262     if (_active_workers > 1) {
1263       steal_marking_work(_terminator, worker_id);
1264     }
1265   }
1266 };
1267 
class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  ParallelCompactRefProcProxyTask(uint max_workers)
    : RefProcProxyTask("ParallelCompactRefProcProxyTask", max_workers),
      _terminator(_max_workers, ParCompactionManager::oop_task_queues()) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
    PCMarkAndPushClosure keep_alive(cm);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
    _rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &keep_alive, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

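// Flush and destroy the per-worker marking stats caches created during the
// marking phase.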
static void flush_marking_stats_cache(const uint num_workers) {
  for (uint i = 0; i < num_workers; ++i) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(i);
    cm->flush_and_destroy_marking_stats_cache();
  }
}

void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);

  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
  {
    GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);

    MarkFromRootsTask task(active_gc_threads);
    ParallelScavengeHeap::heap()->workers().run_task(&task);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

    ReferenceProcessorStats stats;
    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());

    ref_processor()->set_active_mt_degree(active_gc_threads);
    ParallelCompactRefProcProxyTask task(ref_processor()->max_num_queues());
    stats = ref_processor()->process_discovered_references(task, pt);

    gc_tracer->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  {
    GCTraceTime(Debug, gc, phases) tm("Flush Marking Stats", &_gc_timer);

    flush_marking_stats_cache(active_gc_threads);
  }

  // This is the point where the entire marking should have completed.
  ParCompactionManager::verify_all_marking_stack_empty();

  {
    GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
    WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
                                is_alive_closure(),
                                &do_nothing_cl,
                                1);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(is_alive_closure());

      // Follow system dictionary roots and unload classes.
      unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethods' memory.
      ctx->purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
      ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx->free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
  }

  {
    GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
    _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
  }
#if TASKQUEUE_STATS
  ParCompactionManager::oop_task_queues()->print_and_reset_taskqueue_stats("Oop Queue");
  ParCompactionManager::_objarray_task_queues->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
#endif
}

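// Iterate over the live part of the space in fixed-size stripes. Workers
// claim stripes via an atomic counter and invoke on_stripe for each claimed
// [stripe_start, stripe_end) range.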
template<typename Func>
void PSParallelCompact::adjust_in_space_helper(SpaceId id, volatile uint* claim_counter, Func&& on_stripe) {
  MutableSpace* sp = PSParallelCompact::space(id);
  HeapWord* const bottom = sp->bottom();
  HeapWord* const top = sp->top();
  if (bottom == top) {
    return;
  }

  const uint num_regions_per_stripe = 2;
  const size_t region_size = ParallelCompactData::RegionSize;
  const size_t stripe_size = num_regions_per_stripe * region_size;

  while (true) {
    uint counter = Atomic::fetch_then_add(claim_counter, num_regions_per_stripe);
    HeapWord* cur_stripe = bottom + counter * region_size;
    if (cur_stripe >= top) {
      break;
    }
    HeapWord* stripe_end = MIN2(cur_stripe + stripe_size, top);
    on_stripe(cur_stripe, stripe_end);
  }
}

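// Adjust pointers in old-space stripes. A stripe may start inside a live obj
// extending from a preceding stripe; such objs are scanned with
// [stripe_start, stripe_end) as a limit so that each stripe updates only the
// pointers lying inside its own range.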
void PSParallelCompact::adjust_in_old_space(volatile uint* claim_counter) {
  // Regions in old-space shouldn't be split.
  assert(!_space_info[old_space_id].split_info().is_valid(), "inv");

  auto scan_obj_with_limit = [&] (HeapWord* obj_start, HeapWord* left, HeapWord* right) {
    assert(mark_bitmap()->is_marked(obj_start), "inv");
    oop obj = cast_to_oop(obj_start);
    return obj->oop_iterate_size(&pc_adjust_pointer_closure, MemRegion(left, right));
  };

  adjust_in_space_helper(old_space_id, claim_counter, [&] (HeapWord* stripe_start, HeapWord* stripe_end) {
    assert(_summary_data.is_region_aligned(stripe_start), "inv");
    RegionData* cur_region = _summary_data.addr_to_region_ptr(stripe_start);
    HeapWord* obj_start;
    if (cur_region->partial_obj_size() != 0) {
      obj_start = cur_region->partial_obj_addr();
      obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
    } else {
      obj_start = stripe_start;
    }

    while (obj_start < stripe_end) {
      obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
      if (obj_start >= stripe_end) {
        break;
      }
      obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
    }
  });
}

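// Adjust pointers in a young space. Each obj is processed entirely by the
// stripe containing its start, so no scan limit is needed here.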
void PSParallelCompact::adjust_in_young_space(SpaceId id, volatile uint* claim_counter) {
  adjust_in_space_helper(id, claim_counter, [](HeapWord* stripe_start, HeapWord* stripe_end) {
    HeapWord* obj_start = stripe_start;
    while (obj_start < stripe_end) {
      obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
      if (obj_start >= stripe_end) {
        break;
      }
      oop obj = cast_to_oop(obj_start);
      obj_start += obj->oop_iterate_size(&pc_adjust_pointer_closure);
    }
  });
}

void PSParallelCompact::adjust_pointers_in_spaces(uint worker_id, volatile uint* claim_counters) {
  auto start_time = Ticks::now();
  adjust_in_old_space(&claim_counters[0]);
  for (uint id = eden_space_id; id < last_space_id; ++id) {
    adjust_in_young_space(SpaceId(id), &claim_counters[id]);
  }
  log_trace(gc, phases)("adjust_pointers_in_spaces worker %u: %.3f ms", worker_id, (Ticks::now() - start_time).seconds() * 1000);
}

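// Adjusts all roots and heap pointers to refer to the new (forwarded)
// locations: preserved marks, the heap spaces, thread stacks, OopStorage,
// CLDs, weak oops, and nmethods.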
class PSAdjustTask final : public WorkerTask {
  SubTasksDone                               _sub_tasks;
  WeakProcessor::Task                        _weak_proc_task;
  OopStorageSetStrongParState<false, false>  _oop_storage_iter;
  uint                                       _nworkers;
  volatile uint _claim_counters[PSParallelCompact::last_space_id] = {};

  enum PSAdjustSubTask {
    PSAdjustSubTask_code_cache,

    PSAdjustSubTask_num_elements
  };

public:
  PSAdjustTask(uint nworkers) :
    WorkerTask("PSAdjust task"),
    _sub_tasks(PSAdjustSubTask_num_elements),
    _weak_proc_task(nworkers),
    _nworkers(nworkers) {

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
    if (nworkers > 1) {
      Threads::change_thread_claim_token();
    }
  }

  ~PSAdjustTask() {
    Threads::assert_all_threads_claimed();
  }

  void work(uint worker_id) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
    cm->preserved_marks()->adjust_during_full_gc();
    {
      // adjust pointers in all spaces
      PSParallelCompact::adjust_pointers_in_spaces(worker_id, _claim_counters);
    }
    {
      ResourceMark rm;
      Threads::possibly_parallel_oops_do(_nworkers > 1, &pc_adjust_pointer_closure, nullptr);
    }
    _oop_storage_iter.oops_do(&pc_adjust_pointer_closure);
    {
      CLDToOopClosure cld_closure(&pc_adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
      ClassLoaderDataGraph::cld_do(&cld_closure);
    }
    {
      AlwaysTrueClosure always_alive;
      _weak_proc_task.work(worker_id, &always_alive, &pc_adjust_pointer_closure);
    }
    if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
      NMethodToOopClosure adjust_code(&pc_adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
      CodeCache::nmethods_do(&adjust_code);
    }
    _sub_tasks.all_tasks_claimed();
  }
};

void PSParallelCompact::adjust_pointers() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Adjust Pointers", &_gc_timer);
  uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
  PSAdjustTask task(nworkers);
  ParallelScavengeHeap::heap()->workers().run_task(&task);
}

// Split [start, end) evenly for a number of workers and return the
// range for worker_id.
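// For example, splitting 10 regions among 3 workers yields the ranges
// [0, 4), [4, 7) and [7, 10).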
static void split_regions_for_worker(size_t start, size_t end,
                                     uint worker_id, uint num_workers,
                                     size_t* worker_start, size_t* worker_end) {
  assert(start < end, "precondition");
  assert(num_workers > 0, "precondition");
  assert(worker_id < num_workers, "precondition");

  size_t num_regions = end - start;
  size_t num_regions_per_worker = num_regions / num_workers;
  size_t remainder = num_regions % num_workers;
  // The first few workers will get one extra.
  *worker_start = start + worker_id * num_regions_per_worker
                  + MIN2(checked_cast<size_t>(worker_id), remainder);
  *worker_end = *worker_start + num_regions_per_worker
                + (worker_id < remainder ? 1 : 0);
}

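// Install forwarding for every live obj outside the dense prefix: workers
// split the regions of each space, and within a region each marked obj is
// forwarded to the region's destination() plus the live words preceding the
// obj in that region.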
void PSParallelCompact::forward_to_new_addr() {
  GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
  uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();

  struct ForwardTask final : public WorkerTask {
    uint _num_workers;

    explicit ForwardTask(uint num_workers) :
      WorkerTask("PSForward task"),
      _num_workers(num_workers) {}

    void work(uint worker_id) override {
      ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
      for (uint id = old_space_id; id < last_space_id; ++id) {
        MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
        HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
        HeapWord* top = sp->top();

        if (dense_prefix_addr == top) {
          continue;
        }

        size_t dense_prefix_region = _summary_data.addr_to_region_idx(dense_prefix_addr);
        size_t top_region = _summary_data.addr_to_region_idx(_summary_data.region_align_up(top));
        size_t start_region;
        size_t end_region;
        split_regions_for_worker(dense_prefix_region, top_region,
                                 worker_id, _num_workers,
                                 &start_region, &end_region);
        for (size_t cur_region = start_region; cur_region < end_region; ++cur_region) {
          RegionData* region_ptr = _summary_data.region(cur_region);
          size_t live_words = region_ptr->partial_obj_size();

          if (live_words == ParallelCompactData::RegionSize) {
            // No obj-start
            continue;
          }

          HeapWord* region_start = _summary_data.region_to_addr(cur_region);
          HeapWord* region_end = region_start + ParallelCompactData::RegionSize;

          HeapWord* cur_addr = region_start + live_words;

          HeapWord* destination = region_ptr->destination();
          while (cur_addr < region_end) {
            cur_addr = mark_bitmap()->find_obj_beg(cur_addr, region_end);
            if (cur_addr >= region_end) {
              break;
            }
            assert(mark_bitmap()->is_marked(cur_addr), "inv");
            HeapWord* new_addr = destination + live_words;
            oop obj = cast_to_oop(cur_addr);
            if (new_addr != cur_addr) {
              cm->preserved_marks()->push_if_necessary(obj, obj->mark());
              SlidingForwarding::forward_to(obj, cast_to_oop(new_addr));
            }
            size_t obj_size = obj->size();
            live_words += obj_size;
            cur_addr += obj_size;
          }
        }
      }
    }
  } task(nworkers);

  ParallelScavengeHeap::heap()->workers().run_task(&task);
  debug_only(verify_forward();)
}

#ifdef ASSERT
void PSParallelCompact::verify_forward() {
  HeapWord* old_dense_prefix_addr = dense_prefix(SpaceId(old_space_id));
  RegionData* old_region = _summary_data.region(_summary_data.addr_to_region_idx(old_dense_prefix_addr));
  HeapWord* bump_ptr = old_region->partial_obj_size() != 0
                       ? old_dense_prefix_addr + old_region->partial_obj_size()
                       : old_dense_prefix_addr;
  SpaceId bump_ptr_space = old_space_id;

  for (uint id = old_space_id; id < last_space_id; ++id) {
    MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
    HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
    HeapWord* top = sp->top();
    HeapWord* cur_addr = dense_prefix_addr;

    while (cur_addr < top) {
      cur_addr = mark_bitmap()->find_obj_beg(cur_addr, top);
      if (cur_addr >= top) {
        break;
      }
      assert(mark_bitmap()->is_marked(cur_addr), "inv");
      // Move to the space containing cur_addr
      if (bump_ptr == _space_info[bump_ptr_space].new_top()) {
        bump_ptr = space(space_id(cur_addr))->bottom();
        bump_ptr_space = space_id(bump_ptr);
      }
      oop obj = cast_to_oop(cur_addr);
      if (cur_addr != bump_ptr) {
        assert(SlidingForwarding::forwardee(obj) == cast_to_oop(bump_ptr), "inv");
      }
      bump_ptr += obj->size();
      cur_addr += obj->size();
    }
  }
}
#endif

// Helper class to print 8 region numbers per line and then print the total at the end.
class FillableRegionLogger : public StackObj {
private:
  Log(gc, compaction) log;
  static const int LineLength = 8;
  size_t _regions[LineLength];
  int _next_index;
  bool _enabled;
  size_t _total_regions;
public:
  FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
  ~FillableRegionLogger() {
    log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
  }

  void print_line() {
    if (!_enabled || _next_index == 0) {
      return;
    }
    FormatBuffer<> line("Fillable: ");
    for (int i = 0; i < _next_index; i++) {
      line.append(" " SIZE_FORMAT_W(7), _regions[i]);
    }
    log.trace("%s", line.buffer());
    _next_index = 0;
  }

  void handle(size_t region) {
    if (!_enabled) {
      return;
    }
    _regions[_next_index++] = region;
    if (_next_index == LineLength) {
      print_line();
    }
    _total_regions++;
  }
};

void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
{
  GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);

  // Round-robin index of the worker whose region stack receives the next
  // claimed region.
  uint worker_id = 0;

  // Find all regions that are available (can be filled immediately) and
  // distribute them to the thread stacks.  The iteration is done in reverse
  // order (high to low) so the regions will be removed in ascending order.

  const ParallelCompactData& sd = PSParallelCompact::summary_data();

  // id + 1 is used to test termination so that unsigned arithmetic can be
  // used with old_space_id == 0.
  FillableRegionLogger region_logger;
  for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
    SpaceInfo* const space_info = _space_info + id;
    HeapWord* const new_top = space_info->new_top();

    const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
    const size_t end_region =
      sd.addr_to_region_idx(sd.region_align_up(new_top));

    for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
      if (sd.region(cur)->claim_unsafe()) {
        ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
        bool result = sd.region(cur)->mark_normal();
        assert(result, "Must succeed at this point.");
        cm->region_stack()->push(cur);
        region_logger.handle(cur);
        // Assign regions to tasks in round-robin fashion.
        if (++worker_id == parallel_gc_threads) {
          worker_id = 0;
        }
      }
    }
    region_logger.print_line();
  }
}

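// Drain this worker's preloaded region stack, then alternate between stealing
// regions from other workers and filling unavailable regions through shadow
// regions, until all workers agree to terminate.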
static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(worker_id);

  // Drain the stacks that have been preloaded with regions
  // that are ready to fill.

  cm->drain_region_stacks();

  guarantee(cm->region_stack()->is_empty(), "Not empty");

  size_t region_index = 0;

  while (true) {
    if (ParCompactionManager::steal(worker_id, region_index)) {
      PSParallelCompact::fill_and_update_region(cm, region_index);
      cm->drain_region_stacks();
    } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
      // Fill and update an unavailable region with the help of a shadow region
      PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
      cm->drain_region_stacks();
    } else {
      if (terminator->offer_termination()) {
        break;
      }
      // Go around again.
    }
  }
}

class FillDensePrefixAndCompactionTask: public WorkerTask {
  uint _num_workers;
  TaskTerminator _terminator;

public:
  FillDensePrefixAndCompactionTask(uint active_workers) :
      WorkerTask("FillDensePrefixAndCompactionTask"),
      _num_workers(active_workers),
      _terminator(active_workers, ParCompactionManager::region_task_queues()) {
  }

  virtual void work(uint worker_id) {
    {
      auto start = Ticks::now();
      PSParallelCompact::fill_dead_objs_in_dense_prefix(worker_id, _num_workers);
      log_trace(gc, phases)("Fill dense prefix by worker %u: %.3f ms", worker_id, (Ticks::now() - start).seconds() * 1000);
    }
    compaction_with_stealing_work(&_terminator, worker_id);
  }
};

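// Overwrite the dead range [start, end) in the dense prefix with filler objs
// and record each filler block in the block-start table.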
void PSParallelCompact::fill_range_in_dense_prefix(HeapWord* start, HeapWord* end) {
#ifdef ASSERT
  {
    assert(start < end, "precondition");
    assert(mark_bitmap()->find_obj_beg(start, end) == end, "precondition");
    HeapWord* bottom = _space_info[old_space_id].space()->bottom();
    if (start != bottom) {
      HeapWord* obj_start = mark_bitmap()->find_obj_beg_reverse(bottom, start);
      HeapWord* after_obj = obj_start + cast_to_oop(obj_start)->size();
      assert(after_obj == start, "precondition");
    }
  }
#endif

  CollectedHeap::fill_with_objects(start, pointer_delta(end, start));
  HeapWord* addr = start;
  do {
    size_t size = cast_to_oop(addr)->size();
    start_array(old_space_id)->update_for_block(addr, addr + size);
    addr += size;
  } while (addr < end);
}

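// Each worker fills the dead gaps in its chunk of dense-prefix regions. A gap
// is filled by the worker whose chunk contains the gap's start; the scan limit
// of prefix_end lets that worker handle a gap extending into the next chunk.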
void PSParallelCompact::fill_dead_objs_in_dense_prefix(uint worker_id, uint num_workers) {
  ParMarkBitMap* bitmap = mark_bitmap();

  HeapWord* const bottom = _space_info[old_space_id].space()->bottom();
  HeapWord* const prefix_end = dense_prefix(old_space_id);

  if (bottom == prefix_end) {
    return;
  }

  size_t bottom_region = _summary_data.addr_to_region_idx(bottom);
  size_t prefix_end_region = _summary_data.addr_to_region_idx(prefix_end);

  size_t start_region;
  size_t end_region;
  split_regions_for_worker(bottom_region, prefix_end_region,
                           worker_id, num_workers,
                           &start_region, &end_region);

  if (start_region == end_region) {
    return;
  }

  HeapWord* const start_addr = _summary_data.region_to_addr(start_region);
  HeapWord* const end_addr = _summary_data.region_to_addr(end_region);

  // Skip live partial obj (if any) from previous region.
  HeapWord* cur_addr;
  RegionData* start_region_ptr = _summary_data.region(start_region);
  if (start_region_ptr->partial_obj_size() != 0) {
    HeapWord* partial_obj_start = start_region_ptr->partial_obj_addr();
    assert(bitmap->is_marked(partial_obj_start), "inv");
    cur_addr = partial_obj_start + cast_to_oop(partial_obj_start)->size();
  } else {
    cur_addr = start_addr;
  }

  // end_addr is inclusive to handle regions starting with dead space.
  while (cur_addr <= end_addr) {
    // Use prefix_end to handle trailing obj in each worker region-chunk.
    HeapWord* live_start = bitmap->find_obj_beg(cur_addr, prefix_end);
    if (cur_addr != live_start) {
      // Only worker 0 fills dead space at the start of its chunk; for later
      // workers that gap is filled by the preceding worker, whose scan
      // extends up to prefix_end.
      if (cur_addr != start_addr || worker_id == 0) {
        fill_range_in_dense_prefix(cur_addr, live_start);
      }
    }
    if (live_start >= end_addr) {
      break;
    }
    assert(bitmap->is_marked(live_start), "inv");
    cur_addr = live_start + cast_to_oop(live_start)->size();
  }
}

void PSParallelCompact::compact() {
  GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);

  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  initialize_shadow_regions(active_gc_threads);
  prepare_region_draining_tasks(active_gc_threads);

  {
    GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);

    FillDensePrefixAndCompactionTask task(active_gc_threads);
    ParallelScavengeHeap::heap()->workers().run_task(&task);

#ifdef ASSERT
    verify_filler_in_dense_prefix();

    // Verify that all regions have been processed.
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      verify_complete(SpaceId(id));
    }
#endif
  }
}

#ifdef ASSERT
void PSParallelCompact::verify_filler_in_dense_prefix() {
  HeapWord* bottom = _space_info[old_space_id].space()->bottom();
  HeapWord* dense_prefix_end = dense_prefix(old_space_id);
  HeapWord* cur_addr = bottom;
  while (cur_addr < dense_prefix_end) {
    oop obj = cast_to_oop(cur_addr);
    oopDesc::verify(obj);
    if (!mark_bitmap()->is_marked(cur_addr)) {
      Klass* k = cast_to_oop(cur_addr)->klass_without_asserts();
      assert(k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass(), "inv");
    }
    cur_addr += obj->size();
  }
}

void PSParallelCompact::verify_complete(SpaceId space_id) {
  // All Regions served as compaction targets, from dense_prefix() to
  // new_top(), should be marked as filled and all Regions between new_top()
  // and top() should be available (i.e., should have been emptied).
  ParallelCompactData& sd = summary_data();
  SpaceInfo si = _space_info[space_id];
  HeapWord* new_top_addr = sd.region_align_up(si.new_top());
  HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
  const size_t beg_region = sd.addr_to_region_idx(si.dense_prefix());
  const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
  const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);

  size_t cur_region;
  for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
    const RegionData* const c = sd.region(cur_region);
    if (!c->completed()) {
      log_warning(gc)("region " SIZE_FORMAT " not filled: destination_count=%u",
                      cur_region, c->destination_count());
    }
  }

  for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
    const RegionData* const c = sd.region(cur_region);
    if (!c->available()) {
      log_warning(gc)("region " SIZE_FORMAT " not empty: destination_count=%u",
                      cur_region, c->destination_count());
    }
  }
}
#endif  // #ifdef ASSERT

// Return the SpaceId for the space containing addr.  If addr is not in the
// heap, last_space_id is returned.  In debug mode it expects the address to be
// in the heap and asserts such.
PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
  assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    if (_space_info[id].space()->contains(addr)) {
      return SpaceId(id);
    }
  }

  assert(false, "no space contains the addr");
  return last_space_id;
}

// Skip over count live words starting from beg, and return the address of the
// next live word.  Unless marked, the word corresponding to beg is assumed to
// be dead.  Callers must either ensure beg does not correspond to the middle of
// an object, or account for those live words in some other way.  Callers must
// also ensure that there are enough live words in the range [beg, end) to skip.
HeapWord*
PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
{
  assert(count > 0, "sanity");

  ParMarkBitMap* m = mark_bitmap();
  HeapWord* cur_addr = beg;
  while (true) {
    cur_addr = m->find_obj_beg(cur_addr, end);
    assert(cur_addr < end, "inv");
    size_t obj_size = cast_to_oop(cur_addr)->size();
    // Strictly greater-than
    if (obj_size > count) {
      return cur_addr + count;
    }
    count -= obj_size;
    cur_addr += obj_size;
  }
}

HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
                                            SpaceId src_space_id,
                                            size_t src_region_idx)
{
  assert(summary_data().is_region_aligned(dest_addr), "not aligned");

  const SplitInfo& split_info = _space_info[src_space_id].split_info();
  if (split_info.dest_region_addr() == dest_addr) {
    // The partial object ending at the split point contains the first word to
    // be copied to dest_addr.
    return split_info.first_src_addr();
  }

  const ParallelCompactData& sd = summary_data();
  ParMarkBitMap* const bitmap = mark_bitmap();
  const size_t RegionSize = ParallelCompactData::RegionSize;

  assert(sd.is_region_aligned(dest_addr), "not aligned");
  const RegionData* const src_region_ptr = sd.region(src_region_idx);
  const size_t partial_obj_size = src_region_ptr->partial_obj_size();
  HeapWord* const src_region_destination = src_region_ptr->destination();

  assert(dest_addr >= src_region_destination, "wrong src region");
  assert(src_region_ptr->data_size() > 0, "src region cannot be empty");

  HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
  HeapWord* const src_region_end = src_region_beg + RegionSize;

  HeapWord* addr = src_region_beg;
  if (dest_addr == src_region_destination) {
    // Return the first live word in the source region.
    if (partial_obj_size == 0) {
      addr = bitmap->find_obj_beg(addr, src_region_end);
      assert(addr < src_region_end, "no objects start in src region");
    }
    return addr;
  }

  // Must skip some live data.
  size_t words_to_skip = dest_addr - src_region_destination;
  assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");

  if (partial_obj_size >= words_to_skip) {
    // All the live words to skip are part of the partial object.
    addr += words_to_skip;
    if (partial_obj_size == words_to_skip) {
      // Find the first live word past the partial object.
      addr = bitmap->find_obj_beg(addr, src_region_end);
      assert(addr < src_region_end, "wrong src region");
    }
    return addr;
  }

  // Skip over the partial object (if any).
  if (partial_obj_size != 0) {
    words_to_skip -= partial_obj_size;
    addr += partial_obj_size;
  }

  // Skip over live words due to objects that start in the region.
  addr = skip_live_words(addr, src_region_end, words_to_skip);
  assert(addr < src_region_end, "wrong src region");
  return addr;
}

void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
                                                     SpaceId src_space_id,
                                                     size_t beg_region,
                                                     HeapWord* end_addr)
{
  ParallelCompactData& sd = summary_data();

#ifdef ASSERT
  MutableSpace* const src_space = _space_info[src_space_id].space();
  HeapWord* const beg_addr = sd.region_to_addr(beg_region);
  assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
         "src_space_id does not match beg_addr");
  assert(src_space->contains(end_addr) || end_addr == src_space->end(),
         "src_space_id does not match end_addr");
#endif // #ifdef ASSERT

  RegionData* const beg = sd.region(beg_region);
  RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));

  // Regions up to new_top() are enqueued if they become available.
  HeapWord* const new_top = _space_info[src_space_id].new_top();
  RegionData* const enqueue_end =
    sd.addr_to_region_ptr(sd.region_align_up(new_top));

  for (RegionData* cur = beg; cur < end; ++cur) {
    assert(cur->data_size() > 0, "region must have live data");
    cur->decrement_destination_count();
    if (cur < enqueue_end && cur->available() && cur->claim()) {
      if (cur->mark_normal()) {
        cm->push_region(sd.region(cur));
      } else if (cur->mark_copied()) {
        // Try to copy the content of the shadow region back to its
        // corresponding heap region if the shadow region is filled. Otherwise,
        // the GC thread that fills the shadow region will copy the data back
        // (see MoveAndUpdateShadowClosure::complete_region).
        copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur));
        ParCompactionManager::push_shadow_region_mt_safe(cur->shadow_region());
        cur->set_completed();
      }
    }
  }
}

size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
                                          SpaceId& src_space_id,
                                          HeapWord*& src_space_top,
                                          HeapWord* end_addr)
{
  typedef ParallelCompactData::RegionData RegionData;

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  const size_t region_size = ParallelCompactData::RegionSize;

  size_t src_region_idx = 0;

  // Skip empty regions (if any) up to the top of the space.
  HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
  RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
  HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
  const RegionData* const top_region_ptr =
    sd.addr_to_region_ptr(top_aligned_up);
  while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
    ++src_region_ptr;
  }

  if (src_region_ptr < top_region_ptr) {
    // The next source region is in the current space.  Update src_region_idx
    // and the source address to match src_region_ptr.
    src_region_idx = sd.region(src_region_ptr);
    HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
    if (src_region_addr > closure.source()) {
      closure.set_source(src_region_addr);
    }
    return src_region_idx;
  }

  // Switch to a new source space and find the first non-empty region.
  unsigned int space_id = src_space_id + 1;
  assert(space_id < last_space_id, "not enough spaces");

  HeapWord* const destination = closure.destination();

  do {
    MutableSpace* space = _space_info[space_id].space();
    HeapWord* const bottom = space->bottom();
    const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);

    // Iterate over the spaces that do not compact into themselves.
    if (bottom_cp->destination() != bottom) {
      HeapWord* const top_aligned_up = sd.region_align_up(space->top());
      const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);

      for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
        if (src_cp->live_obj_size() > 0) {
          // Found it.
          assert(src_cp->destination() == destination,
                 "first live obj in the space must match the destination");
          assert(src_cp->partial_obj_size() == 0,
                 "a space cannot begin with a partial obj");

          src_space_id = SpaceId(space_id);
          src_space_top = space->top();
          const size_t src_region_idx = sd.region(src_cp);
          closure.set_source(sd.region_to_addr(src_region_idx));
          return src_region_idx;
        } else {
          assert(src_cp->data_size() == 0, "sanity");
        }
      }
    }
  } while (++space_id < last_space_id);

  assert(false, "no source region was found");
  return 0;
}

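// Return the address just past the end of the partial obj (if any) that
// extends onto the region starting at region_start_addr.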
HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
  ParallelCompactData& sd = summary_data();
  assert(sd.is_region_aligned(region_start_addr), "precondition");

  // Use per-region partial_obj_size to locate the end of the obj that extends
  // into the region starting at region_start_addr.
  SplitInfo& split_info = _space_info[space_id(region_start_addr)].split_info();
  size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
  size_t end_region_idx = sd.region_count();
  size_t accumulated_size = 0;
  for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
    if (split_info.is_split(region_idx)) {
      accumulated_size += split_info.partial_obj_size();
      break;
    }
    size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
    accumulated_size += cur_partial_obj_size;
    if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
      break;
    }
  }
  return region_start_addr + accumulated_size;
}

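// Fill the destination region at region_idx by copying live words from its
// source regions, updating interior pointers as they are copied. Source
// regions that are fully drained have their destination counts decremented,
// which may make further regions available for filling.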
void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
{
  ParMarkBitMap* const bitmap = mark_bitmap();
  ParallelCompactData& sd = summary_data();
  RegionData* const region_ptr = sd.region(region_idx);

  // Get the source region and related info.
  size_t src_region_idx = region_ptr->source_region();
  SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
  HeapWord* src_space_top = _space_info[src_space_id].space()->top();
  HeapWord* dest_addr = sd.region_to_addr(region_idx);

  closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));

  // Adjust src_region_idx to prepare for decrementing destination counts (the
  // destination count is not decremented when a region is copied to itself).
  if (src_region_idx == region_idx) {
    src_region_idx += 1;
  }

  if (bitmap->is_unmarked(closure.source())) {
    // The first source word is in the middle of an object; copy the remainder
    // of the object or as much as will fit.  The fact that pointer updates were
    // deferred will be noted when the object header is processed.
    HeapWord* const old_src_addr = closure.source();
    {
      HeapWord* region_start = sd.region_align_down(closure.source());
      HeapWord* obj_start = bitmap->find_obj_beg_reverse(region_start, closure.source());
      HeapWord* obj_end;
      if (bitmap->is_marked(obj_start)) {
        HeapWord* next_region_start = region_start + ParallelCompactData::RegionSize;
        HeapWord* partial_obj_start = (next_region_start >= src_space_top)
                                      ? nullptr
                                      : sd.addr_to_region_ptr(next_region_start)->partial_obj_addr();
        if (partial_obj_start == obj_start) {
          // This obj extends into the next region.
          obj_end = partial_obj_end(next_region_start);
        } else {
          // Completely contained in this region; safe to use size().
          obj_end = obj_start + cast_to_oop(obj_start)->size();
        }
      } else {
        // This obj extends into the current region from an earlier one.
        obj_end = partial_obj_end(region_start);
      }
      size_t partial_obj_size = pointer_delta(obj_end, closure.source());
      closure.copy_partial_obj(partial_obj_size);
    }

    if (closure.is_full()) {
      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
      closure.complete_region(dest_addr, region_ptr);
      return;
    }

    HeapWord* const end_addr = sd.region_align_down(closure.source());
    if (sd.region_align_down(old_src_addr) != end_addr) {
      // The partial object was copied from more than one source region.
      decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

      // Move to the next source region, possibly switching spaces as well.  All
      // args except end_addr may be modified.
      src_region_idx = next_src_region(closure, src_space_id, src_space_top,
                                       end_addr);
    }
  }

  do {
    HeapWord* cur_addr = closure.source();
    HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
                                    src_space_top);
    HeapWord* partial_obj_start = (end_addr == src_space_top)
                                ? nullptr
                                : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
    // Apply the closure to objs inside [cur_addr, end_addr).
    do {
      cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
      if (cur_addr == end_addr) {
        break;
      }
      size_t obj_size;
      if (partial_obj_start == cur_addr) {
        obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
      } else {
        // This obj doesn't extend into the next region; size() is safe to use.
        obj_size = cast_to_oop(cur_addr)->size();
      }
      closure.do_addr(cur_addr, obj_size);
      cur_addr += obj_size;
    } while (cur_addr < end_addr && !closure.is_full());

    if (closure.is_full()) {
      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
      closure.complete_region(dest_addr, region_ptr);
      return;
    }

    decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

    // Move to the next source region, possibly switching spaces as well.  All
    // args except end_addr may be modified.
    src_region_idx = next_src_region(closure, src_space_id, src_space_top,
                                     end_addr);
  } while (true);
}

void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
{
  MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
  fill_region(cm, cl, region_idx);
}

void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx)
{
  // Get a shadow region first
  ParallelCompactData& sd = summary_data();
  RegionData* const region_ptr = sd.region(region_idx);
  size_t shadow_region = ParCompactionManager::pop_shadow_region_mt_safe(region_ptr);
  // The InvalidShadow return value indicates the corresponding heap region is available,
  // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
  // MoveAndUpdateShadowClosure to fill the acquired shadow region.
  if (shadow_region == ParCompactionManager::InvalidShadow) {
    MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
    region_ptr->shadow_to_normal();
    return fill_region(cm, cl, region_idx);
  } else {
    MoveAndUpdateShadowClosure cl(mark_bitmap(), region_idx, shadow_region);
    return fill_region(cm, cl, region_idx);
  }
}

void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr)
{
  Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
}

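// Try to claim, via this worker's shadow-region cursor, an unavailable region
// that can be processed using a shadow region. The cursor strides by the
// number of active workers; returns false once it passes old-space new_top().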
bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_t &region_idx)
{
  size_t next = cm->next_shadow_region();
  ParallelCompactData& sd = summary_data();
  size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  while (next < old_new_top) {
    if (sd.region(next)->mark_shadow()) {
      region_idx = next;
      return true;
    }
    next = cm->move_next_shadow_region_by(active_gc_threads);
  }

  return false;
}

// The shadow region is an optimization to address region dependencies in full GC. The basic
// idea is making more regions available by temporarily storing their live objects in empty
// shadow regions to resolve dependencies between them and the destination regions. Therefore,
// GC threads need not wait for destination regions to be available before processing sources.
//
// A typical workflow would be:
// After draining its own stack and failing to steal from others, a GC worker would pick an
// unavailable region (destination count > 0) and get a shadow region. Then the worker fills
// the shadow region by copying live objects from source regions of the unavailable one. Once
// the unavailable region becomes available, the data in the shadow region will be copied back.
// Shadow regions are empty regions in the to-space and regions between top and end of other spaces.
void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
{
  const ParallelCompactData& sd = PSParallelCompact::summary_data();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    SpaceInfo* const space_info = _space_info + id;
    MutableSpace* const space = space_info->space();

    const size_t beg_region =
      sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
    const size_t end_region =
      sd.addr_to_region_idx(sd.region_align_down(space->end()));

    for (size_t cur = beg_region; cur < end_region; ++cur) {
      ParCompactionManager::push_shadow_region(cur);
    }
  }

  size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
  for (uint i = 0; i < parallel_gc_threads; i++) {
    ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
    cm->set_next_shadow_region(beg_region + i);
  }
}

void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
{
  size_t words = MIN2(partial_obj_size, words_remaining());

  // This test is necessary; if omitted, the pointer updates to a partial object
  // that crosses the dense prefix boundary could be overwritten.
  if (source() != copy_destination()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
  }
  update_state(words);
}

void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
  region_ptr->set_completed();
}

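// Copy one obj (or as much of it as fits in the remaining destination space)
// to its new location and update the block-start table.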
void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  assert(destination() != nullptr, "sanity");
  _source = addr;

  // The start_array must be updated even if the object is not moving.
  if (_start_array != nullptr) {
    _start_array->update_for_block(destination(), destination() + words);
  }

  // Avoid overflow
  words = MIN2(words, words_remaining());
  assert(words > 0, "inv");

  if (copy_destination() != source()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    assert(source() != destination(), "inv");
    assert(cast_to_oop(source())->is_forwarded(), "inv");
    assert(SlidingForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
    cast_to_oop(copy_destination())->init_mark();
  }

  update_state(words);
}

void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
  // Record the shadow region index
  region_ptr->set_shadow_region(_shadow);
  // Mark the shadow region as filled to indicate the data is ready to be
  // copied back
  region_ptr->mark_filled();
  // Try to copy the content of the shadow region back to its corresponding
  // heap region if available; the GC thread that decreases the destination
  // count to zero will do the copying otherwise (see
  // PSParallelCompact::decrement_destination_counts).
  if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
    region_ptr->set_completed();
    PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
    ParCompactionManager::push_shadow_region_mt_safe(_shadow);
  }
}
2439