1 /*
   2  * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.hpp"
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "classfile/stringTable.hpp"
  29 #include "classfile/symbolTable.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "compiler/oopMap.hpp"
  33 #include "gc/parallel/objectStartArray.inline.hpp"
  34 #include "gc/parallel/parallelArguments.hpp"
  35 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  36 #include "gc/parallel/parMarkBitMap.inline.hpp"
  37 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  38 #include "gc/parallel/psCompactionManager.inline.hpp"
  39 #include "gc/parallel/psOldGen.hpp"
  40 #include "gc/parallel/psParallelCompact.inline.hpp"
  41 #include "gc/parallel/psPromotionManager.inline.hpp"
  42 #include "gc/parallel/psRootType.hpp"
  43 #include "gc/parallel/psScavenge.hpp"
  44 #include "gc/parallel/psStringDedup.hpp"
  45 #include "gc/parallel/psYoungGen.hpp"
  46 #include "gc/shared/classUnloadingContext.hpp"
  47 #include "gc/shared/gcCause.hpp"
  48 #include "gc/shared/gcHeapSummary.hpp"
  49 #include "gc/shared/gcId.hpp"
  50 #include "gc/shared/gcLocker.hpp"
  51 #include "gc/shared/gcTimer.hpp"
  52 #include "gc/shared/gcTrace.hpp"
  53 #include "gc/shared/gcTraceTime.inline.hpp"
  54 #include "gc/shared/gcVMOperations.hpp"
  55 #include "gc/shared/isGCActiveMark.hpp"
  56 #include "gc/shared/oopStorage.inline.hpp"
  57 #include "gc/shared/oopStorageSet.inline.hpp"
  58 #include "gc/shared/oopStorageSetParState.inline.hpp"
  59 #include "gc/shared/preservedMarks.inline.hpp"
  60 #include "gc/shared/referencePolicy.hpp"
  61 #include "gc/shared/referenceProcessor.hpp"
  62 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  63 #include "gc/shared/spaceDecorator.hpp"
  64 #include "gc/shared/strongRootsScope.hpp"
  65 #include "gc/shared/taskTerminator.hpp"
  66 #include "gc/shared/weakProcessor.inline.hpp"
  67 #include "gc/shared/workerPolicy.hpp"
  68 #include "gc/shared/workerThread.hpp"
  69 #include "gc/shared/workerUtils.hpp"
  70 #include "logging/log.hpp"
  71 #include "memory/iterator.inline.hpp"
  72 #include "memory/metaspaceUtils.hpp"
  73 #include "memory/resourceArea.hpp"
  74 #include "memory/universe.hpp"
  75 #include "nmt/memTracker.hpp"
  76 #include "oops/access.inline.hpp"
  77 #include "oops/instanceClassLoaderKlass.inline.hpp"
  78 #include "oops/instanceKlass.inline.hpp"
  79 #include "oops/instanceMirrorKlass.inline.hpp"
  80 #include "oops/methodData.hpp"
  81 #include "oops/objArrayKlass.inline.hpp"
  82 #include "oops/oop.inline.hpp"
  83 #include "runtime/atomic.hpp"
  84 #include "runtime/handles.inline.hpp"
  85 #include "runtime/java.hpp"
  86 #include "runtime/safepoint.hpp"
  87 #include "runtime/threads.hpp"
  88 #include "runtime/vmThread.hpp"
  89 #include "services/memoryService.hpp"
  90 #include "utilities/align.hpp"
  91 #include "utilities/debug.hpp"
  92 #include "utilities/events.hpp"
  93 #include "utilities/formatBuffer.hpp"
  94 #include "utilities/macros.hpp"
  95 #include "utilities/stack.inline.hpp"
  96 #if INCLUDE_JVMCI
  97 #include "jvmci/jvmci.hpp"
  98 #endif
  99 
 100 #include <math.h>
 101 
 102 // All sizes are in HeapWords.
 103 const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
 104 const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
 105 static_assert(ParallelCompactData::RegionSize >= BitsPerWord, "region-start bit word-aligned");
 106 const size_t ParallelCompactData::RegionSizeBytes =
 107   RegionSize << LogHeapWordSize;
 108 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
 109 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
 110 const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;
 111 
 112 const ParallelCompactData::RegionData::region_sz_t
 113 ParallelCompactData::RegionData::dc_shift = 27;
 114 
 115 const ParallelCompactData::RegionData::region_sz_t
 116 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
 117 
 118 const ParallelCompactData::RegionData::region_sz_t
 119 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
 120 
 121 const ParallelCompactData::RegionData::region_sz_t
 122 ParallelCompactData::RegionData::los_mask = ~dc_mask;
 123 
 124 const ParallelCompactData::RegionData::region_sz_t
 125 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
 126 
 127 const ParallelCompactData::RegionData::region_sz_t
 128 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
 129 
 130 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
 131 
 132 SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
 133 ReferenceProcessor* PSParallelCompact::_ref_processor = nullptr;
 134 
 135 void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
 136                        HeapWord* destination)
 137 {
 138   assert(src_region_idx != 0, "invalid src_region_idx");
 139   assert(partial_obj_size != 0, "invalid partial_obj_size argument");
 140   assert(destination != nullptr, "invalid destination argument");
 141 
 142   _src_region_idx = src_region_idx;
 143   _partial_obj_size = partial_obj_size;
 144   _destination = destination;
 145 
 146   // These fields may not be updated below, so make sure they're clear.
 147   assert(_dest_region_addr == nullptr, "should have been cleared");
 148   assert(_first_src_addr == nullptr, "should have been cleared");
 149 
 150   // Determine the number of destination regions for the partial object.
 151   HeapWord* const last_word = destination + partial_obj_size - 1;
 152   const ParallelCompactData& sd = PSParallelCompact::summary_data();
 153   HeapWord* const beg_region_addr = sd.region_align_down(destination);
 154   HeapWord* const end_region_addr = sd.region_align_down(last_word);
 155 
 156   if (beg_region_addr == end_region_addr) {
 157     // One destination region.
 158     _destination_count = 1;
 159     if (end_region_addr == destination) {
 160       // The destination falls on a region boundary, thus the first word of the
 161       // partial object will be the first word copied to the destination region.
 162       _dest_region_addr = end_region_addr;
 163       _first_src_addr = sd.region_to_addr(src_region_idx);
 164     }
 165   } else {
 166     // Two destination regions.  When copied, the partial object will cross a
 167     // destination region boundary, so a word somewhere within the partial
 168     // object will be the first word copied to the second destination region.
 169     _destination_count = 2;
 170     _dest_region_addr = end_region_addr;
 171     const size_t ofs = pointer_delta(end_region_addr, destination);
 172     assert(ofs < _partial_obj_size, "sanity");
 173     _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
 174   }
 175 }
 176 
 177 void SplitInfo::clear()
 178 {
 179   _src_region_idx = 0;
 180   _partial_obj_size = 0;
 181   _destination = nullptr;
 182   _destination_count = 0;
 183   _dest_region_addr = nullptr;
 184   _first_src_addr = nullptr;
 185   assert(!is_valid(), "sanity");
 186 }
 187 
 188 #ifdef  ASSERT
 189 void SplitInfo::verify_clear()
 190 {
 191   assert(_src_region_idx == 0, "not clear");
 192   assert(_partial_obj_size == 0, "not clear");
 193   assert(_destination == nullptr, "not clear");
 194   assert(_destination_count == 0, "not clear");
 195   assert(_dest_region_addr == nullptr, "not clear");
 196   assert(_first_src_addr == nullptr, "not clear");
 197 }
 198 #endif  // #ifdef ASSERT
 199 
 200 
 201 void PSParallelCompact::print_on_error(outputStream* st) {
 202   _mark_bitmap.print_on_error(st);
 203 }
 204 
 205 ParallelCompactData::ParallelCompactData() :
 206   _heap_start(nullptr),
 207   DEBUG_ONLY(_heap_end(nullptr) COMMA)
 208   _region_vspace(nullptr),
 209   _reserved_byte_size(0),
 210   _region_data(nullptr),
 211   _region_count(0) {}
 212 
 213 bool ParallelCompactData::initialize(MemRegion reserved_heap)
 214 {
 215   _heap_start = reserved_heap.start();
 216   const size_t heap_size = reserved_heap.word_size();
 217   DEBUG_ONLY(_heap_end = _heap_start + heap_size;)
 218 
 219   assert(region_align_down(_heap_start) == _heap_start,
 220          "region start not aligned");
 221 
 222   return initialize_region_data(heap_size);
 223 }
 224 
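     // Reserve and commit a virtual space big enough for 'count' elements of
     // 'element_size' bytes each, rounded up to page/allocation granularity.
     // Returns null if the reservation or the commit fails.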
 225 PSVirtualSpace*
 226 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 227 {
 228   const size_t raw_bytes = count * element_size;
 229   const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
 230   const size_t granularity = os::vm_allocation_granularity();
 231   _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
 232 
 233   const size_t rs_align = page_sz == os::vm_page_size() ? 0 :
 234     MAX2(page_sz, granularity);
 235   ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
 236   os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
 237                        rs.size(), page_sz);
 238 
 239   MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
 240 
 241   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
 242   if (vspace != nullptr) {
 243     if (vspace->expand_by(_reserved_byte_size)) {
 244       return vspace;
 245     }
 246     delete vspace;
 247     // Release memory reserved in the space.
 248     rs.release();
 249   }
 250 
 251   return nullptr;
 252 }
 253 
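     // Allocate the per-region table: one RegionData entry for each RegionSize-word
     // chunk of the heap.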
 254 bool ParallelCompactData::initialize_region_data(size_t heap_size)
 255 {
 256   assert(is_aligned(heap_size, RegionSize), "precondition");
 257 
 258   const size_t count = heap_size >> Log2RegionSize;
 259   _region_vspace = create_vspace(count, sizeof(RegionData));
 260   if (_region_vspace != nullptr) {
 261     _region_data = (RegionData*)_region_vspace->reserved_low_addr();
 262     _region_count = count;
 263     return true;
 264   }
 265   return false;
 266 }
 267 
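     // Zero the RegionData entries for regions in [beg_region, end_region).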
 268 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
 269   assert(beg_region <= _region_count, "beg_region out of range");
 270   assert(end_region <= _region_count, "end_region out of range");
 271 
 272   const size_t region_cnt = end_region - beg_region;
 273   memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
 274 }
 275 
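     // Record that each region in the dense prefix [beg, end) compacts into itself
     // and appears completely full, so its data is not moved.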
 276 void
 277 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
 278 {
 279   assert(is_region_aligned(beg), "not RegionSize aligned");
 280   assert(is_region_aligned(end), "not RegionSize aligned");
 281 
 282   size_t cur_region = addr_to_region_idx(beg);
 283   const size_t end_region = addr_to_region_idx(end);
 284   HeapWord* addr = beg;
 285   while (cur_region < end_region) {
 286     _region_data[cur_region].set_destination(addr);
 287     _region_data[cur_region].set_destination_count(0);
 288     _region_data[cur_region].set_source_region(cur_region);
 289 
 290     // Update live_obj_size so the region appears completely full.
 291     size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
 292     _region_data[cur_region].set_live_obj_size(live_size);
 293 
 294     ++cur_region;
 295     addr += RegionSize;
 296   }
 297 }
 298 
 299 // Find the point at which a space can be split and, if necessary, record the
 300 // split point.
 301 //
 302 // If the current src region (which overflowed the destination space) doesn't
 303 // have a partial object, the split point is at the beginning of the current src
 304 // region (an "easy" split, no extra bookkeeping required).
 305 //
 306 // If the current src region has a partial object, the split point is in the
 307 // region where that partial object starts (call it the split_region).  If
 308 // split_region has a partial object, then the split point is just after that
 309 // partial object (a "hard" split where we have to record the split data and
 310 // zero the partial_obj_size field).  With a "hard" split, we know that the
 311 // partial_obj ends within split_region because the partial object that caused
 312 // the overflow starts in split_region.  If split_region doesn't have a partial
 313 // obj, then the split is at the beginning of split_region (another "easy"
 314 // split).
 315 HeapWord*
 316 ParallelCompactData::summarize_split_space(size_t src_region,
 317                                            SplitInfo& split_info,
 318                                            HeapWord* destination,
 319                                            HeapWord* target_end,
 320                                            HeapWord** target_next)
 321 {
 322   assert(destination <= target_end, "sanity");
 323   assert(destination + _region_data[src_region].data_size() > target_end,
 324     "region should not fit into target space");
 325   assert(is_region_aligned(target_end), "sanity");
 326 
 327   size_t split_region = src_region;
 328   HeapWord* split_destination = destination;
 329   size_t partial_obj_size = _region_data[src_region].partial_obj_size();
 330 
 331   if (destination + partial_obj_size > target_end) {
 332     // The split point is just after the partial object (if any) in the
 333     // src_region that contains the start of the object that overflowed the
 334     // destination space.
 335     //
 336     // Find the start of the "overflow" object and set split_region to the
 337     // region containing it.
 338     HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
 339     split_region = addr_to_region_idx(overflow_obj);
 340 
 341     // Clear the source_region field of all destination regions whose first word
 342     // came from data after the split point (a non-null source_region field
 343     // implies a region must be filled).
 344     //
 345     // An alternative to the simple loop below:  clear during post_compact(),
 346     // which uses memcpy instead of individual stores, and is easy to
 347     // parallelize.  (The downside is that it clears the entire RegionData
 348     // object as opposed to just one field.)
 349     //
 350     // post_compact() would have to clear the summary data up to the highest
 351     // address that was written during the summary phase, which would be
 352     //
 353     //         max(top, max(new_top, clear_top))
 354     //
 355     // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
 356     // to target_end.
 357     const RegionData* const sr = region(split_region);
 358     const size_t beg_idx =
 359       addr_to_region_idx(region_align_up(sr->destination() +
 360                                          sr->partial_obj_size()));
 361     const size_t end_idx = addr_to_region_idx(target_end);
 362 
 363     log_develop_trace(gc, compaction)("split:  clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
 364     for (size_t idx = beg_idx; idx < end_idx; ++idx) {
 365       _region_data[idx].set_source_region(0);
 366     }
 367 
 368     // Set split_destination and partial_obj_size to reflect the split region.
 369     split_destination = sr->destination();
 370     partial_obj_size = sr->partial_obj_size();
 371   }
 372 
 373   // The split is recorded only if a partial object extends onto the region.
 374   if (partial_obj_size != 0) {
 375     _region_data[split_region].set_partial_obj_size(0);
 376     split_info.record(split_region, partial_obj_size, split_destination);
 377   }
 378 
 379   // Setup the continuation addresses.
 380   *target_next = split_destination + partial_obj_size;
 381   HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
 382 
 383   if (log_develop_is_enabled(Trace, gc, compaction)) {
 384     const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
 385     log_develop_trace(gc, compaction)("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
 386                                       split_type, p2i(source_next), split_region, partial_obj_size);
 387     log_develop_trace(gc, compaction)("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
 388                                       split_type, p2i(split_destination),
 389                                       addr_to_region_idx(split_destination),
 390                                       p2i(*target_next));
 391 
 392     if (partial_obj_size != 0) {
 393       HeapWord* const po_beg = split_info.destination();
 394       HeapWord* const po_end = po_beg + split_info.partial_obj_size();
 395       log_develop_trace(gc, compaction)("%s split:  po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
 396                                         split_type,
 397                                         p2i(po_beg), addr_to_region_idx(po_beg),
 398                                         p2i(po_end), addr_to_region_idx(po_end));
 399     }
 400   }
 401 
 402   return source_next;
 403 }
 404 
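     // Return the number of live words in the space.  If 'full_region_prefix_end' is
     // non-null, it is set to the end of the leading run of completely full regions.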
 405 size_t ParallelCompactData::live_words_in_space(const MutableSpace* space,
 406                                                 HeapWord** full_region_prefix_end) {
 407   size_t cur_region = addr_to_region_idx(space->bottom());
 408   const size_t end_region = addr_to_region_idx(region_align_up(space->top()));
 409   size_t live_words = 0;
 410   if (full_region_prefix_end == nullptr) {
 411     for (/* empty */; cur_region < end_region; ++cur_region) {
 412       live_words += _region_data[cur_region].data_size();
 413     }
 414   } else {
 415     bool first_set = false;
 416     for (/* empty */; cur_region < end_region; ++cur_region) {
 417       size_t live_words_in_region = _region_data[cur_region].data_size();
 418       if (!first_set && live_words_in_region < RegionSize) {
 419         *full_region_prefix_end = region_to_addr(cur_region);
 420         first_set = true;
 421       }
 422       live_words += live_words_in_region;
 423     }
 424     if (!first_set) {
 425       // All regions are full of live objs.
 426       assert(is_region_aligned(space->top()), "inv");
 427       *full_region_prefix_end = space->top();
 428     }
 429     assert(*full_region_prefix_end != nullptr, "postcondition");
 430     assert(is_region_aligned(*full_region_prefix_end), "inv");
 431     assert(*full_region_prefix_end >= space->bottom(), "in-range");
 432     assert(*full_region_prefix_end <= space->top(), "in-range");
 433   }
 434   return live_words;
 435 }
 436 
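     // Plan the compaction of [source_beg, source_end) into [target_beg, target_end):
     // assign each source region a destination and a destination count.  Returns true
     // if all live data fits in the target space; otherwise records a split in
     // split_info, sets *source_next to the point where summarization should resume,
     // and returns false.  In both cases *target_next is updated to the new top of
     // the target space.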
 437 bool ParallelCompactData::summarize(SplitInfo& split_info,
 438                                     HeapWord* source_beg, HeapWord* source_end,
 439                                     HeapWord** source_next,
 440                                     HeapWord* target_beg, HeapWord* target_end,
 441                                     HeapWord** target_next)
 442 {
 443   HeapWord* const source_next_val = source_next == nullptr ? nullptr : *source_next;
 444   log_develop_trace(gc, compaction)(
 445       "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
 446       " tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
 447       p2i(source_beg), p2i(source_end), p2i(source_next_val),
 448       p2i(target_beg), p2i(target_end), p2i(*target_next));
 449 
 450   size_t cur_region = addr_to_region_idx(source_beg);
 451   const size_t end_region = addr_to_region_idx(region_align_up(source_end));
 452 
 453   HeapWord *dest_addr = target_beg;
 454   while (cur_region < end_region) {
 455     // The destination must be set even if the region has no data.
 456     _region_data[cur_region].set_destination(dest_addr);
 457 
 458     size_t words = _region_data[cur_region].data_size();
 459     if (words > 0) {
 460       // If cur_region does not fit entirely into the target space, find a point
 461       // at which the source space can be 'split' so that part is copied to the
 462       // target space and the rest is copied elsewhere.
 463       if (dest_addr + words > target_end) {
 464         assert(source_next != nullptr, "source_next is null when splitting");
 465         *source_next = summarize_split_space(cur_region, split_info, dest_addr,
 466                                              target_end, target_next);
 467         return false;
 468       }
 469 
 470       // Compute the destination_count for cur_region, and if necessary, update
 471       // source_region for a destination region.  The source_region field is
 472       // updated if cur_region is the first (left-most) region to be copied to a
 473       // destination region.
 474       //
 475       // The destination_count calculation is a bit subtle.  A region that has
 476       // data that compacts into itself does not count itself as a destination.
 477       // This maintains the invariant that a zero count means the region is
 478       // available and can be claimed and then filled.
 479       uint destination_count = 0;
 480       if (split_info.is_split(cur_region)) {
 481         // The current region has been split:  the partial object will be copied
 482         // to one destination space and the remaining data will be copied to
 483         // another destination space.  Adjust the initial destination_count and,
 484         // if necessary, set the source_region field if the partial object will
 485         // cross a destination region boundary.
 486         destination_count = split_info.destination_count();
 487         if (destination_count == 2) {
 488           size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
 489           _region_data[dest_idx].set_source_region(cur_region);
 490         }
 491       }
 492 
 493       HeapWord* const last_addr = dest_addr + words - 1;
 494       const size_t dest_region_1 = addr_to_region_idx(dest_addr);
 495       const size_t dest_region_2 = addr_to_region_idx(last_addr);
 496 
 497       // Initially assume that the destination regions will be the same and
 498       // adjust the value below if necessary.  Under this assumption, if
 499       // cur_region == dest_region_2, then cur_region will be compacted
 500       // completely into itself.
 501       destination_count += cur_region == dest_region_2 ? 0 : 1;
 502       if (dest_region_1 != dest_region_2) {
 503         // Destination regions differ; adjust destination_count.
 504         destination_count += 1;
 505         // Data from cur_region will be copied to the start of dest_region_2.
 506         _region_data[dest_region_2].set_source_region(cur_region);
 507       } else if (is_region_aligned(dest_addr)) {
 508         // Data from cur_region will be copied to the start of the destination
 509         // region.
 510         _region_data[dest_region_1].set_source_region(cur_region);
 511       }
 512 
 513       _region_data[cur_region].set_destination_count(destination_count);
 514       dest_addr += words;
 515     }
 516 
 517     ++cur_region;
 518   }
 519 
 520   *target_next = dest_addr;
 521   return true;
 522 }
 523 
 524 #ifdef ASSERT
 525 void ParallelCompactData::verify_clear()
 526 {
 527   const size_t* const beg = (const size_t*) _region_vspace->committed_low_addr();
 528   const size_t* const end = (const size_t*) _region_vspace->committed_high_addr();
 529   for (const size_t* p = beg; p < end; ++p) {
 530     assert(*p == 0, "not zero");
 531   }
 532 }
 533 #endif  // #ifdef ASSERT
 534 
 535 STWGCTimer          PSParallelCompact::_gc_timer;
 536 ParallelOldTracer   PSParallelCompact::_gc_tracer;
 537 elapsedTimer        PSParallelCompact::_accumulated_time;
 538 unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
 539 CollectorCounters*  PSParallelCompact::_counters = nullptr;
 540 ParMarkBitMap       PSParallelCompact::_mark_bitmap;
 541 ParallelCompactData PSParallelCompact::_summary_data;
 542 
 543 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
 544 
 545 class PCAdjustPointerClosure: public BasicOopIterateClosure {
 546   template <typename T>
 547   void do_oop_work(T* p) { PSParallelCompact::adjust_pointer(p); }
 548 
 549 public:
 550   virtual void do_oop(oop* p)                { do_oop_work(p); }
 551   virtual void do_oop(narrowOop* p)          { do_oop_work(p); }
 552 
 553   virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
 554 };
 555 
 556 static PCAdjustPointerClosure pc_adjust_pointer_closure;
 557 
 558 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 559 
 560 void PSParallelCompact::post_initialize() {
 561   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 562   _span_based_discoverer.set_span(heap->reserved_region());
 563   _ref_processor =
 564     new ReferenceProcessor(&_span_based_discoverer,
 565                            ParallelGCThreads,   // mt processing degree
 566                            ParallelGCThreads,   // mt discovery degree
 567                            false,               // concurrent_discovery
 568                            &_is_alive_closure); // non-header is alive closure
 569 
 570   _counters = new CollectorCounters("Parallel full collection pauses", 1);
 571 
 572   // Initialize static fields in ParCompactionManager.
 573   ParCompactionManager::initialize(mark_bitmap());
 574 }
 575 
 576 bool PSParallelCompact::initialize_aux_data() {
 577   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 578   MemRegion mr = heap->reserved_region();
 579   assert(mr.byte_size() != 0, "heap should be reserved");
 580 
 581   initialize_space_info();
 582 
 583   if (!_mark_bitmap.initialize(mr)) {
 584     vm_shutdown_during_initialization(
 585       err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
 586       "garbage collection for the requested " SIZE_FORMAT "KB heap.",
 587       _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
 588     return false;
 589   }
 590 
 591   if (!_summary_data.initialize(mr)) {
 592     vm_shutdown_during_initialization(
 593       err_msg("Unable to allocate " SIZE_FORMAT "KB summary data for parallel "
 594       "garbage collection for the requested " SIZE_FORMAT "KB heap.",
 595       _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
 596     return false;
 597   }
 598 
 599   return true;
 600 }
 601 
 602 void PSParallelCompact::initialize_space_info()
 603 {
 604   memset(&_space_info, 0, sizeof(_space_info));
 605 
 606   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 607   PSYoungGen* young_gen = heap->young_gen();
 608 
 609   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
 610   _space_info[eden_space_id].set_space(young_gen->eden_space());
 611   _space_info[from_space_id].set_space(young_gen->from_space());
 612   _space_info[to_space_id].set_space(young_gen->to_space());
 613 
 614   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
 615 }
 616 
 617 void
 618 PSParallelCompact::clear_data_covering_space(SpaceId id)
 619 {
 620   // At this point, top is the value before GC, new_top() is the value that will
 621   // be set at the end of GC.  The marking bitmap is cleared to top; nothing
 622   // should be marked above top.  The summary data is cleared to the larger of
 623   // top & new_top.
 624   MutableSpace* const space = _space_info[id].space();
 625   HeapWord* const bot = space->bottom();
 626   HeapWord* const top = space->top();
 627   HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
 628 
 629   _mark_bitmap.clear_range(bot, top);
 630 
 631   const size_t beg_region = _summary_data.addr_to_region_idx(bot);
 632   const size_t end_region =
 633     _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
 634   _summary_data.clear_range(beg_region, end_region);
 635 
 636   // Clear the data used to 'split' regions.
 637   SplitInfo& split_info = _space_info[id].split_info();
 638   if (split_info.is_valid()) {
 639     split_info.clear();
 640   }
 641   DEBUG_ONLY(split_info.verify_clear();)
 642 }
 643 
 644 void PSParallelCompact::pre_compact()
 645 {
 646   // Update the from & to space pointers in space_info, since they are swapped
 647   // at each young gen gc.  Do the update unconditionally (even though a
 648   // promotion failure does not swap spaces) because an unknown number of young
 649   // collections will have swapped the spaces an unknown number of times.
 650   GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
 651   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 652   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
 653   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
 654 
 655   // Increment the invocation count
 656   heap->increment_total_collections(true);
 657 
 658   CodeCache::on_gc_marking_cycle_start();
 659 
 660   heap->print_heap_before_gc();
 661   heap->trace_heap_before_gc(&_gc_tracer);
 662 
 663   // Fill in TLABs
 664   heap->ensure_parsability(true);  // retire TLABs
 665 
 666   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
 667     Universe::verify("Before GC");
 668   }
 669 
 670   DEBUG_ONLY(mark_bitmap()->verify_clear();)
 671   DEBUG_ONLY(summary_data().verify_clear();)
 672 }
 673 
 674 void PSParallelCompact::post_compact()
 675 {
 676   GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
 677   ParCompactionManager::remove_all_shadow_regions();
 678 
 679   CodeCache::on_gc_marking_cycle_finish();
 680   CodeCache::arm_all_nmethods();
 681 
 682   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
 683     // Clear the marking bitmap, summary data and split info.
 684     clear_data_covering_space(SpaceId(id));
 685     {
 686       MutableSpace* space = _space_info[id].space();
 687       HeapWord* top = space->top();
 688       HeapWord* new_top = _space_info[id].new_top();
 689       if (ZapUnusedHeapArea && new_top < top) {
 690         space->mangle_region(MemRegion(new_top, top));
 691       }
 692       // Update top().  Must be done after clearing the bitmap and summary data.
 693       space->set_top(new_top);
 694     }
 695   }
 696 
 697   ParCompactionManager::flush_all_string_dedup_requests();
 698 
 699   MutableSpace* const eden_space = _space_info[eden_space_id].space();
 700   MutableSpace* const from_space = _space_info[from_space_id].space();
 701   MutableSpace* const to_space   = _space_info[to_space_id].space();
 702 
 703   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 704   bool eden_empty = eden_space->is_empty();
 705 
 706   // Update heap occupancy information which is used as input to the soft ref
 707   // clearing policy at the next gc.
 708   Universe::heap()->update_capacity_and_used_at_gc();
 709 
 710   bool young_gen_empty = eden_empty && from_space->is_empty() &&
 711     to_space->is_empty();
 712 
 713   PSCardTable* ct = heap->card_table();
 714   MemRegion old_mr = heap->old_gen()->committed();
 715   if (young_gen_empty) {
 716     ct->clear_MemRegion(old_mr);
 717   } else {
 718     ct->dirty_MemRegion(old_mr);
 719   }
 720 
 721   {
 722     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
 723     GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
 724     ClassLoaderDataGraph::purge(true /* at_safepoint */);
 725     DEBUG_ONLY(MetaspaceUtils::verify();)
 726   }
 727 
 728   // Need to clear claim bits for the next mark.
 729   ClassLoaderDataGraph::clear_claimed_marks();
 730 
 731   heap->prune_scavengable_nmethods();
 732 
 733 #if COMPILER2_OR_JVMCI
 734   DerivedPointerTable::update_pointers();
 735 #endif
 736 
 737   // Signal that we have completed a visit to all live objects.
 738   Universe::heap()->record_whole_heap_examined_timestamp();
 739 }
 740 
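     // Starting at the end of the fully-live region prefix, grow the dense prefix one
     // region at a time until the accumulated dead space would exceed
     // MarkSweepDeadRatio percent of old-gen capacity.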
 741 HeapWord* PSParallelCompact::compute_dense_prefix_for_old_space(MutableSpace* old_space,
 742                                                                 HeapWord* full_region_prefix_end) {
 743   const size_t region_size = ParallelCompactData::RegionSize;
 744   const ParallelCompactData& sd = summary_data();
 745 
 746   // Iteration starts with the region *after* the full-region-prefix-end.
 747   const RegionData* const start_region = sd.addr_to_region_ptr(full_region_prefix_end);
 748   // If the final region is not full, iteration stops before that region,
 749   // because fill_dense_prefix_end assumes that prefix_end <= top.
 750   const RegionData* const end_region = sd.addr_to_region_ptr(old_space->top());
 751   assert(start_region <= end_region, "inv");
 752 
 753   size_t max_waste = old_space->capacity_in_words() * (MarkSweepDeadRatio / 100.0);
 754   const RegionData* cur_region = start_region;
 755   for (/* empty */; cur_region < end_region; ++cur_region) {
 756     assert(region_size >= cur_region->data_size(), "inv");
 757     size_t dead_size = region_size - cur_region->data_size();
 758     if (max_waste < dead_size) {
 759       break;
 760     }
 761     max_waste -= dead_size;
 762   }
 763 
 764   HeapWord* const prefix_end = sd.region_to_addr(cur_region);
 765   assert(sd.is_region_aligned(prefix_end), "postcondition");
 766   assert(prefix_end >= full_region_prefix_end, "in-range");
 767   assert(prefix_end <= old_space->top(), "in-range");
 768   return prefix_end;
 769 }
 770 
 771 void PSParallelCompact::fill_dense_prefix_end(SpaceId id) {
 772   // Comparing two sizes to decide if filling is required:
 773   //
 774   // The size of the filler (min-obj-size) is 2 heap words with the default
 775   // MinObjAlignment, since both markword and klass take 1 heap word.
 776   //
 777   // The size of the gap (if any) right before dense-prefix-end is
 778   // MinObjAlignment.
 779   //
 780   // The gap needs to be filled only if it is smaller than min-obj-size and the
 781   // filler object would extend into the next region.
 782 
 783   // Note: If min-fill-size decreases to 1, this whole method becomes redundant.
 784   assert(CollectedHeap::min_fill_size() >= 2, "inv");
 785 #ifndef _LP64
 786   // On 32-bit systems, each heap word is 4 bytes, so MinObjAlignment == 2.
 787   // The gap is always equal to min-fill-size, so nothing to do.
 788   return;
 789 #endif
 790   if (MinObjAlignment > 1) {
 791     return;
 792   }
 793   assert(CollectedHeap::min_fill_size() == 2, "inv");
 794   HeapWord* const dense_prefix_end = dense_prefix(id);
 795   assert(_summary_data.is_region_aligned(dense_prefix_end), "precondition");
 796   assert(dense_prefix_end <= space(id)->top(), "precondition");
 797   if (dense_prefix_end == space(id)->top()) {
 798     // Must not have single-word gap right before prefix-end/top.
 799     return;
 800   }
 801   RegionData* const region_after_dense_prefix = _summary_data.addr_to_region_ptr(dense_prefix_end);
 802 
 803   if (region_after_dense_prefix->partial_obj_size() != 0 ||
 804       _mark_bitmap.is_marked(dense_prefix_end)) {
 805     // The region after the dense prefix starts with live bytes.
 806     return;
 807   }
 808 
 809   HeapWord* block_start = start_array(id)->block_start_reaching_into_card(dense_prefix_end);
 810   if (block_start == dense_prefix_end - 1) {
 811     assert(!_mark_bitmap.is_marked(block_start), "inv");
 812     // There is exactly one heap word gap right before the dense prefix end, so we need a filler object.
 813     // The filler object will extend into region_after_dense_prefix.
 814     const size_t obj_len = 2; // min-fill-size
 815     HeapWord* const obj_beg = dense_prefix_end - 1;
 816     CollectedHeap::fill_with_object(obj_beg, obj_len);
 817     _mark_bitmap.mark_obj(obj_beg);
 818     _summary_data.addr_to_region_ptr(obj_beg)->add_live_obj(1);
 819     region_after_dense_prefix->set_partial_obj_size(1);
 820     region_after_dense_prefix->set_partial_obj_addr(obj_beg);
 821     assert(start_array(id) != nullptr, "sanity");
 822     start_array(id)->update_for_block(obj_beg, obj_beg + obj_len);
 823   }
 824 }
 825 
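     // Return true if this full GC should compact maximally (i.e. without a dense
     // prefix): on a user-requested GC with UseMaximumCompactionOnSystemGC, when live
     // data exceeds old-gen capacity, when more than HeapMaximumCompactionInterval
     // full GCs have passed since the last maximal compaction, or when every old-gen
     // region is already full.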
 826 bool PSParallelCompact::check_maximum_compaction(size_t total_live_words,
 827                                                  MutableSpace* const old_space,
 828                                                  HeapWord* full_region_prefix_end) {
 829 
 830   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 831 
 832   // Check System.GC
 833   bool is_max_on_system_gc = UseMaximumCompactionOnSystemGC
 834                           && GCCause::is_user_requested_gc(heap->gc_cause());
 835 
 836   // Check if the total live data is larger than old-gen capacity.
 837   const bool is_old_gen_overflowing = (total_live_words > old_space->capacity_in_words());
 838 
 839   // JVM flags
 840   const uint total_invocations = heap->total_full_collections();
 841   assert(total_invocations >= _maximum_compaction_gc_num, "sanity");
 842   const size_t gcs_since_max = total_invocations - _maximum_compaction_gc_num;
 843   const bool is_interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
 844 
 845   // If all regions in old-gen are full
 846   const bool is_region_full =
 847     full_region_prefix_end >= _summary_data.region_align_down(old_space->top());
 848 
 849   if (is_max_on_system_gc || is_old_gen_overflowing || is_interval_ended || is_region_full) {
 850     _maximum_compaction_gc_num = total_invocations;
 851     return true;
 852   }
 853 
 854   return false;
 855 }
 856 
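     // Plan the compaction: choose the old-gen dense prefix, summarize old gen, then
     // summarize each young-gen space, compacting its live data into old gen where it
     // fits and into the space itself otherwise.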
 857 void PSParallelCompact::summary_phase()
 858 {
 859   GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
 860 
 861   MutableSpace* const old_space = _space_info[old_space_id].space();
 862   {
 863     size_t total_live_words = 0;
 864     HeapWord* full_region_prefix_end = nullptr;
 865     {
 866       // old-gen
 867       size_t live_words = _summary_data.live_words_in_space(old_space,
 868                                                             &full_region_prefix_end);
 869       total_live_words += live_words;
 870     }
 871     // young-gen
 872     for (uint i = eden_space_id; i < last_space_id; ++i) {
 873       const MutableSpace* space = _space_info[i].space();
 874       size_t live_words = _summary_data.live_words_in_space(space);
 875       total_live_words += live_words;
 876       _space_info[i].set_new_top(space->bottom() + live_words);
 877       _space_info[i].set_dense_prefix(space->bottom());
 878     }
 879 
 880     bool maximum_compaction = check_maximum_compaction(total_live_words,
 881                                                        old_space,
 882                                                        full_region_prefix_end);
 883     HeapWord* dense_prefix_end =
 884       maximum_compaction ? full_region_prefix_end
 885                          : compute_dense_prefix_for_old_space(old_space,
 886                                                               full_region_prefix_end);
 887     SpaceId id = old_space_id;
 888     _space_info[id].set_dense_prefix(dense_prefix_end);
 889 
 890     if (dense_prefix_end != old_space->bottom()) {
 891       fill_dense_prefix_end(id);
 892       _summary_data.summarize_dense_prefix(old_space->bottom(), dense_prefix_end);
 893     }
 894     _summary_data.summarize(_space_info[id].split_info(),
 895                             dense_prefix_end, old_space->top(), nullptr,
 896                             dense_prefix_end, old_space->end(),
 897                             _space_info[id].new_top_addr());
 898   }
 899 
 900   // Summarize the remaining spaces in the young gen.  The initial target space
 901   // is the old gen.  If a space does not fit entirely into the target, then the
 902   // remainder is compacted into the space itself and that space becomes the new
 903   // target.
 904   SpaceId dst_space_id = old_space_id;
 905   HeapWord* dst_space_end = old_space->end();
 906   HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
 907   for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
 908     const MutableSpace* space = _space_info[id].space();
 909     const size_t live = pointer_delta(_space_info[id].new_top(),
 910                                       space->bottom());
 911     const size_t available = pointer_delta(dst_space_end, *new_top_addr);
 912 
 913     if (live > 0 && live <= available) {
 914       // All the live data will fit.
 915       bool done = _summary_data.summarize(_space_info[id].split_info(),
 916                                           space->bottom(), space->top(),
 917                                           nullptr,
 918                                           *new_top_addr, dst_space_end,
 919                                           new_top_addr);
 920       assert(done, "space must fit into old gen");
 921 
 922       // Reset the new_top value for the space.
 923       _space_info[id].set_new_top(space->bottom());
 924     } else if (live > 0) {
 925       // Attempt to fit part of the source space into the target space.
 926       HeapWord* next_src_addr = nullptr;
 927       bool done = _summary_data.summarize(_space_info[id].split_info(),
 928                                           space->bottom(), space->top(),
 929                                           &next_src_addr,
 930                                           *new_top_addr, dst_space_end,
 931                                           new_top_addr);
 932       assert(!done, "space should not fit into old gen");
 933       assert(next_src_addr != nullptr, "sanity");
 934 
 935       // The source space becomes the new target, so the remainder is compacted
 936       // within the space itself.
 937       dst_space_id = SpaceId(id);
 938       dst_space_end = space->end();
 939       new_top_addr = _space_info[id].new_top_addr();
 940       done = _summary_data.summarize(_space_info[id].split_info(),
 941                                      next_src_addr, space->top(),
 942                                      nullptr,
 943                                      space->bottom(), dst_space_end,
 944                                      new_top_addr);
 945       assert(done, "space must fit when compacted into itself");
 946       assert(*new_top_addr <= space->top(), "usage should not grow");
 947     }
 948   }
 949 }
 950 
 951 // This method should contain all heap-specific policy for invoking a full
 952 // collection.  invoke_no_policy() will only attempt to compact the heap; it
 953 // will do nothing further.  If we need to bail out for policy reasons, scavenge
 954 // before the full gc, or apply any other specialized behavior, that logic belongs here.
 955 //
 956 // Note that this method should only be called from the vm_thread while at a
 957 // safepoint.
 958 //
 959 // Note that the all_soft_refs_clear flag in the soft ref policy
 960 // may be true because this method can be called without intervening
 961 // activity.  For example, when the heap space is tight and full measures
 962 // are being taken to free space.
 963 bool PSParallelCompact::invoke(bool clear_all_soft_refs) {
 964   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 965   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
 966          "should be in vm thread");
 967 
 968   SvcGCMarker sgcm(SvcGCMarker::FULL);
 969   IsSTWGCActiveMark mark;
 970 
 971   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 972   clear_all_soft_refs = clear_all_soft_refs
 973                      || heap->soft_ref_policy()->should_clear_all_soft_refs();
 974 
 975   return PSParallelCompact::invoke_no_policy(clear_all_soft_refs);
 976 }
 977 
 978 // This method contains no policy. You should probably
 979 // be calling invoke() instead.
 980 bool PSParallelCompact::invoke_no_policy(bool clear_all_soft_refs) {
 981   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 982   assert(ref_processor() != nullptr, "Sanity");
 983 
 984   if (GCLocker::check_active_before_gc()) {
 985     return false;
 986   }
 987 
 988   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 989 
 990   GCIdMark gc_id_mark;
 991   _gc_timer.register_gc_start();
 992   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
 993 
 994   GCCause::Cause gc_cause = heap->gc_cause();
 995   PSYoungGen* young_gen = heap->young_gen();
 996   PSOldGen* old_gen = heap->old_gen();
 997   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 998 
 999   // The scope of casr should end after code that can change
1000   // SoftRefPolicy::_should_clear_all_soft_refs.
1001   ClearedAllSoftRefs casr(clear_all_soft_refs,
1002                           heap->soft_ref_policy());
1003 
1004   // Make sure data structures are sane, make the heap parsable, and do other
1005   // miscellaneous bookkeeping.
1006   pre_compact();
1007 
1008   const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
1009 
1010   {
1011     const uint active_workers =
1012       WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
1013                                         ParallelScavengeHeap::heap()->workers().active_workers(),
1014                                         Threads::number_of_non_daemon_threads());
1015     ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);
1016 
1017     GCTraceCPUTime tcpu(&_gc_tracer);
1018     GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);
1019 
1020     heap->pre_full_gc_dump(&_gc_timer);
1021 
1022     TraceCollectorStats tcs(counters());
1023     TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC");
1024 
1025     if (log_is_enabled(Debug, gc, heap, exit)) {
1026       accumulated_time()->start();
1027     }
1028 
1029     // Let the size policy know we're starting
1030     size_policy->major_collection_begin();
1031 
1032 #if COMPILER2_OR_JVMCI
1033     DerivedPointerTable::clear();
1034 #endif
1035 
1036     ref_processor()->start_discovery(clear_all_soft_refs);
1037 
1038     ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
1039                               false /* unregister_nmethods_during_purge */,
1040                               false /* lock_nmethod_free_separately */);
1041 
1042     marking_phase(&_gc_tracer);
1043 
1044     summary_phase();
1045 
1046 #if COMPILER2_OR_JVMCI
1047     assert(DerivedPointerTable::is_active(), "Sanity");
1048     DerivedPointerTable::set_active(false);
1049 #endif
1050 
1051     forward_to_new_addr();
1052 
1053     adjust_pointers();
1054 
1055     compact();
1056 
1057     ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());
1058 
1059     ParCompactionManager::verify_all_region_stack_empty();
1060 
1061     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
1062     // done before resizing.
1063     post_compact();
1064 
1065     // Let the size policy know we're done
1066     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1067 
1068     if (UseAdaptiveSizePolicy) {
1069       log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
1070       log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
1071                           old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
1072 
1073       // Don't check if the size_policy is ready here.  Let
1074       // the size_policy check that internally.
1075       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1076           AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
1077         // Swap the survivor spaces if from_space is empty. The
1078         // resize_young_gen() called below is normally used after
1079         // a successful young GC and swapping of survivor spaces;
1080         // otherwise, it will fail to resize the young gen with
1081         // the current implementation.
1082         if (young_gen->from_space()->is_empty()) {
1083           young_gen->from_space()->clear(SpaceDecorator::Mangle);
1084           young_gen->swap_spaces();
1085         }
1086 
1087         // Calculate optimal free space amounts
1088         assert(young_gen->max_gen_size() >
1089           young_gen->from_space()->capacity_in_bytes() +
1090           young_gen->to_space()->capacity_in_bytes(),
1091           "Sizes of space in young gen are out-of-bounds");
1092 
1093         size_t young_live = young_gen->used_in_bytes();
1094         size_t eden_live = young_gen->eden_space()->used_in_bytes();
1095         size_t old_live = old_gen->used_in_bytes();
1096         size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
1097         size_t max_old_gen_size = old_gen->max_gen_size();
1098         size_t max_eden_size = young_gen->max_gen_size() -
1099           young_gen->from_space()->capacity_in_bytes() -
1100           young_gen->to_space()->capacity_in_bytes();
1101 
1102         // Used for diagnostics
1103         size_policy->clear_generation_free_space_flags();
1104 
1105         size_policy->compute_generations_free_space(young_live,
1106                                                     eden_live,
1107                                                     old_live,
1108                                                     cur_eden,
1109                                                     max_old_gen_size,
1110                                                     max_eden_size,
1111                                                     true /* full gc*/);
1112 
1113         size_policy->check_gc_overhead_limit(eden_live,
1114                                              max_old_gen_size,
1115                                              max_eden_size,
1116                                              true /* full gc*/,
1117                                              gc_cause,
1118                                              heap->soft_ref_policy());
1119 
1120         size_policy->decay_supplemental_growth(true /* full gc*/);
1121 
1122         heap->resize_old_gen(
1123           size_policy->calculated_old_free_size_in_bytes());
1124 
1125         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1126                                size_policy->calculated_survivor_size_in_bytes());
1127       }
1128 
1129       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
1130     }
1131 
1132     if (UsePerfData) {
1133       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1134       counters->update_counters();
1135       counters->update_old_capacity(old_gen->capacity_in_bytes());
1136       counters->update_young_capacity(young_gen->capacity_in_bytes());
1137     }
1138 
1139     heap->resize_all_tlabs();
1140 
1141     // Resize the metaspace capacity after a collection
1142     MetaspaceGC::compute_new_size();
1143 
1144     if (log_is_enabled(Debug, gc, heap, exit)) {
1145       accumulated_time()->stop();
1146     }
1147 
1148     heap->print_heap_change(pre_gc_values);
1149 
1150     // Track memory usage and detect low memory
1151     MemoryService::track_memory_usage();
1152     heap->update_counters();
1153 
1154     heap->post_full_gc_dump(&_gc_timer);
1155   }
1156 
1157   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1158     Universe::verify("After GC");
1159   }
1160 
1161   heap->print_heap_after_gc();
1162   heap->trace_heap_after_gc(&_gc_tracer);
1163 
1164   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
1165 
1166   _gc_timer.register_gc_end();
1167 
1168   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1169   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1170 
1171   return true;
1172 }
1173 
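     // Marks the roots reachable from a single Java thread (its stack frames and the
     // nmethods they reference), then drains this worker's marking stack.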
1174 class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
1175 private:
1176   uint _worker_id;
1177 
1178 public:
1179   PCAddThreadRootsMarkingTaskClosure(uint worker_id) : _worker_id(worker_id) { }
1180   void do_thread(Thread* thread) {
1181     assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1182 
1183     ResourceMark rm;
1184 
1185     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(_worker_id);
1186 
1187     MarkingNMethodClosure mark_and_push_in_blobs(&cm->_mark_and_push_closure,
1188                                                  !NMethodToOopClosure::FixRelocations,
1189                                                  true /* keepalive nmethods */);
1190 
1191     thread->oops_do(&cm->_mark_and_push_closure, &mark_and_push_in_blobs);
1192 
1193     // Do the real work
1194     cm->follow_marking_stacks();
1195   }
1196 };
1197 
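     // Work-stealing loop: repeatedly steal objArray chunks or individual objects from
     // other workers and process them until all workers agree to terminate.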
1198 void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
1199   assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1200 
1201   ParCompactionManager* cm =
1202     ParCompactionManager::gc_thread_compaction_manager(worker_id);
1203 
1204   do {
1205     oop obj = nullptr;
1206     ObjArrayTask task;
1207     if (ParCompactionManager::steal_objarray(worker_id,  task)) {
1208       cm->follow_array((objArrayOop)task.obj(), task.index());
1209     } else if (ParCompactionManager::steal(worker_id, obj)) {
1210       cm->follow_contents(obj);
1211     }
1212     cm->follow_marking_stacks();
1213   } while (!terminator.offer_termination());
1214 }
1215 
1216 class MarkFromRootsTask : public WorkerTask {
1217   StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
1218   OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
1219   TaskTerminator _terminator;
1220   uint _active_workers;
1221 
1222 public:
1223   MarkFromRootsTask(uint active_workers) :
1224       WorkerTask("MarkFromRootsTask"),
1225       _strong_roots_scope(active_workers),
1226       _terminator(active_workers, ParCompactionManager::oop_task_queues()),
1227       _active_workers(active_workers) {}
1228 
1229   virtual void work(uint worker_id) {
1230     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1231     cm->create_marking_stats_cache();
1232     {
1233       CLDToOopClosure cld_closure(&cm->_mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
1234       ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
1235 
1236       // Do the real work
1237       cm->follow_marking_stacks();
1238     }
1239 
1240     {
1241       PCAddThreadRootsMarkingTaskClosure closure(worker_id);
1242       Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &closure);
1243     }
1244 
1245     // Mark from OopStorages
1246     {
1247       _oop_storage_set_par_state.oops_do(&cm->_mark_and_push_closure);
1248       // Do the real work
1249       cm->follow_marking_stacks();
1250     }
1251 
1252     if (_active_workers > 1) {
1253       steal_marking_work(_terminator, worker_id);
1254     }
1255   }
1256 };
1257 
1258 class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
1259   TaskTerminator _terminator;
1260 
1261 public:
1262   ParallelCompactRefProcProxyTask(uint max_workers)
1263     : RefProcProxyTask("ParallelCompactRefProcProxyTask", max_workers),
1264       _terminator(_max_workers, ParCompactionManager::oop_task_queues()) {}
1265 
1266   void work(uint worker_id) override {
1267     assert(worker_id < _max_workers, "sanity");
1268     ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
1269     BarrierEnqueueDiscoveredFieldClosure enqueue;
1270     ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
1271     _rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &cm->_mark_and_push_closure, &enqueue, &complete_gc);
1272   }
1273 
1274   void prepare_run_task_hook() override {
1275     _terminator.reset_for_reuse(_queue_count);
1276   }
1277 };
1278 
1279 static void flush_marking_stats_cache(const uint num_workers) {
1280   for (uint i = 0; i < num_workers; ++i) {
1281     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(i);
1282     cm->flush_and_destroy_marking_stats_cache();
1283   }
1284 }
1285 
1286 void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
1287   // Recursively traverse all live objects and mark them
1288   GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
1289 
1290   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
1291 
1292   ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
1293   {
1294     GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
1295 
1296     MarkFromRootsTask task(active_gc_threads);
1297     ParallelScavengeHeap::heap()->workers().run_task(&task);
1298   }
1299 
1300   // Process reference objects found during marking
1301   {
1302     GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
1303 
1304     ReferenceProcessorStats stats;
1305     ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());
1306 
1307     ref_processor()->set_active_mt_degree(active_gc_threads);
1308     ParallelCompactRefProcProxyTask task(ref_processor()->max_num_queues());
1309     stats = ref_processor()->process_discovered_references(task, pt);
1310 
1311     gc_tracer->report_gc_reference_stats(stats);
1312     pt.print_all_references();
1313   }
1314 
1315   {
1316     GCTraceTime(Debug, gc, phases) tm("Flush Marking Stats", &_gc_timer);
1317 
1318     flush_marking_stats_cache(active_gc_threads);
1319   }
1320 
1321   // This is the point where the entire marking should have completed.
1322   ParCompactionManager::verify_all_marking_stack_empty();
1323 
1324   {
1325     GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
1326     WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
1327                                 is_alive_closure(),
1328                                 &do_nothing_cl,
1329                                 1);
1330   }
1331 
1332   {
1333     GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);
1334 
1335     ClassUnloadingContext* ctx = ClassUnloadingContext::context();
1336 
1337     bool unloading_occurred;
1338     {
1339       CodeCache::UnlinkingScope scope(is_alive_closure());
1340 
1341       // Follow system dictionary roots and unload classes.
1342       unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);
1343 
1344       // Unload nmethods.
1345       CodeCache::do_unloading(unloading_occurred);
1346     }
1347 
1348     {
1349       GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
1350       // Release the unloaded nmethods' memory.
1351       ctx->purge_nmethods();
1352     }
1353     {
1354       GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
1355       ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
1356     }
1357     {
1358       GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
1359       ctx->free_nmethods();
1360     }
1361 
1362     // Prune dead klasses from subklass/sibling/implementor lists.
1363     Klass::clean_weak_klass_links(unloading_occurred);
1364 
1365     // Clean JVMCI metadata handles.
1366     JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
1367   }
1368 
1369   {
1370     GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
1371     _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
1372   }
1373 #if TASKQUEUE_STATS
1374   ParCompactionManager::oop_task_queues()->print_and_reset_taskqueue_stats("Oop Queue");
1375   ParCompactionManager::_objarray_task_queues->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
1376 #endif
1377 }
1378 
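// Divide the used part of the space, [bottom, top), into stripes of a fixed
// number of regions and hand them out to workers via an atomic claim counter;
// on_stripe is invoked for each claimed stripe with its [stripe_start,
// stripe_end) bounds (stripe_end is clamped to top).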
1379 template<typename Func>
1380 void PSParallelCompact::adjust_in_space_helper(SpaceId id, volatile uint* claim_counter, Func&& on_stripe) {
1381   MutableSpace* sp = PSParallelCompact::space(id);
1382   HeapWord* const bottom = sp->bottom();
1383   HeapWord* const top = sp->top();
1384   if (bottom == top) {
1385     return;
1386   }
1387 
1388   const uint num_regions_per_stripe = 2;
1389   const size_t region_size = ParallelCompactData::RegionSize;
1390   const size_t stripe_size = num_regions_per_stripe * region_size;
1391 
1392   while (true) {
1393     uint counter = Atomic::fetch_then_add(claim_counter, num_regions_per_stripe);
1394     HeapWord* cur_stripe = bottom + counter * region_size;
1395     if (cur_stripe >= top) {
1396       break;
1397     }
1398     HeapWord* stripe_end = MIN2(cur_stripe + stripe_size, top);
1399     on_stripe(cur_stripe, stripe_end);
1400   }
1401 }
1402 
1403 void PSParallelCompact::adjust_in_old_space(volatile uint* claim_counter) {
1404   // Regions in old-space shouldn't be split.
1405   assert(!_space_info[old_space_id].split_info().is_valid(), "inv");
1406 
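  // Objects may straddle stripe boundaries; each stripe adjusts only the
  // fields inside [left, right) so no field is visited twice, while the full
  // object size is still returned so the caller can step to the next object.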
1407   auto scan_obj_with_limit = [&] (HeapWord* obj_start, HeapWord* left, HeapWord* right) {
1408     assert(mark_bitmap()->is_marked(obj_start), "inv");
1409     oop obj = cast_to_oop(obj_start);
1410     return obj->oop_iterate_size(&pc_adjust_pointer_closure, MemRegion(left, right));
1411   };
1412 
1413   adjust_in_space_helper(old_space_id, claim_counter, [&] (HeapWord* stripe_start, HeapWord* stripe_end) {
1414     assert(_summary_data.is_region_aligned(stripe_start), "inv");
1415     RegionData* cur_region = _summary_data.addr_to_region_ptr(stripe_start);
1416     HeapWord* obj_start;
1417     if (cur_region->partial_obj_size() != 0) {
1418       obj_start = cur_region->partial_obj_addr();
1419       obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
1420     } else {
1421       obj_start = stripe_start;
1422     }
1423 
1424     while (obj_start < stripe_end) {
1425       obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
1426       if (obj_start >= stripe_end) {
1427         break;
1428       }
1429       obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
1430     }
1431   });
1432 }
1433 
1434 void PSParallelCompact::adjust_in_young_space(SpaceId id, volatile uint* claim_counter) {
1435   adjust_in_space_helper(id, claim_counter, [](HeapWord* stripe_start, HeapWord* stripe_end) {
1436     HeapWord* obj_start = stripe_start;
1437     while (obj_start < stripe_end) {
1438       obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
1439       if (obj_start >= stripe_end) {
1440         break;
1441       }
1442       oop obj = cast_to_oop(obj_start);
1443       obj_start += obj->oop_iterate_size(&pc_adjust_pointer_closure);
1444     }
1445   });
1446 }
1447 
1448 void PSParallelCompact::adjust_pointers_in_spaces(uint worker_id, volatile uint* claim_counters) {
1449   auto start_time = Ticks::now();
1450   adjust_in_old_space(&claim_counters[0]);
1451   for (uint id = eden_space_id; id < last_space_id; ++id) {
1452     adjust_in_young_space(SpaceId(id), &claim_counters[id]);
1453   }
1454   log_trace(gc, phases)("adjust_pointers_in_spaces worker %u: %.3f ms", worker_id, (Ticks::now() - start_time).seconds() * 1000);
1455 }
1456 
1457 class PSAdjustTask final : public WorkerTask {
1458   SubTasksDone                               _sub_tasks;
1459   WeakProcessor::Task                        _weak_proc_task;
1460   OopStorageSetStrongParState<false, false>  _oop_storage_iter;
1461   uint                                       _nworkers;
1462   volatile uint _claim_counters[PSParallelCompact::last_space_id] = {};
1463 
1464   enum PSAdjustSubTask {
1465     PSAdjustSubTask_code_cache,
1466 
1467     PSAdjustSubTask_num_elements
1468   };
1469 
1470 public:
1471   PSAdjustTask(uint nworkers) :
1472     WorkerTask("PSAdjust task"),
1473     _sub_tasks(PSAdjustSubTask_num_elements),
1474     _weak_proc_task(nworkers),
1475     _nworkers(nworkers) {
1476 
1477     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
1478     if (nworkers > 1) {
1479       Threads::change_thread_claim_token();
1480     }
1481   }
1482 
1483   ~PSAdjustTask() {
1484     Threads::assert_all_threads_claimed();
1485   }
1486 
1487   void work(uint worker_id) {
1488     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1489     cm->preserved_marks()->adjust_during_full_gc();
1490     {
1491       // adjust pointers in all spaces
1492       PSParallelCompact::adjust_pointers_in_spaces(worker_id, _claim_counters);
1493     }
1494     {
1495       ResourceMark rm;
1496       Threads::possibly_parallel_oops_do(_nworkers > 1, &pc_adjust_pointer_closure, nullptr);
1497     }
1498     _oop_storage_iter.oops_do(&pc_adjust_pointer_closure);
1499     {
1500       CLDToOopClosure cld_closure(&pc_adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
1501       ClassLoaderDataGraph::cld_do(&cld_closure);
1502     }
1503     {
1504       AlwaysTrueClosure always_alive;
1505       _weak_proc_task.work(worker_id, &always_alive, &pc_adjust_pointer_closure);
1506     }
1507     if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
1508       NMethodToOopClosure adjust_code(&pc_adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
1509       CodeCache::nmethods_do(&adjust_code);
1510     }
1511     _sub_tasks.all_tasks_claimed();
1512   }
1513 };
1514 
1515 void PSParallelCompact::adjust_pointers() {
1516   // Adjust the pointers to reflect the new locations
1517   GCTraceTime(Info, gc, phases) tm("Adjust Pointers", &_gc_timer);
1518   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1519   PSAdjustTask task(nworkers);
1520   ParallelScavengeHeap::heap()->workers().run_task(&task);
1521 }
1522 
1523 // Split [start, end) evenly for a number of workers and return the
1524 // range for worker_id.
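// For example, splitting 10 regions among 4 workers yields chunks of
// 3, 3, 2 and 2 regions.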
1525 static void split_regions_for_worker(size_t start, size_t end,
1526                                      uint worker_id, uint num_workers,
1527                                      size_t* worker_start, size_t* worker_end) {
1528   assert(start < end, "precondition");
1529   assert(num_workers > 0, "precondition");
1530   assert(worker_id < num_workers, "precondition");
1531 
1532   size_t num_regions = end - start;
1533   size_t num_regions_per_worker = num_regions / num_workers;
1534   size_t remainder = num_regions % num_workers;
1535   // The first few workers will get one extra.
1536   *worker_start = start + worker_id * num_regions_per_worker
1537                   + MIN2(checked_cast<size_t>(worker_id), remainder);
1538   *worker_end = *worker_start + num_regions_per_worker
1539                 + (worker_id < remainder ? 1 : 0);
1540 }
1541 
1542 void PSParallelCompact::forward_to_new_addr() {
1543   GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1544   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1545 
1546   struct ForwardTask final : public WorkerTask {
1547     uint _num_workers;
1548 
1549     explicit ForwardTask(uint num_workers) :
1550       WorkerTask("PSForward task"),
1551       _num_workers(num_workers) {}
1552 
1553     void work(uint worker_id) override {
1554       ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1555       for (uint id = old_space_id; id < last_space_id; ++id) {
1556         MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1557         HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1558         HeapWord* top = sp->top();
1559 
1560         if (dense_prefix_addr == top) {
1561           continue;
1562         }
1563 
1564         size_t dense_prefix_region = _summary_data.addr_to_region_idx(dense_prefix_addr);
1565         size_t top_region = _summary_data.addr_to_region_idx(_summary_data.region_align_up(top));
1566         size_t start_region;
1567         size_t end_region;
1568         split_regions_for_worker(dense_prefix_region, top_region,
1569                                  worker_id, _num_workers,
1570                                  &start_region, &end_region);
1571         for (size_t cur_region = start_region; cur_region < end_region; ++cur_region) {
1572           RegionData* region_ptr = _summary_data.region(cur_region);
1573           size_t live_words = region_ptr->partial_obj_size();
1574 
1575           if (live_words == ParallelCompactData::RegionSize) {
1576             // No obj-start in this region; it is fully covered by a partial obj.
1577             continue;
1578           }
1579 
1580           HeapWord* region_start = _summary_data.region_to_addr(cur_region);
1581           HeapWord* region_end = region_start + ParallelCompactData::RegionSize;
1582 
1583           HeapWord* cur_addr = region_start + live_words;
1584 
1585           HeapWord* destination = region_ptr->destination();
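          // live_words counts the live words that precede cur_addr in this
          // region, so each object's new address is destination + live_words.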
1586           while (cur_addr < region_end) {
1587             cur_addr = mark_bitmap()->find_obj_beg(cur_addr, region_end);
1588             if (cur_addr >= region_end) {
1589               break;
1590             }
1591             assert(mark_bitmap()->is_marked(cur_addr), "inv");
1592             HeapWord* new_addr = destination + live_words;
1593             oop obj = cast_to_oop(cur_addr);
1594             if (new_addr != cur_addr) {
1595               cm->preserved_marks()->push_if_necessary(obj, obj->mark());
1596               obj->forward_to(cast_to_oop(new_addr));
1597             }
1598             size_t obj_size = obj->size();
1599             live_words += obj_size;
1600             cur_addr += obj_size;
1601           }
1602         }
1603       }
1604     }
1605   } task(nworkers);
1606 
1607   ParallelScavengeHeap::heap()->workers().run_task(&task);
1608   debug_only(verify_forward();)
1609 }
1610 
1611 #ifdef ASSERT
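// Verify the forwarding pass by simulating compaction with a bump pointer:
// walk the marked objects above the dense prefix in address order and check
// that every object that moves is forwarded to the current bump-pointer
// position.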
1612 void PSParallelCompact::verify_forward() {
1613   HeapWord* old_dense_prefix_addr = dense_prefix(SpaceId(old_space_id));
1614   RegionData* old_region = _summary_data.region(_summary_data.addr_to_region_idx(old_dense_prefix_addr));
1615   HeapWord* bump_ptr = old_region->partial_obj_size() != 0
1616                        ? old_dense_prefix_addr + old_region->partial_obj_size()
1617                        : old_dense_prefix_addr;
1618   SpaceId bump_ptr_space = old_space_id;
1619 
1620   for (uint id = old_space_id; id < last_space_id; ++id) {
1621     MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1622     HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1623     HeapWord* top = sp->top();
1624     HeapWord* cur_addr = dense_prefix_addr;
1625 
1626     while (cur_addr < top) {
1627       cur_addr = mark_bitmap()->find_obj_beg(cur_addr, top);
1628       if (cur_addr >= top) {
1629         break;
1630       }
1631       assert(mark_bitmap()->is_marked(cur_addr), "inv");
1632       // Move to the space containing cur_addr
1633       if (bump_ptr == _space_info[bump_ptr_space].new_top()) {
1634         bump_ptr = space(space_id(cur_addr))->bottom();
1635         bump_ptr_space = space_id(bump_ptr);
1636       }
1637       oop obj = cast_to_oop(cur_addr);
1638       if (cur_addr != bump_ptr) {
1639         assert(obj->forwardee() == cast_to_oop(bump_ptr), "inv");
1640       }
1641       bump_ptr += obj->size();
1642       cur_addr += obj->size();
1643     }
1644   }
1645 }
1646 #endif
1647 
1648 // Helper class to print 8 region numbers per line and then print the total at the end.
1649 class FillableRegionLogger : public StackObj {
1650 private:
1651   Log(gc, compaction) log;
1652   static const int LineLength = 8;
1653   size_t _regions[LineLength];
1654   int _next_index;
1655   bool _enabled;
1656   size_t _total_regions;
1657 public:
1658   FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
1659   ~FillableRegionLogger() {
1660     log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
1661   }
1662 
1663   void print_line() {
1664     if (!_enabled || _next_index == 0) {
1665       return;
1666     }
1667     FormatBuffer<> line("Fillable: ");
1668     for (int i = 0; i < _next_index; i++) {
1669       line.append(" " SIZE_FORMAT_W(7), _regions[i]);
1670     }
1671     log.trace("%s", line.buffer());
1672     _next_index = 0;
1673   }
1674 
1675   void handle(size_t region) {
1676     if (!_enabled) {
1677       return;
1678     }
1679     _regions[_next_index++] = region;
1680     if (_next_index == LineLength) {
1681       print_line();
1682     }
1683     _total_regions++;
1684   }
1685 };
1686 
1687 void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
1688 {
1689   GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
1690 
1691   // worker_id selects the compaction manager whose stack receives the next claimed region.
1692   uint worker_id = 0;
1693 
1694   // Find all regions that are available (can be filled immediately) and
1695   // distribute them to the thread stacks.  The iteration is done in reverse
1696   // order (high to low) so the regions will be removed in ascending order.
1697 
1698   const ParallelCompactData& sd = PSParallelCompact::summary_data();
1699 
1700   // id + 1 is used to test termination so that an unsigned counter can
1701   // be used with an old_space_id == 0.
1702   FillableRegionLogger region_logger;
1703   for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
1704     SpaceInfo* const space_info = _space_info + id;
1705     HeapWord* const new_top = space_info->new_top();
1706 
1707     const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
1708     const size_t end_region =
1709       sd.addr_to_region_idx(sd.region_align_up(new_top));
1710 
1711     for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
1712       if (sd.region(cur)->claim_unsafe()) {
1713         ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1714         bool result = sd.region(cur)->mark_normal();
1715         assert(result, "Must succeed at this point.");
1716         cm->region_stack()->push(cur);
1717         region_logger.handle(cur);
1718         // Assign regions to tasks in round-robin fashion.
1719         if (++worker_id == parallel_gc_threads) {
1720           worker_id = 0;
1721         }
1722       }
1723     }
1724     region_logger.print_line();
1725   }
1726 }
1727 
1728 static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
1729   assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1730 
1731   ParCompactionManager* cm =
1732     ParCompactionManager::gc_thread_compaction_manager(worker_id);
1733 
1734   // Drain the stacks that have been preloaded with regions
1735   // that are ready to fill.
1736 
1737   cm->drain_region_stacks();
1738 
1739   guarantee(cm->region_stack()->is_empty(), "Not empty");
1740 
1741   size_t region_index = 0;
1742 
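  // Alternate between stealing ready regions from other workers and claiming
  // unavailable regions to fill via shadow regions, until every worker offers
  // termination.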
1743   while (true) {
1744     if (ParCompactionManager::steal(worker_id, region_index)) {
1745       PSParallelCompact::fill_and_update_region(cm, region_index);
1746       cm->drain_region_stacks();
1747     } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
1748       // Fill and update an unavailable region with the help of a shadow region
1749       PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
1750       cm->drain_region_stacks();
1751     } else {
1752       if (terminator->offer_termination()) {
1753         break;
1754       }
1755       // Go around again.
1756     }
1757   }
1758 }
1759 
1760 class FillDensePrefixAndCompactionTask: public WorkerTask {
1761   uint _num_workers;
1762   TaskTerminator _terminator;
1763 
1764 public:
1765   FillDensePrefixAndCompactionTask(uint active_workers) :
1766       WorkerTask("FillDensePrefixAndCompactionTask"),
1767       _num_workers(active_workers),
1768       _terminator(active_workers, ParCompactionManager::region_task_queues()) {
1769   }
1770 
1771   virtual void work(uint worker_id) {
1772     {
1773       auto start = Ticks::now();
1774       PSParallelCompact::fill_dead_objs_in_dense_prefix(worker_id, _num_workers);
1775       log_trace(gc, phases)("Fill dense prefix by worker %u: %.3f ms", worker_id, (Ticks::now() - start).seconds() * 1000);
1776     }
1777     compaction_with_stealing_work(&_terminator, worker_id);
1778   }
1779 };
1780 
1781 void PSParallelCompact::fill_range_in_dense_prefix(HeapWord* start, HeapWord* end) {
1782 #ifdef ASSERT
1783   {
1784     assert(start < end, "precondition");
1785     assert(mark_bitmap()->find_obj_beg(start, end) == end, "precondition");
1786     HeapWord* bottom = _space_info[old_space_id].space()->bottom();
1787     if (start != bottom) {
1788       HeapWord* obj_start = mark_bitmap()->find_obj_beg_reverse(bottom, start);
1789       HeapWord* after_obj = obj_start + cast_to_oop(obj_start)->size();
1790       assert(after_obj == start, "precondition");
1791     }
1792   }
1793 #endif
1794 
1795   CollectedHeap::fill_with_objects(start, pointer_delta(end, start));
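  // fill_with_objects() may install more than one filler object for a large
  // range, so walk each filler block and record it in the object start array.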
1796   HeapWord* addr = start;
1797   do {
1798     size_t size = cast_to_oop(addr)->size();
1799     start_array(old_space_id)->update_for_block(addr, addr + size);
1800     addr += size;
1801   } while (addr < end);
1802 }
1803 
1804 void PSParallelCompact::fill_dead_objs_in_dense_prefix(uint worker_id, uint num_workers) {
1805   ParMarkBitMap* bitmap = mark_bitmap();
1806 
1807   HeapWord* const bottom = _space_info[old_space_id].space()->bottom();
1808   HeapWord* const prefix_end = dense_prefix(old_space_id);
1809 
1810   if (bottom == prefix_end) {
1811     return;
1812   }
1813 
1814   size_t bottom_region = _summary_data.addr_to_region_idx(bottom);
1815   size_t prefix_end_region = _summary_data.addr_to_region_idx(prefix_end);
1816 
1817   size_t start_region;
1818   size_t end_region;
1819   split_regions_for_worker(bottom_region, prefix_end_region,
1820                            worker_id, num_workers,
1821                            &start_region, &end_region);
1822 
1823   if (start_region == end_region) {
1824     return;
1825   }
1826 
1827   HeapWord* const start_addr = _summary_data.region_to_addr(start_region);
1828   HeapWord* const end_addr = _summary_data.region_to_addr(end_region);
1829 
1830   // Skip live partial obj (if any) from previous region.
1831   HeapWord* cur_addr;
1832   RegionData* start_region_ptr = _summary_data.region(start_region);
1833   if (start_region_ptr->partial_obj_size() != 0) {
1834     HeapWord* partial_obj_start = start_region_ptr->partial_obj_addr();
1835     assert(bitmap->is_marked(partial_obj_start), "inv");
1836     cur_addr = partial_obj_start + cast_to_oop(partial_obj_start)->size();
1837   } else {
1838     cur_addr = start_addr;
1839   }
1840 
1841   // end_addr is inclusive to handle regions starting with dead space.
1842   while (cur_addr <= end_addr) {
1843     // Use prefix_end to handle trailing obj in each worker region-chunk.
1844     HeapWord* live_start = bitmap->find_obj_beg(cur_addr, prefix_end);
1845     if (cur_addr != live_start) {
1846       // Only worker 0 handles preceding dead space.
1847       if (cur_addr != start_addr || worker_id == 0) {
1848         fill_range_in_dense_prefix(cur_addr, live_start);
1849       }
1850     }
1851     if (live_start >= end_addr) {
1852       break;
1853     }
1854     assert(bitmap->is_marked(live_start), "inv");
1855     cur_addr = live_start + cast_to_oop(live_start)->size();
1856   }
1857 }
1858 
1859 void PSParallelCompact::compact() {
1860   GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
1861 
1862   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
1863 
1864   initialize_shadow_regions(active_gc_threads);
1865   prepare_region_draining_tasks(active_gc_threads);
1866 
1867   {
1868     GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
1869 
1870     FillDensePrefixAndCompactionTask task(active_gc_threads);
1871     ParallelScavengeHeap::heap()->workers().run_task(&task);
1872 
1873 #ifdef ASSERT
1874     verify_filler_in_dense_prefix();
1875 
1876     // Verify that all regions have been processed.
1877     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1878       verify_complete(SpaceId(id));
1879     }
1880 #endif
1881   }
1882 }
1883 
1884 #ifdef ASSERT
1885 void PSParallelCompact::verify_filler_in_dense_prefix() {
1886   HeapWord* bottom = _space_info[old_space_id].space()->bottom();
1887   HeapWord* dense_prefix_end = dense_prefix(old_space_id);
1888   HeapWord* cur_addr = bottom;
1889   while (cur_addr < dense_prefix_end) {
1890     oop obj = cast_to_oop(cur_addr);
1891     oopDesc::verify(obj);
1892     if (!mark_bitmap()->is_marked(cur_addr)) {
1893       Klass* k = cast_to_oop(cur_addr)->klass_without_asserts();
1894       assert(k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass(), "inv");
1895     }
1896     cur_addr += obj->size();
1897   }
1898 }
1899 
1900 void PSParallelCompact::verify_complete(SpaceId space_id) {
1901   // All Regions served as compaction targets, from dense_prefix() to
1902   // new_top(), should be marked as filled and all Regions between new_top()
1903   // and top() should be available (i.e., should have been emptied).
1904   ParallelCompactData& sd = summary_data();
1905   SpaceInfo si = _space_info[space_id];
1906   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
1907   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
1908   const size_t beg_region = sd.addr_to_region_idx(si.dense_prefix());
1909   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
1910   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
1911 
1912   size_t cur_region;
1913   for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
1914     const RegionData* const c = sd.region(cur_region);
1915     assert(c->completed(), "region %zu not filled: destination_count=%u",
1916            cur_region, c->destination_count());
1917   }
1918 
1919   for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
1920     const RegionData* const c = sd.region(cur_region);
1921     assert(c->available(), "region %zu not empty: destination_count=%u",
1922            cur_region, c->destination_count());
1923   }
1924 }
1925 #endif  // #ifdef ASSERT
1926 
1927 // Return the SpaceId for the space containing addr.  If addr is not in the
1928 // heap, last_space_id is returned.  In debug mode it expects the address to be
1929 // in the heap and asserts such.
1930 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
1931   assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
1932 
1933   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1934     if (_space_info[id].space()->contains(addr)) {
1935       return SpaceId(id);
1936     }
1937   }
1938 
1939   assert(false, "no space contains the addr");
1940   return last_space_id;
1941 }
1942 
1943 // Skip over count live words starting from beg, and return the address of the
1944 // next live word.  Unless marked, the word corresponding to beg is assumed to
1945 // be dead.  Callers must either ensure beg does not correspond to the middle of
1946 // an object, or account for those live words in some other way.  Callers must
1947 // also ensure that there are enough live words in the range [beg, end) to skip.
1948 HeapWord*
1949 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
1950 {
1951   assert(count > 0, "sanity");
1952 
1953   ParMarkBitMap* m = mark_bitmap();
1954   HeapWord* cur_addr = beg;
1955   while (true) {
1956     cur_addr = m->find_obj_beg(cur_addr, end);
1957     assert(cur_addr < end, "inv");
1958     size_t obj_size = cast_to_oop(cur_addr)->size();
1959     // Strictly greater-than: if obj_size == count, the next live word is the start of the next object, which the following iteration returns.
1960     if (obj_size > count) {
1961       return cur_addr + count;
1962     }
1963     count -= obj_size;
1964     cur_addr += obj_size;
1965   }
1966 }
1967 
1968 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
1969                                             SpaceId src_space_id,
1970                                             size_t src_region_idx)
1971 {
1972   assert(summary_data().is_region_aligned(dest_addr), "not aligned");
1973 
1974   const SplitInfo& split_info = _space_info[src_space_id].split_info();
1975   if (split_info.dest_region_addr() == dest_addr) {
1976     // The partial object ending at the split point contains the first word to
1977     // be copied to dest_addr.
1978     return split_info.first_src_addr();
1979   }
1980 
1981   const ParallelCompactData& sd = summary_data();
1982   ParMarkBitMap* const bitmap = mark_bitmap();
1983   const size_t RegionSize = ParallelCompactData::RegionSize;
1984 
1985   assert(sd.is_region_aligned(dest_addr), "not aligned");
1986   const RegionData* const src_region_ptr = sd.region(src_region_idx);
1987   const size_t partial_obj_size = src_region_ptr->partial_obj_size();
1988   HeapWord* const src_region_destination = src_region_ptr->destination();
1989 
1990   assert(dest_addr >= src_region_destination, "wrong src region");
1991   assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
1992 
1993   HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
1994   HeapWord* const src_region_end = src_region_beg + RegionSize;
1995 
1996   HeapWord* addr = src_region_beg;
1997   if (dest_addr == src_region_destination) {
1998     // Return the first live word in the source region.
1999     if (partial_obj_size == 0) {
2000       addr = bitmap->find_obj_beg(addr, src_region_end);
2001       assert(addr < src_region_end, "no objects start in src region");
2002     }
2003     return addr;
2004   }
2005 
2006   // Must skip some live data.
2007   size_t words_to_skip = dest_addr - src_region_destination;
2008   assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
2009 
2010   if (partial_obj_size >= words_to_skip) {
2011     // All the live words to skip are part of the partial object.
2012     addr += words_to_skip;
2013     if (partial_obj_size == words_to_skip) {
2014       // Find the first live word past the partial object.
2015       addr = bitmap->find_obj_beg(addr, src_region_end);
2016       assert(addr < src_region_end, "wrong src region");
2017     }
2018     return addr;
2019   }
2020 
2021   // Skip over the partial object (if any).
2022   if (partial_obj_size != 0) {
2023     words_to_skip -= partial_obj_size;
2024     addr += partial_obj_size;
2025   }
2026 
2027   // Skip over live words due to objects that start in the region.
2028   addr = skip_live_words(addr, src_region_end, words_to_skip);
2029   assert(addr < src_region_end, "wrong src region");
2030   return addr;
2031 }
2032 
2033 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2034                                                      SpaceId src_space_id,
2035                                                      size_t beg_region,
2036                                                      HeapWord* end_addr)
2037 {
2038   ParallelCompactData& sd = summary_data();
2039 
2040 #ifdef ASSERT
2041   MutableSpace* const src_space = _space_info[src_space_id].space();
2042   HeapWord* const beg_addr = sd.region_to_addr(beg_region);
2043   assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
2044          "src_space_id does not match beg_addr");
2045   assert(src_space->contains(end_addr) || end_addr == src_space->end(),
2046          "src_space_id does not match end_addr");
2047 #endif // #ifdef ASSERT
2048 
2049   RegionData* const beg = sd.region(beg_region);
2050   RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
2051 
2052   // Regions up to new_top() are enqueued if they become available.
2053   HeapWord* const new_top = _space_info[src_space_id].new_top();
2054   RegionData* const enqueue_end =
2055     sd.addr_to_region_ptr(sd.region_align_up(new_top));
2056 
2057   for (RegionData* cur = beg; cur < end; ++cur) {
2058     assert(cur->data_size() > 0, "region must have live data");
2059     cur->decrement_destination_count();
2060     if (cur < enqueue_end && cur->available() && cur->claim()) {
2061       if (cur->mark_normal()) {
2062         cm->push_region(sd.region(cur));
2063       } else if (cur->mark_copied()) {
2064         // Try to copy the content of the shadow region back to its corresponding
2065         // heap region if the shadow region is filled. Otherwise, the GC thread
2066         // that fills the shadow region will copy the data back (see
2067         // MoveAndUpdateShadowClosure::complete_region).
2068         copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur));
2069         ParCompactionManager::push_shadow_region_mt_safe(cur->shadow_region());
2070         cur->set_completed();
2071       }
2072     }
2073   }
2074 }
2075 
2076 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
2077                                           SpaceId& src_space_id,
2078                                           HeapWord*& src_space_top,
2079                                           HeapWord* end_addr)
2080 {
2081   typedef ParallelCompactData::RegionData RegionData;
2082 
2083   ParallelCompactData& sd = PSParallelCompact::summary_data();
2084   const size_t region_size = ParallelCompactData::RegionSize;
2085 
2086   size_t src_region_idx = 0;
2087 
2088   // Skip empty regions (if any) up to the top of the space.
2089   HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
2090   RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
2091   HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
2092   const RegionData* const top_region_ptr =
2093     sd.addr_to_region_ptr(top_aligned_up);
2094   while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
2095     ++src_region_ptr;
2096   }
2097 
2098   if (src_region_ptr < top_region_ptr) {
2099     // The next source region is in the current space.  Update src_region_idx
2100     // and the source address to match src_region_ptr.
2101     src_region_idx = sd.region(src_region_ptr);
2102     HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
2103     if (src_region_addr > closure.source()) {
2104       closure.set_source(src_region_addr);
2105     }
2106     return src_region_idx;
2107   }
2108 
2109   // Switch to a new source space and find the first non-empty region.
2110   unsigned int space_id = src_space_id + 1;
2111   assert(space_id < last_space_id, "not enough spaces");
2112 
2113   HeapWord* const destination = closure.destination();
2114 
2115   do {
2116     MutableSpace* space = _space_info[space_id].space();
2117     HeapWord* const bottom = space->bottom();
2118     const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
2119 
2120     // Iterate over the spaces that do not compact into themselves.
2121     if (bottom_cp->destination() != bottom) {
2122       HeapWord* const top_aligned_up = sd.region_align_up(space->top());
2123       const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
2124 
2125       for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
2126         if (src_cp->live_obj_size() > 0) {
2127           // Found it.
2128           assert(src_cp->destination() == destination,
2129                  "first live obj in the space must match the destination");
2130           assert(src_cp->partial_obj_size() == 0,
2131                  "a space cannot begin with a partial obj");
2132 
2133           src_space_id = SpaceId(space_id);
2134           src_space_top = space->top();
2135           const size_t src_region_idx = sd.region(src_cp);
2136           closure.set_source(sd.region_to_addr(src_region_idx));
2137           return src_region_idx;
2138         } else {
2139           assert(src_cp->data_size() == 0, "sanity");
2140         }
2141       }
2142     }
2143   } while (++space_id < last_space_id);
2144 
2145   assert(false, "no source region was found");
2146   return 0;
2147 }
2148 
2149 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2150   ParallelCompactData& sd = summary_data();
2151   assert(sd.is_region_aligned(region_start_addr), "precondition");
2152 
2153   // Use per-region partial_obj_size to locate the end of the obj that extends into region_start_addr.
2154   SplitInfo& split_info = _space_info[space_id(region_start_addr)].split_info();
2155   size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2156   size_t end_region_idx = sd.region_count();
2157   size_t accumulated_size = 0;
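  // Walk forward region by region: a region whose partial_obj_size equals
  // RegionSize is completely covered by the obj, so the obj continues into the
  // next region; the first region it does not fully cover (or a split region)
  // is where it ends.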
2158   for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2159     if (split_info.is_split(region_idx)) {
2160       accumulated_size += split_info.partial_obj_size();
2161       break;
2162     }
2163     size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2164     accumulated_size += cur_partial_obj_size;
2165     if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2166       break;
2167     }
2168   }
2169   return region_start_addr + accumulated_size;
2170 }
2171 
2172 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2173 {
2174   ParMarkBitMap* const bitmap = mark_bitmap();
2175   ParallelCompactData& sd = summary_data();
2176   RegionData* const region_ptr = sd.region(region_idx);
2177 
2178   // Get the source region and related info.
2179   size_t src_region_idx = region_ptr->source_region();
2180   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2181   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2182   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2183 
2184   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2185 
2186   // Adjust src_region_idx to prepare for decrementing destination counts (the
2187   // destination count is not decremented when a region is copied to itself).
2188   if (src_region_idx == region_idx) {
2189     src_region_idx += 1;
2190   }
2191 
2192   if (bitmap->is_unmarked(closure.source())) {
2193     // The first source word is in the middle of an object; copy the remainder
2194     // of the object or as much as will fit.  The fact that pointer updates were
2195     // deferred will be noted when the object header is processed.
2196     HeapWord* const old_src_addr = closure.source();
2197     {
2198       HeapWord* region_start = sd.region_align_down(closure.source());
2199       HeapWord* obj_start = bitmap->find_obj_beg_reverse(region_start, closure.source());
2200       HeapWord* obj_end;
2201       if (bitmap->is_marked(obj_start)) {
2202         HeapWord* next_region_start = region_start + ParallelCompactData::RegionSize;
2203         HeapWord* partial_obj_start = (next_region_start >= src_space_top)
2204                                       ? nullptr
2205                                       : sd.addr_to_region_ptr(next_region_start)->partial_obj_addr();
2206         if (partial_obj_start == obj_start) {
2207           // This obj extends into the next region.
2208           obj_end = partial_obj_end(next_region_start);
2209         } else {
2210           // Completely contained in this region; safe to use size().
2211           obj_end = obj_start + cast_to_oop(obj_start)->size();
2212         }
2213       } else {
2214         // This obj starts in a previous region and extends into the current one.
2215         obj_end = partial_obj_end(region_start);
2216       }
2217       size_t partial_obj_size = pointer_delta(obj_end, closure.source());
2218       closure.copy_partial_obj(partial_obj_size);
2219     }
2220 
2221     if (closure.is_full()) {
2222       decrement_destination_counts(cm, src_space_id, src_region_idx,
2223                                    closure.source());
2224       closure.complete_region(dest_addr, region_ptr);
2225       return;
2226     }
2227 
2228     HeapWord* const end_addr = sd.region_align_down(closure.source());
2229     if (sd.region_align_down(old_src_addr) != end_addr) {
2230       // The partial object was copied from more than one source region.
2231       decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2232 
2233       // Move to the next source region, possibly switching spaces as well.  All
2234       // args except end_addr may be modified.
2235       src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2236                                        end_addr);
2237     }
2238   }
2239 
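  // Copy whole objects, one source region at a time (switching source spaces
  // when necessary), until this destination region is full.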
2240   do {
2241     HeapWord* cur_addr = closure.source();
2242     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2243                                     src_space_top);
2244     HeapWord* partial_obj_start = (end_addr == src_space_top)
2245                                 ? nullptr
2246                                 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2247     // Apply the closure to objs inside [cur_addr, end_addr).
2248     do {
2249       cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2250       if (cur_addr == end_addr) {
2251         break;
2252       }
2253       size_t obj_size;
2254       if (partial_obj_start == cur_addr) {
2255         obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2256       } else {
2257         // This obj doesn't extend into next region; size() is safe to use.
2258         obj_size = cast_to_oop(cur_addr)->size();
2259       }
2260       closure.do_addr(cur_addr, obj_size);
2261       cur_addr += obj_size;
2262     } while (cur_addr < end_addr && !closure.is_full());
2263 
2264     if (closure.is_full()) {
2265       decrement_destination_counts(cm, src_space_id, src_region_idx,
2266                                    closure.source());
2267       closure.complete_region(dest_addr, region_ptr);
2268       return;
2269     }
2270 
2271     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2272 
2273     // Move to the next source region, possibly switching spaces as well.  All
2274     // args except end_addr may be modified.
2275     src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2276                                      end_addr);
2277   } while (true);
2278 }
2279 
2280 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2281 {
2282   MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2283   fill_region(cm, cl, region_idx);
2284 }
2285 
2286 void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx)
2287 {
2288   // Get a shadow region first
2289   ParallelCompactData& sd = summary_data();
2290   RegionData* const region_ptr = sd.region(region_idx);
2291   size_t shadow_region = ParCompactionManager::pop_shadow_region_mt_safe(region_ptr);
2292   // The InvalidShadow return value indicates the corresponding heap region is available,
2293   // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
2294   // MoveAndUpdateShadowClosure to fill the acquired shadow region.
2295   if (shadow_region == ParCompactionManager::InvalidShadow) {
2296     MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2297     region_ptr->shadow_to_normal();
2298     return fill_region(cm, cl, region_idx);
2299   } else {
2300     MoveAndUpdateShadowClosure cl(mark_bitmap(), region_idx, shadow_region);
2301     return fill_region(cm, cl, region_idx);
2302   }
2303 }
2304 
2305 void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr)
2306 {
2307   Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
2308 }
2309 
2310 bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_t &region_idx)
2311 {
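  // Scan candidate destination regions below old-space new_top in strides of
  // the active worker count, starting from this worker's own offset (set in
  // initialize_shadow_regions), and claim the first one that can be marked for
  // shadow filling.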
2312   size_t next = cm->next_shadow_region();
2313   ParallelCompactData& sd = summary_data();
2314   size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
2315   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
2316 
2317   while (next < old_new_top) {
2318     if (sd.region(next)->mark_shadow()) {
2319       region_idx = next;
2320       return true;
2321     }
2322     next = cm->move_next_shadow_region_by(active_gc_threads);
2323   }
2324 
2325   return false;
2326 }
2327 
2328 // The shadow region is an optimization to address region dependencies in full GC. The basic
2329 // idea is making more regions available by temporarily storing their live objects in empty
2330 // shadow regions to resolve dependencies between them and the destination regions. Therefore,
2331 // GC threads need not wait for destination regions to become available before processing sources.
2332 //
2333 // A typical workflow would be:
2334 // After draining its own stack and failing to steal from others, a GC worker would pick an
2335 // unavailable region (destination count > 0) and get a shadow region. Then the worker fills
2336 // the shadow region by copying live objects from source regions of the unavailable one. Once
2337 // the unavailable region becomes available, the data in the shadow region will be copied back.
2338 // Shadow regions are empty regions in the to-space and regions between top and end of other spaces.
2339 void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
2340 {
2341   const ParallelCompactData& sd = PSParallelCompact::summary_data();
2342 
2343   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2344     SpaceInfo* const space_info = _space_info + id;
2345     MutableSpace* const space = space_info->space();
2346 
2347     const size_t beg_region =
2348       sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
2349     const size_t end_region =
2350       sd.addr_to_region_idx(sd.region_align_down(space->end()));
2351 
2352     for (size_t cur = beg_region; cur < end_region; ++cur) {
2353       ParCompactionManager::push_shadow_region(cur);
2354     }
2355   }
2356 
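  // Give each worker a distinct starting candidate in old space;
  // steal_unavailable_region() then advances in strides of the worker count so
  // workers probe disjoint regions.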
2357   size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
2358   for (uint i = 0; i < parallel_gc_threads; i++) {
2359     ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
2360     cm->set_next_shadow_region(beg_region + i);
2361   }
2362 }
2363 
2364 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2365 {
2366   size_t words = MIN2(partial_obj_size, words_remaining());
2367 
2368   // This test is necessary; if omitted, the pointer updates to a partial object
2369   // that crosses the dense prefix boundary could be overwritten.
2370   if (source() != copy_destination()) {
2371     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2372     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2373   }
2374   update_state(words);
2375 }
2376 
2377 void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2378   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
2379   region_ptr->set_completed();
2380 }
2381 
2382 void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
2383   assert(destination() != nullptr, "sanity");
2384   _source = addr;
2385 
2386   // The start_array must be updated even if the object is not moving.
2387   if (_start_array != nullptr) {
2388     _start_array->update_for_block(destination(), destination() + words);
2389   }
2390 
2391   // Avoid overflow
2392   words = MIN2(words, words_remaining());
2393   assert(words > 0, "inv");
2394 
2395   if (copy_destination() != source()) {
2396     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2397     assert(source() != destination(), "inv");
2398     assert(cast_to_oop(source())->is_forwarded(), "inv");
2399     assert(cast_to_oop(source())->forwardee() == cast_to_oop(destination()), "inv");
2400     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2401     cast_to_oop(copy_destination())->init_mark();
2402   }
2403 
2404   update_state(words);
2405 }
2406 
2407 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2408   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2409   // Record the shadow region index
2410   region_ptr->set_shadow_region(_shadow);
2411   // Mark the shadow region as filled to indicate the data is ready to be
2412   // copied back
2413   region_ptr->mark_filled();
2414   // Try to copy the content of the shadow region back to its corresponding
2415   // heap region if available; the GC thread that decreases the destination
2416   // count to zero will do the copying otherwise (see
2417   // PSParallelCompact::decrement_destination_counts).
2418   if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2419     region_ptr->set_completed();
2420     PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2421     ParCompactionManager::push_shadow_region_mt_safe(_shadow);
2422   }
2423 }
2424