1 /*
   2  * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.hpp"
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "classfile/stringTable.hpp"
  29 #include "classfile/symbolTable.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "compiler/oopMap.hpp"
  33 #include "gc/parallel/parallelArguments.hpp"
  34 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  35 #include "gc/parallel/parMarkBitMap.inline.hpp"
  36 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  37 #include "gc/parallel/psCompactionManager.inline.hpp"
  38 #include "gc/parallel/psOldGen.hpp"
  39 #include "gc/parallel/psParallelCompact.inline.hpp"
  40 #include "gc/parallel/psPromotionManager.inline.hpp"
  41 #include "gc/parallel/psRootType.hpp"
  42 #include "gc/parallel/psScavenge.hpp"
  43 #include "gc/parallel/psStringDedup.hpp"
  44 #include "gc/parallel/psYoungGen.hpp"
  45 #include "gc/shared/gcCause.hpp"
  46 #include "gc/shared/gcHeapSummary.hpp"
  47 #include "gc/shared/gcId.hpp"
  48 #include "gc/shared/gcLocker.hpp"
  49 #include "gc/shared/gcTimer.hpp"
  50 #include "gc/shared/gcTrace.hpp"
  51 #include "gc/shared/gcTraceTime.inline.hpp"
  52 #include "gc/shared/isGCActiveMark.hpp"
  53 #include "gc/shared/oopStorage.inline.hpp"
  54 #include "gc/shared/oopStorageSet.inline.hpp"
  55 #include "gc/shared/oopStorageSetParState.inline.hpp"
  56 #include "gc/shared/referencePolicy.hpp"
  57 #include "gc/shared/referenceProcessor.hpp"
  58 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  59 #include "gc/shared/spaceDecorator.inline.hpp"
  60 #include "gc/shared/taskTerminator.hpp"
  61 #include "gc/shared/weakProcessor.inline.hpp"
  62 #include "gc/shared/workerPolicy.hpp"
  63 #include "gc/shared/workerThread.hpp"
  64 #include "gc/shared/workerUtils.hpp"
  65 #include "logging/log.hpp"
  66 #include "memory/iterator.inline.hpp"
  67 #include "memory/metaspaceUtils.hpp"
  68 #include "memory/resourceArea.hpp"
  69 #include "memory/universe.hpp"
  70 #include "oops/access.inline.hpp"
  71 #include "oops/flatArrayKlass.inline.hpp"
  72 #include "oops/instanceClassLoaderKlass.inline.hpp"
  73 #include "oops/instanceKlass.inline.hpp"
  74 #include "oops/instanceMirrorKlass.inline.hpp"
  75 #include "oops/methodData.hpp"
  76 #include "oops/objArrayKlass.inline.hpp"
  77 #include "oops/oop.inline.hpp"
  78 #include "runtime/atomic.hpp"
  79 #include "runtime/handles.inline.hpp"
  80 #include "runtime/java.hpp"
  81 #include "runtime/safepoint.hpp"
  82 #include "runtime/threads.hpp"
  83 #include "runtime/vmThread.hpp"
  84 #include "services/memTracker.hpp"
  85 #include "services/memoryService.hpp"
  86 #include "utilities/align.hpp"
  87 #include "utilities/debug.hpp"
  88 #include "utilities/events.hpp"
  89 #include "utilities/formatBuffer.hpp"
  90 #include "utilities/macros.hpp"
  91 #include "utilities/stack.inline.hpp"
  92 #if INCLUDE_JVMCI
  93 #include "jvmci/jvmci.hpp"
  94 #endif
  95 
  96 #include <math.h>
  97 
  98 // All sizes are in HeapWords.
  99 const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
 100 const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
 101 const size_t ParallelCompactData::RegionSizeBytes =
 102   RegionSize << LogHeapWordSize;
 103 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
 104 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
 105 const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;
 106 
 107 const size_t ParallelCompactData::Log2BlockSize   = 7; // 128 words
 108 const size_t ParallelCompactData::BlockSize       = (size_t)1 << Log2BlockSize;
 109 const size_t ParallelCompactData::BlockSizeBytes  =
 110   BlockSize << LogHeapWordSize;
 111 const size_t ParallelCompactData::BlockSizeOffsetMask = BlockSize - 1;
 112 const size_t ParallelCompactData::BlockAddrOffsetMask = BlockSizeBytes - 1;
 113 const size_t ParallelCompactData::BlockAddrMask       = ~BlockAddrOffsetMask;
 114 
 115 const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize;
 116 const size_t ParallelCompactData::Log2BlocksPerRegion =
 117   Log2RegionSize - Log2BlockSize;
 118 
 119 const ParallelCompactData::RegionData::region_sz_t
 120 ParallelCompactData::RegionData::dc_shift = 27;
 121 
 122 const ParallelCompactData::RegionData::region_sz_t
 123 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
 124 
 125 const ParallelCompactData::RegionData::region_sz_t
 126 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
 127 
 128 const ParallelCompactData::RegionData::region_sz_t
 129 ParallelCompactData::RegionData::los_mask = ~dc_mask;
 130 
 131 const ParallelCompactData::RegionData::region_sz_t
 132 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
 133 
 134 const ParallelCompactData::RegionData::region_sz_t
 135 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
 136 
 137 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
 138 
 139 SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
 140 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
 141 
 142 double PSParallelCompact::_dwl_mean;
 143 double PSParallelCompact::_dwl_std_dev;
 144 double PSParallelCompact::_dwl_first_term;
 145 double PSParallelCompact::_dwl_adjustment;
 146 #ifdef  ASSERT
 147 bool   PSParallelCompact::_dwl_initialized = false;
 148 #endif  // #ifdef ASSERT
 149 
 150 void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
 151                        HeapWord* destination)
 152 {
 153   assert(src_region_idx != 0, "invalid src_region_idx");
 154   assert(partial_obj_size != 0, "invalid partial_obj_size argument");
 155   assert(destination != NULL, "invalid destination argument");
 156 
 157   _src_region_idx = src_region_idx;
 158   _partial_obj_size = partial_obj_size;
 159   _destination = destination;
 160 
 161   // These fields may not be updated below, so make sure they're clear.
 162   assert(_dest_region_addr == NULL, "should have been cleared");
 163   assert(_first_src_addr == NULL, "should have been cleared");
 164 
 165   // Determine the number of destination regions for the partial object.
 166   HeapWord* const last_word = destination + partial_obj_size - 1;
 167   const ParallelCompactData& sd = PSParallelCompact::summary_data();
 168   HeapWord* const beg_region_addr = sd.region_align_down(destination);
 169   HeapWord* const end_region_addr = sd.region_align_down(last_word);
 170 
 171   if (beg_region_addr == end_region_addr) {
 172     // One destination region.
 173     _destination_count = 1;
 174     if (end_region_addr == destination) {
 175       // The destination falls on a region boundary, thus the first word of the
 176       // partial object will be the first word copied to the destination region.
 177       _dest_region_addr = end_region_addr;
 178       _first_src_addr = sd.region_to_addr(src_region_idx);
 179     }
 180   } else {
 181     // Two destination regions.  When copied, the partial object will cross a
 182     // destination region boundary, so a word somewhere within the partial
 183     // object will be the first word copied to the second destination region.
 184     _destination_count = 2;
 185     _dest_region_addr = end_region_addr;
 186     const size_t ofs = pointer_delta(end_region_addr, destination);
 187     assert(ofs < _partial_obj_size, "sanity");
 188     _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
 189   }
 190 }
 191 
 192 void SplitInfo::clear()
 193 {
 194   _src_region_idx = 0;
 195   _partial_obj_size = 0;
 196   _destination = NULL;
 197   _destination_count = 0;
 198   _dest_region_addr = NULL;
 199   _first_src_addr = NULL;
 200   assert(!is_valid(), "sanity");
 201 }
 202 
 203 #ifdef  ASSERT
 204 void SplitInfo::verify_clear()
 205 {
 206   assert(_src_region_idx == 0, "not clear");
 207   assert(_partial_obj_size == 0, "not clear");
 208   assert(_destination == NULL, "not clear");
 209   assert(_destination_count == 0, "not clear");
 210   assert(_dest_region_addr == NULL, "not clear");
 211   assert(_first_src_addr == NULL, "not clear");
 212 }
 213 #endif  // #ifdef ASSERT
 214 
 215 
 216 void PSParallelCompact::print_on_error(outputStream* st) {
 217   _mark_bitmap.print_on_error(st);
 218 }
 219 
 220 #ifndef PRODUCT
 221 const char* PSParallelCompact::space_names[] = {
 222   "old ", "eden", "from", "to  "
 223 };
 224 
 225 void PSParallelCompact::print_region_ranges() {
 226   if (!log_develop_is_enabled(Trace, gc, compaction)) {
 227     return;
 228   }
 229   Log(gc, compaction) log;
 230   ResourceMark rm;
 231   LogStream ls(log.trace());
 232   Universe::print_on(&ls);
 233   log.trace("space  bottom     top        end        new_top");
 234   log.trace("------ ---------- ---------- ---------- ----------");
 235 
 236   for (unsigned int id = 0; id < last_space_id; ++id) {
 237     const MutableSpace* space = _space_info[id].space();
 238     log.trace("%u %s "
 239               SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
 240               SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
 241               id, space_names[id],
 242               summary_data().addr_to_region_idx(space->bottom()),
 243               summary_data().addr_to_region_idx(space->top()),
 244               summary_data().addr_to_region_idx(space->end()),
 245               summary_data().addr_to_region_idx(_space_info[id].new_top()));
 246   }
 247 }
 248 
 249 void
 250 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
 251 {
 252 #define REGION_IDX_FORMAT        SIZE_FORMAT_W(7)
 253 #define REGION_DATA_FORMAT       SIZE_FORMAT_W(5)
 254 
 255   ParallelCompactData& sd = PSParallelCompact::summary_data();
 256   size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
 257   log_develop_trace(gc, compaction)(
 258       REGION_IDX_FORMAT " " PTR_FORMAT " "
 259       REGION_IDX_FORMAT " " PTR_FORMAT " "
 260       REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
 261       REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
 262       i, p2i(c->data_location()), dci, p2i(c->destination()),
 263       c->partial_obj_size(), c->live_obj_size(),
 264       c->data_size(), c->source_region(), c->destination_count());
 265 
 266 #undef  REGION_IDX_FORMAT
 267 #undef  REGION_DATA_FORMAT
 268 }
 269 
 270 void
 271 print_generic_summary_data(ParallelCompactData& summary_data,
 272                            HeapWord* const beg_addr,
 273                            HeapWord* const end_addr)
 274 {
 275   size_t total_words = 0;
 276   size_t i = summary_data.addr_to_region_idx(beg_addr);
 277   const size_t last = summary_data.addr_to_region_idx(end_addr);
 278   HeapWord* pdest = 0;
 279 
 280   while (i < last) {
 281     ParallelCompactData::RegionData* c = summary_data.region(i);
 282     if (c->data_size() != 0 || c->destination() != pdest) {
 283       print_generic_summary_region(i, c);
 284       total_words += c->data_size();
 285       pdest = c->destination();
 286     }
 287     ++i;
 288   }
 289 
 290   log_develop_trace(gc, compaction)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
 291 }
 292 
 293 void
 294 PSParallelCompact::print_generic_summary_data(ParallelCompactData& summary_data,
 295                                               HeapWord* const beg_addr,
 296                                               HeapWord* const end_addr) {
 297   ::print_generic_summary_data(summary_data,beg_addr, end_addr);
 298 }
 299 
 300 void
 301 print_generic_summary_data(ParallelCompactData& summary_data,
 302                            SpaceInfo* space_info)
 303 {
 304   if (!log_develop_is_enabled(Trace, gc, compaction)) {
 305     return;
 306   }
 307 
 308   for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
 309     const MutableSpace* space = space_info[id].space();
 310     print_generic_summary_data(summary_data, space->bottom(),
 311                                MAX2(space->top(), space_info[id].new_top()));
 312   }
 313 }
 314 
 315 void
 316 print_initial_summary_data(ParallelCompactData& summary_data,
 317                            const MutableSpace* space) {
 318   if (space->top() == space->bottom()) {
 319     return;
 320   }
 321 
 322   const size_t region_size = ParallelCompactData::RegionSize;
 323   typedef ParallelCompactData::RegionData RegionData;
 324   HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
 325   const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
 326   const RegionData* c = summary_data.region(end_region - 1);
 327   HeapWord* end_addr = c->destination() + c->data_size();
 328   const size_t live_in_space = pointer_delta(end_addr, space->bottom());
 329 
 330   // Print (and count) the full regions at the beginning of the space.
 331   size_t full_region_count = 0;
 332   size_t i = summary_data.addr_to_region_idx(space->bottom());
 333   while (i < end_region && summary_data.region(i)->data_size() == region_size) {
 334     ParallelCompactData::RegionData* c = summary_data.region(i);
 335     log_develop_trace(gc, compaction)(
 336         SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
 337         i, p2i(c->destination()),
 338         c->partial_obj_size(), c->live_obj_size(),
 339         c->data_size(), c->source_region(), c->destination_count());
 340     ++full_region_count;
 341     ++i;
 342   }
 343 
 344   size_t live_to_right = live_in_space - full_region_count * region_size;
 345 
 346   double max_reclaimed_ratio = 0.0;
 347   size_t max_reclaimed_ratio_region = 0;
 348   size_t max_dead_to_right = 0;
 349   size_t max_live_to_right = 0;
 350 
 351   // Print the 'reclaimed ratio' for regions while there is something live in
 352   // the region or to the right of it.  The remaining regions are empty (and
 353   // uninteresting), and computing the ratio will result in division by 0.
 354   while (i < end_region && live_to_right > 0) {
 355     c = summary_data.region(i);
 356     HeapWord* const region_addr = summary_data.region_to_addr(i);
 357     const size_t used_to_right = pointer_delta(space->top(), region_addr);
 358     const size_t dead_to_right = used_to_right - live_to_right;
 359     const double reclaimed_ratio = double(dead_to_right) / live_to_right;
 360 
 361     if (reclaimed_ratio > max_reclaimed_ratio) {
 362             max_reclaimed_ratio = reclaimed_ratio;
 363             max_reclaimed_ratio_region = i;
 364             max_dead_to_right = dead_to_right;
 365             max_live_to_right = live_to_right;
 366     }
 367 
 368     ParallelCompactData::RegionData* c = summary_data.region(i);
 369     log_develop_trace(gc, compaction)(
 370         SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d"
 371         "%12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
 372         i, p2i(c->destination()),
 373         c->partial_obj_size(), c->live_obj_size(),
 374         c->data_size(), c->source_region(), c->destination_count(),
 375         reclaimed_ratio, dead_to_right, live_to_right);
 376 
 377 
 378     live_to_right -= c->data_size();
 379     ++i;
 380   }
 381 
 382   // Any remaining regions are empty.  Print one more if there is one.
 383   if (i < end_region) {
 384     ParallelCompactData::RegionData* c = summary_data.region(i);
 385     log_develop_trace(gc, compaction)(
 386         SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
 387          i, p2i(c->destination()),
 388          c->partial_obj_size(), c->live_obj_size(),
 389          c->data_size(), c->source_region(), c->destination_count());
 390   }
 391 
 392   log_develop_trace(gc, compaction)("max:  " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
 393                                     max_reclaimed_ratio_region, max_dead_to_right, max_live_to_right, max_reclaimed_ratio);
 394 }
 395 
 396 void
 397 print_initial_summary_data(ParallelCompactData& summary_data,
 398                            SpaceInfo* space_info) {
 399   if (!log_develop_is_enabled(Trace, gc, compaction)) {
 400     return;
 401   }
 402 
 403   unsigned int id = PSParallelCompact::old_space_id;
 404   const MutableSpace* space;
 405   do {
 406     space = space_info[id].space();
 407     print_initial_summary_data(summary_data, space);
 408   } while (++id < PSParallelCompact::eden_space_id);
 409 
 410   do {
 411     space = space_info[id].space();
 412     print_generic_summary_data(summary_data, space->bottom(), space->top());
 413   } while (++id < PSParallelCompact::last_space_id);
 414 }
 415 #endif  // #ifndef PRODUCT
 416 
 417 ParallelCompactData::ParallelCompactData() :
 418   _region_start(NULL),
 419   DEBUG_ONLY(_region_end(NULL) COMMA)
 420   _region_vspace(NULL),
 421   _reserved_byte_size(0),
 422   _region_data(NULL),
 423   _region_count(0),
 424   _block_vspace(NULL),
 425   _block_data(NULL),
 426   _block_count(0) {}
 427 
 428 bool ParallelCompactData::initialize(MemRegion covered_region)
 429 {
 430   _region_start = covered_region.start();
 431   const size_t region_size = covered_region.word_size();
 432   DEBUG_ONLY(_region_end = _region_start + region_size;)
 433 
 434   assert(region_align_down(_region_start) == _region_start,
 435          "region start not aligned");
 436 
 437   bool result = initialize_region_data(region_size) && initialize_block_data();
 438   return result;
 439 }
 440 
 441 PSVirtualSpace*
 442 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 443 {
 444   const size_t raw_bytes = count * element_size;
 445   const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
 446   const size_t granularity = os::vm_allocation_granularity();
 447   _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
 448 
 449   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
 450     MAX2(page_sz, granularity);
 451   ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
 452   os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, page_sz, rs.base(),
 453                        rs.size());
 454 
 455   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 456 
 457   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
 458   if (vspace != 0) {
 459     if (vspace->expand_by(_reserved_byte_size)) {
 460       return vspace;
 461     }
 462     delete vspace;
 463     // Release memory reserved in the space.
 464     rs.release();
 465   }
 466 
 467   return 0;
 468 }
 469 
 470 bool ParallelCompactData::initialize_region_data(size_t region_size)
 471 {
 472   assert((region_size & RegionSizeOffsetMask) == 0,
 473          "region size not a multiple of RegionSize");
 474 
 475   const size_t count = region_size >> Log2RegionSize;
 476   _region_vspace = create_vspace(count, sizeof(RegionData));
 477   if (_region_vspace != 0) {
 478     _region_data = (RegionData*)_region_vspace->reserved_low_addr();
 479     _region_count = count;
 480     return true;
 481   }
 482   return false;
 483 }
 484 
 485 bool ParallelCompactData::initialize_block_data()
 486 {
 487   assert(_region_count != 0, "region data must be initialized first");
 488   const size_t count = _region_count << Log2BlocksPerRegion;
 489   _block_vspace = create_vspace(count, sizeof(BlockData));
 490   if (_block_vspace != 0) {
 491     _block_data = (BlockData*)_block_vspace->reserved_low_addr();
 492     _block_count = count;
 493     return true;
 494   }
 495   return false;
 496 }
 497 
 498 void ParallelCompactData::clear()
 499 {
 500   memset(_region_data, 0, _region_vspace->committed_size());
 501   memset(_block_data, 0, _block_vspace->committed_size());
 502 }
 503 
 504 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
 505   assert(beg_region <= _region_count, "beg_region out of range");
 506   assert(end_region <= _region_count, "end_region out of range");
 507   assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize");
 508 
 509   const size_t region_cnt = end_region - beg_region;
 510   memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
 511 
 512   const size_t beg_block = beg_region * BlocksPerRegion;
 513   const size_t block_cnt = region_cnt * BlocksPerRegion;
 514   memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
 515 }
 516 
 517 HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
 518 {
 519   const RegionData* cur_cp = region(region_idx);
 520   const RegionData* const end_cp = region(region_count() - 1);
 521 
 522   HeapWord* result = region_to_addr(region_idx);
 523   if (cur_cp < end_cp) {
 524     do {
 525       result += cur_cp->partial_obj_size();
 526     } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
 527   }
 528   return result;
 529 }
 530 
 531 void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
 532 {
 533   const size_t obj_ofs = pointer_delta(addr, _region_start);
 534   const size_t beg_region = obj_ofs >> Log2RegionSize;
 535   // end_region is inclusive
 536   const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
 537 
 538   if (beg_region == end_region) {
 539     // All in one region.
 540     _region_data[beg_region].add_live_obj(len);
 541     return;
 542   }
 543 
 544   // First region.
 545   const size_t beg_ofs = region_offset(addr);
 546   _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);
 547 
 548   // Middle regions--completely spanned by this object.
 549   for (size_t region = beg_region + 1; region < end_region; ++region) {
 550     _region_data[region].set_partial_obj_size(RegionSize);
 551     _region_data[region].set_partial_obj_addr(addr);
 552   }
 553 
 554   // Last region.
 555   const size_t end_ofs = region_offset(addr + len - 1);
 556   _region_data[end_region].set_partial_obj_size(end_ofs + 1);
 557   _region_data[end_region].set_partial_obj_addr(addr);
 558 }
 559 
 560 void
 561 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
 562 {
 563   assert(is_region_aligned(beg), "not RegionSize aligned");
 564   assert(is_region_aligned(end), "not RegionSize aligned");
 565 
 566   size_t cur_region = addr_to_region_idx(beg);
 567   const size_t end_region = addr_to_region_idx(end);
 568   HeapWord* addr = beg;
 569   while (cur_region < end_region) {
 570     _region_data[cur_region].set_destination(addr);
 571     _region_data[cur_region].set_destination_count(0);
 572     _region_data[cur_region].set_source_region(cur_region);
 573     _region_data[cur_region].set_data_location(addr);
 574 
 575     // Update live_obj_size so the region appears completely full.
 576     size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
 577     _region_data[cur_region].set_live_obj_size(live_size);
 578 
 579     ++cur_region;
 580     addr += RegionSize;
 581   }
 582 }
 583 
 584 // Find the point at which a space can be split and, if necessary, record the
 585 // split point.
 586 //
 587 // If the current src region (which overflowed the destination space) doesn't
 588 // have a partial object, the split point is at the beginning of the current src
 589 // region (an "easy" split, no extra bookkeeping required).
 590 //
 591 // If the current src region has a partial object, the split point is in the
 592 // region where that partial object starts (call it the split_region).  If
 593 // split_region has a partial object, then the split point is just after that
 594 // partial object (a "hard" split where we have to record the split data and
 595 // zero the partial_obj_size field).  With a "hard" split, we know that the
 596 // partial_obj ends within split_region because the partial object that caused
 597 // the overflow starts in split_region.  If split_region doesn't have a partial
 598 // obj, then the split is at the beginning of split_region (another "easy"
 599 // split).
 600 HeapWord*
 601 ParallelCompactData::summarize_split_space(size_t src_region,
 602                                            SplitInfo& split_info,
 603                                            HeapWord* destination,
 604                                            HeapWord* target_end,
 605                                            HeapWord** target_next)
 606 {
 607   assert(destination <= target_end, "sanity");
 608   assert(destination + _region_data[src_region].data_size() > target_end,
 609     "region should not fit into target space");
 610   assert(is_region_aligned(target_end), "sanity");
 611 
 612   size_t split_region = src_region;
 613   HeapWord* split_destination = destination;
 614   size_t partial_obj_size = _region_data[src_region].partial_obj_size();
 615 
 616   if (destination + partial_obj_size > target_end) {
 617     // The split point is just after the partial object (if any) in the
 618     // src_region that contains the start of the object that overflowed the
 619     // destination space.
 620     //
 621     // Find the start of the "overflow" object and set split_region to the
 622     // region containing it.
 623     HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
 624     split_region = addr_to_region_idx(overflow_obj);
 625 
 626     // Clear the source_region field of all destination regions whose first word
 627     // came from data after the split point (a non-null source_region field
 628     // implies a region must be filled).
 629     //
 630     // An alternative to the simple loop below:  clear during post_compact(),
 631     // which uses memcpy instead of individual stores, and is easy to
 632     // parallelize.  (The downside is that it clears the entire RegionData
 633     // object as opposed to just one field.)
 634     //
 635     // post_compact() would have to clear the summary data up to the highest
 636     // address that was written during the summary phase, which would be
 637     //
 638     //         max(top, max(new_top, clear_top))
 639     //
 640     // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
 641     // to target_end.
 642     const RegionData* const sr = region(split_region);
 643     const size_t beg_idx =
 644       addr_to_region_idx(region_align_up(sr->destination() +
 645                                          sr->partial_obj_size()));
 646     const size_t end_idx = addr_to_region_idx(target_end);
 647 
 648     log_develop_trace(gc, compaction)("split:  clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
 649     for (size_t idx = beg_idx; idx < end_idx; ++idx) {
 650       _region_data[idx].set_source_region(0);
 651     }
 652 
 653     // Set split_destination and partial_obj_size to reflect the split region.
 654     split_destination = sr->destination();
 655     partial_obj_size = sr->partial_obj_size();
 656   }
 657 
 658   // The split is recorded only if a partial object extends onto the region.
 659   if (partial_obj_size != 0) {
 660     _region_data[split_region].set_partial_obj_size(0);
 661     split_info.record(split_region, partial_obj_size, split_destination);
 662   }
 663 
 664   // Setup the continuation addresses.
 665   *target_next = split_destination + partial_obj_size;
 666   HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
 667 
 668   if (log_develop_is_enabled(Trace, gc, compaction)) {
 669     const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
 670     log_develop_trace(gc, compaction)("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
 671                                       split_type, p2i(source_next), split_region, partial_obj_size);
 672     log_develop_trace(gc, compaction)("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
 673                                       split_type, p2i(split_destination),
 674                                       addr_to_region_idx(split_destination),
 675                                       p2i(*target_next));
 676 
 677     if (partial_obj_size != 0) {
 678       HeapWord* const po_beg = split_info.destination();
 679       HeapWord* const po_end = po_beg + split_info.partial_obj_size();
 680       log_develop_trace(gc, compaction)("%s split:  po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
 681                                         split_type,
 682                                         p2i(po_beg), addr_to_region_idx(po_beg),
 683                                         p2i(po_end), addr_to_region_idx(po_end));
 684     }
 685   }
 686 
 687   return source_next;
 688 }
 689 
 690 bool ParallelCompactData::summarize(SplitInfo& split_info,
 691                                     HeapWord* source_beg, HeapWord* source_end,
 692                                     HeapWord** source_next,
 693                                     HeapWord* target_beg, HeapWord* target_end,
 694                                     HeapWord** target_next)
 695 {
 696   HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
 697   log_develop_trace(gc, compaction)(
 698       "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
 699       "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
 700       p2i(source_beg), p2i(source_end), p2i(source_next_val),
 701       p2i(target_beg), p2i(target_end), p2i(*target_next));
 702 
 703   size_t cur_region = addr_to_region_idx(source_beg);
 704   const size_t end_region = addr_to_region_idx(region_align_up(source_end));
 705 
 706   HeapWord *dest_addr = target_beg;
 707   while (cur_region < end_region) {
 708     // The destination must be set even if the region has no data.
 709     _region_data[cur_region].set_destination(dest_addr);
 710 
 711     size_t words = _region_data[cur_region].data_size();
 712     if (words > 0) {
 713       // If cur_region does not fit entirely into the target space, find a point
 714       // at which the source space can be 'split' so that part is copied to the
 715       // target space and the rest is copied elsewhere.
 716       if (dest_addr + words > target_end) {
 717         assert(source_next != NULL, "source_next is NULL when splitting");
 718         *source_next = summarize_split_space(cur_region, split_info, dest_addr,
 719                                              target_end, target_next);
 720         return false;
 721       }
 722 
 723       // Compute the destination_count for cur_region, and if necessary, update
 724       // source_region for a destination region.  The source_region field is
 725       // updated if cur_region is the first (left-most) region to be copied to a
 726       // destination region.
 727       //
 728       // The destination_count calculation is a bit subtle.  A region that has
 729       // data that compacts into itself does not count itself as a destination.
 730       // This maintains the invariant that a zero count means the region is
 731       // available and can be claimed and then filled.
 732       uint destination_count = 0;
 733       if (split_info.is_split(cur_region)) {
 734         // The current region has been split:  the partial object will be copied
 735         // to one destination space and the remaining data will be copied to
 736         // another destination space.  Adjust the initial destination_count and,
 737         // if necessary, set the source_region field if the partial object will
 738         // cross a destination region boundary.
 739         destination_count = split_info.destination_count();
 740         if (destination_count == 2) {
 741           size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
 742           _region_data[dest_idx].set_source_region(cur_region);
 743         }
 744       }
 745 
 746       HeapWord* const last_addr = dest_addr + words - 1;
 747       const size_t dest_region_1 = addr_to_region_idx(dest_addr);
 748       const size_t dest_region_2 = addr_to_region_idx(last_addr);
 749 
 750       // Initially assume that the destination regions will be the same and
 751       // adjust the value below if necessary.  Under this assumption, if
 752       // cur_region == dest_region_2, then cur_region will be compacted
 753       // completely into itself.
 754       destination_count += cur_region == dest_region_2 ? 0 : 1;
 755       if (dest_region_1 != dest_region_2) {
 756         // Destination regions differ; adjust destination_count.
 757         destination_count += 1;
 758         // Data from cur_region will be copied to the start of dest_region_2.
 759         _region_data[dest_region_2].set_source_region(cur_region);
 760       } else if (is_region_aligned(dest_addr)) {
 761         // Data from cur_region will be copied to the start of the destination
 762         // region.
 763         _region_data[dest_region_1].set_source_region(cur_region);
 764       }
 765 
 766       _region_data[cur_region].set_destination_count(destination_count);
 767       _region_data[cur_region].set_data_location(region_to_addr(cur_region));
 768       dest_addr += words;
 769     }
 770 
 771     ++cur_region;
 772   }
 773 
 774   *target_next = dest_addr;
 775   return true;
 776 }
 777 
 778 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) const {
 779   assert(addr != NULL, "Should detect NULL oop earlier");
 780   assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
 781   assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
 782 
 783   // Region covering the object.
 784   RegionData* const region_ptr = addr_to_region_ptr(addr);
 785   HeapWord* result = region_ptr->destination();
 786 
 787   // If the entire Region is live, the new location is region->destination + the
 788   // offset of the object within in the Region.
 789 
 790   // Run some performance tests to determine if this special case pays off.  It
 791   // is worth it for pointers into the dense prefix.  If the optimization to
 792   // avoid pointer updates in regions that only point to the dense prefix is
 793   // ever implemented, this should be revisited.
 794   if (region_ptr->data_size() == RegionSize) {
 795     result += region_offset(addr);
 796     return result;
 797   }
 798 
 799   // Otherwise, the new location is region->destination + block offset + the
 800   // number of live words in the Block that are (a) to the left of addr and (b)
 801   // due to objects that start in the Block.
 802 
 803   // Fill in the block table if necessary.  This is unsynchronized, so multiple
 804   // threads may fill the block table for a region (harmless, since it is
 805   // idempotent).
 806   if (!region_ptr->blocks_filled()) {
 807     PSParallelCompact::fill_blocks(addr_to_region_idx(addr));
 808     region_ptr->set_blocks_filled();
 809   }
 810 
 811   HeapWord* const search_start = block_align_down(addr);
 812   const size_t block_offset = addr_to_block_ptr(addr)->offset();
 813 
 814   const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
 815   const size_t live = bitmap->live_words_in_range(cm, search_start, cast_to_oop(addr));
 816   result += block_offset + live;
 817   DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
 818   return result;
 819 }
 820 
 821 #ifdef ASSERT
 822 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
 823 {
 824   const size_t* const beg = (const size_t*)vspace->committed_low_addr();
 825   const size_t* const end = (const size_t*)vspace->committed_high_addr();
 826   for (const size_t* p = beg; p < end; ++p) {
 827     assert(*p == 0, "not zero");
 828   }
 829 }
 830 
 831 void ParallelCompactData::verify_clear()
 832 {
 833   verify_clear(_region_vspace);
 834   verify_clear(_block_vspace);
 835 }
 836 #endif  // #ifdef ASSERT
 837 
 838 STWGCTimer          PSParallelCompact::_gc_timer;
 839 ParallelOldTracer   PSParallelCompact::_gc_tracer;
 840 elapsedTimer        PSParallelCompact::_accumulated_time;
 841 unsigned int        PSParallelCompact::_total_invocations = 0;
 842 unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
 843 CollectorCounters*  PSParallelCompact::_counters = NULL;
 844 ParMarkBitMap       PSParallelCompact::_mark_bitmap;
 845 ParallelCompactData PSParallelCompact::_summary_data;
 846 
 847 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
 848 
 849 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 850 
 851 void PSParallelCompact::post_initialize() {
 852   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 853   _span_based_discoverer.set_span(heap->reserved_region());
 854   _ref_processor =
 855     new ReferenceProcessor(&_span_based_discoverer,
 856                            ParallelGCThreads,   // mt processing degree
 857                            ParallelGCThreads,   // mt discovery degree
 858                            false,               // concurrent_discovery
 859                            &_is_alive_closure); // non-header is alive closure
 860 
 861   _counters = new CollectorCounters("Parallel full collection pauses", 1);
 862 
 863   // Initialize static fields in ParCompactionManager.
 864   ParCompactionManager::initialize(mark_bitmap());
 865 }
 866 
 867 bool PSParallelCompact::initialize() {
 868   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 869   MemRegion mr = heap->reserved_region();
 870 
 871   // Was the old gen get allocated successfully?
 872   if (!heap->old_gen()->is_allocated()) {
 873     return false;
 874   }
 875 
 876   initialize_space_info();
 877   initialize_dead_wood_limiter();
 878 
 879   if (!_mark_bitmap.initialize(mr)) {
 880     vm_shutdown_during_initialization(
 881       err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
 882       "garbage collection for the requested " SIZE_FORMAT "KB heap.",
 883       _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
 884     return false;
 885   }
 886 
 887   if (!_summary_data.initialize(mr)) {
 888     vm_shutdown_during_initialization(
 889       err_msg("Unable to allocate " SIZE_FORMAT "KB card tables for parallel "
 890       "garbage collection for the requested " SIZE_FORMAT "KB heap.",
 891       _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
 892     return false;
 893   }
 894 
 895   return true;
 896 }
 897 
 898 void PSParallelCompact::initialize_space_info()
 899 {
 900   memset(&_space_info, 0, sizeof(_space_info));
 901 
 902   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 903   PSYoungGen* young_gen = heap->young_gen();
 904 
 905   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
 906   _space_info[eden_space_id].set_space(young_gen->eden_space());
 907   _space_info[from_space_id].set_space(young_gen->from_space());
 908   _space_info[to_space_id].set_space(young_gen->to_space());
 909 
 910   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
 911 }
 912 
 913 void PSParallelCompact::initialize_dead_wood_limiter()
 914 {
 915   const size_t max = 100;
 916   _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
 917   _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
 918   _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
 919   DEBUG_ONLY(_dwl_initialized = true;)
 920   _dwl_adjustment = normal_distribution(1.0);
 921 }
 922 
 923 void
 924 PSParallelCompact::clear_data_covering_space(SpaceId id)
 925 {
 926   // At this point, top is the value before GC, new_top() is the value that will
 927   // be set at the end of GC.  The marking bitmap is cleared to top; nothing
 928   // should be marked above top.  The summary data is cleared to the larger of
 929   // top & new_top.
 930   MutableSpace* const space = _space_info[id].space();
 931   HeapWord* const bot = space->bottom();
 932   HeapWord* const top = space->top();
 933   HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
 934 
 935   const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
 936   const idx_t end_bit = _mark_bitmap.align_range_end(_mark_bitmap.addr_to_bit(top));
 937   _mark_bitmap.clear_range(beg_bit, end_bit);
 938 
 939   const size_t beg_region = _summary_data.addr_to_region_idx(bot);
 940   const size_t end_region =
 941     _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
 942   _summary_data.clear_range(beg_region, end_region);
 943 
 944   // Clear the data used to 'split' regions.
 945   SplitInfo& split_info = _space_info[id].split_info();
 946   if (split_info.is_valid()) {
 947     split_info.clear();
 948   }
 949   DEBUG_ONLY(split_info.verify_clear();)
 950 }
 951 
 952 void PSParallelCompact::pre_compact()
 953 {
 954   // Update the from & to space pointers in space_info, since they are swapped
 955   // at each young gen gc.  Do the update unconditionally (even though a
 956   // promotion failure does not swap spaces) because an unknown number of young
 957   // collections will have swapped the spaces an unknown number of times.
 958   GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
 959   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 960   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
 961   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
 962 
 963   // Increment the invocation count
 964   heap->increment_total_collections(true);
 965 
 966   CodeCache::on_gc_marking_cycle_start();
 967   CodeCache::arm_all_nmethods();
 968 
 969   // We need to track unique mark sweep invocations as well.
 970   _total_invocations++;
 971 
 972   heap->print_heap_before_gc();
 973   heap->trace_heap_before_gc(&_gc_tracer);
 974 
 975   // Fill in TLABs
 976   heap->ensure_parsability(true);  // retire TLABs
 977 
 978   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
 979     Universe::verify("Before GC");
 980   }
 981 
 982   // Verify object start arrays
 983   if (VerifyObjectStartArray &&
 984       VerifyBeforeGC) {
 985     heap->old_gen()->verify_object_start_array();
 986   }
 987 
 988   DEBUG_ONLY(mark_bitmap()->verify_clear();)
 989   DEBUG_ONLY(summary_data().verify_clear();)
 990 
 991   ParCompactionManager::reset_all_bitmap_query_caches();
 992 }
 993 
 994 void PSParallelCompact::post_compact()
 995 {
 996   GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
 997   ParCompactionManager::remove_all_shadow_regions();
 998 
 999   CodeCache::on_gc_marking_cycle_finish();
1000   CodeCache::arm_all_nmethods();
1001 
1002   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1003     // Clear the marking bitmap, summary data and split info.
1004     clear_data_covering_space(SpaceId(id));
1005     // Update top().  Must be done after clearing the bitmap and summary data.
1006     _space_info[id].publish_new_top();
1007   }
1008 
1009   ParCompactionManager::flush_all_string_dedup_requests();
1010 
1011   MutableSpace* const eden_space = _space_info[eden_space_id].space();
1012   MutableSpace* const from_space = _space_info[from_space_id].space();
1013   MutableSpace* const to_space   = _space_info[to_space_id].space();
1014 
1015   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1016   bool eden_empty = eden_space->is_empty();
1017 
1018   // Update heap occupancy information which is used as input to the soft ref
1019   // clearing policy at the next gc.
1020   Universe::heap()->update_capacity_and_used_at_gc();
1021 
1022   bool young_gen_empty = eden_empty && from_space->is_empty() &&
1023     to_space->is_empty();
1024 
1025   PSCardTable* ct = heap->card_table();
1026   MemRegion old_mr = heap->old_gen()->reserved();
1027   if (young_gen_empty) {
1028     ct->clear(old_mr);
1029   } else {
1030     ct->invalidate(old_mr);
1031   }
1032 
1033   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1034   ClassLoaderDataGraph::purge(/*at_safepoint*/true);
1035   DEBUG_ONLY(MetaspaceUtils::verify();)
1036 
1037   heap->prune_scavengable_nmethods();
1038 
1039 #if COMPILER2_OR_JVMCI
1040   DerivedPointerTable::update_pointers();
1041 #endif
1042 
1043   if (ZapUnusedHeapArea) {
1044     heap->gen_mangle_unused_area();
1045   }
1046 
1047   // Signal that we have completed a visit to all live objects.
1048   Universe::heap()->record_whole_heap_examined_timestamp();
1049 }
1050 
1051 HeapWord*
1052 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
1053                                                     bool maximum_compaction)
1054 {
1055   const size_t region_size = ParallelCompactData::RegionSize;
1056   const ParallelCompactData& sd = summary_data();
1057 
1058   const MutableSpace* const space = _space_info[id].space();
1059   HeapWord* const top_aligned_up = sd.region_align_up(space->top());
1060   const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
1061   const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);
1062 
1063   // Skip full regions at the beginning of the space--they are necessarily part
1064   // of the dense prefix.
1065   size_t full_count = 0;
1066   const RegionData* cp;
1067   for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
1068     ++full_count;
1069   }
1070 
1071   assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1072   const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1073   const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
1074   if (maximum_compaction || cp == end_cp || interval_ended) {
1075     _maximum_compaction_gc_num = total_invocations();
1076     return sd.region_to_addr(cp);
1077   }
1078 
1079   HeapWord* const new_top = _space_info[id].new_top();
1080   const size_t space_live = pointer_delta(new_top, space->bottom());
1081   const size_t space_used = space->used_in_words();
1082   const size_t space_capacity = space->capacity_in_words();
1083 
1084   const double cur_density = double(space_live) / space_capacity;
1085   const double deadwood_density =
1086     (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
1087   const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
1088 
1089   log_develop_debug(gc, compaction)(
1090       "cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
1091       cur_density, deadwood_density, deadwood_goal);
1092   log_develop_debug(gc, compaction)(
1093       "space_live=" SIZE_FORMAT " space_used=" SIZE_FORMAT " "
1094       "space_cap=" SIZE_FORMAT,
1095       space_live, space_used,
1096       space_capacity);
1097 
1098   // XXX - Use binary search?
1099   HeapWord* dense_prefix = sd.region_to_addr(cp);
1100   const RegionData* full_cp = cp;
1101   const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
1102   while (cp < end_cp) {
1103     HeapWord* region_destination = cp->destination();
1104     const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
1105 
1106     log_develop_trace(gc, compaction)(
1107         "c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
1108         "dp=" PTR_FORMAT " cdw=" SIZE_FORMAT_W(8),
1109         sd.region(cp), p2i(region_destination),
1110         p2i(dense_prefix), cur_deadwood);
1111 
1112     if (cur_deadwood >= deadwood_goal) {
1113       // Found the region that has the correct amount of deadwood to the left.
1114       // This typically occurs after crossing a fairly sparse set of regions, so
1115       // iterate backwards over those sparse regions, looking for the region
1116       // that has the lowest density of live objects 'to the right.'
1117       size_t space_to_left = sd.region(cp) * region_size;
1118       size_t live_to_left = space_to_left - cur_deadwood;
1119       size_t space_to_right = space_capacity - space_to_left;
1120       size_t live_to_right = space_live - live_to_left;
1121       double density_to_right = double(live_to_right) / space_to_right;
1122       while (cp > full_cp) {
1123         --cp;
1124         const size_t prev_region_live_to_right = live_to_right -
1125           cp->data_size();
1126         const size_t prev_region_space_to_right = space_to_right + region_size;
1127         double prev_region_density_to_right =
1128           double(prev_region_live_to_right) / prev_region_space_to_right;
1129         if (density_to_right <= prev_region_density_to_right) {
1130           return dense_prefix;
1131         }
1132 
1133         log_develop_trace(gc, compaction)(
1134             "backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
1135             "pc_d2r=%10.8f",
1136             sd.region(cp), density_to_right,
1137             prev_region_density_to_right);
1138 
1139         dense_prefix -= region_size;
1140         live_to_right = prev_region_live_to_right;
1141         space_to_right = prev_region_space_to_right;
1142         density_to_right = prev_region_density_to_right;
1143       }
1144       return dense_prefix;
1145     }
1146 
1147     dense_prefix += region_size;
1148     ++cp;
1149   }
1150 
1151   return dense_prefix;
1152 }
1153 
1154 #ifndef PRODUCT
1155 void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
1156                                                  const SpaceId id,
1157                                                  const bool maximum_compaction,
1158                                                  HeapWord* const addr)
1159 {
1160   const size_t region_idx = summary_data().addr_to_region_idx(addr);
1161   RegionData* const cp = summary_data().region(region_idx);
1162   const MutableSpace* const space = _space_info[id].space();
1163   HeapWord* const new_top = _space_info[id].new_top();
1164 
1165   const size_t space_live = pointer_delta(new_top, space->bottom());
1166   const size_t dead_to_left = pointer_delta(addr, cp->destination());
1167   const size_t space_cap = space->capacity_in_words();
1168   const double dead_to_left_pct = double(dead_to_left) / space_cap;
1169   const size_t live_to_right = new_top - cp->destination();
1170   const size_t dead_to_right = space->top() - addr - live_to_right;
1171 
1172   log_develop_debug(gc, compaction)(
1173       "%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
1174       "spl=" SIZE_FORMAT " "
1175       "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
1176       "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT " "
1177       "ratio=%10.8f",
1178       algorithm, p2i(addr), region_idx,
1179       space_live,
1180       dead_to_left, dead_to_left_pct,
1181       dead_to_right, live_to_right,
1182       double(dead_to_right) / live_to_right);
1183 }
1184 #endif  // #ifndef PRODUCT
1185 
1186 // Return a fraction indicating how much of the generation can be treated as
1187 // "dead wood" (i.e., not reclaimed).  The function uses a normal distribution
1188 // based on the density of live objects in the generation to determine a limit,
1189 // which is then adjusted so the return value is min_percent when the density is
1190 // 1.
1191 //
1192 // The following table shows some return values for a different values of the
1193 // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
1194 // min_percent is 1.
1195 //
1196 //                          fraction allowed as dead wood
1197 //         -----------------------------------------------------------------
1198 // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
1199 // ------- ---------- ---------- ---------- ---------- ---------- ----------
1200 // 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1201 // 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1202 // 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1203 // 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1204 // 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1205 // 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1206 // 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1207 // 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1208 // 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1209 // 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1210 // 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
1211 // 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1212 // 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1213 // 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1214 // 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1215 // 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1216 // 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1217 // 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1218 // 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1219 // 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1220 // 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1221 
1222 double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
1223 {
1224   assert(_dwl_initialized, "uninitialized");
1225 
1226   // The raw limit is the value of the normal distribution at x = density.
1227   const double raw_limit = normal_distribution(density);
1228 
1229   // Adjust the raw limit so it becomes the minimum when the density is 1.
1230   //
1231   // First subtract the adjustment value (which is simply the precomputed value
1232   // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
1233   // Then add the minimum value, so the minimum is returned when the density is
1234   // 1.  Finally, prevent negative values, which occur when the mean is not 0.5.
1235   const double min = double(min_percent) / 100.0;
1236   const double limit = raw_limit - _dwl_adjustment + min;
1237   return MAX2(limit, 0.0);
1238 }
1239 
1240 ParallelCompactData::RegionData*
1241 PSParallelCompact::first_dead_space_region(const RegionData* beg,
1242                                            const RegionData* end)
1243 {
1244   const size_t region_size = ParallelCompactData::RegionSize;
1245   ParallelCompactData& sd = summary_data();
1246   size_t left = sd.region(beg);
1247   size_t right = end > beg ? sd.region(end) - 1 : left;
1248 
1249   // Binary search.
1250   while (left < right) {
1251     // Equivalent to (left + right) / 2, but does not overflow.
1252     const size_t middle = left + (right - left) / 2;
1253     RegionData* const middle_ptr = sd.region(middle);
1254     HeapWord* const dest = middle_ptr->destination();
1255     HeapWord* const addr = sd.region_to_addr(middle);
1256     assert(dest != NULL, "sanity");
1257     assert(dest <= addr, "must move left");
1258 
1259     if (middle > left && dest < addr) {
1260       right = middle - 1;
1261     } else if (middle < right && middle_ptr->data_size() == region_size) {
1262       left = middle + 1;
1263     } else {
1264       return middle_ptr;
1265     }
1266   }
1267   return sd.region(left);
1268 }
1269 
1270 ParallelCompactData::RegionData*
1271 PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
1272                                           const RegionData* end,
1273                                           size_t dead_words)
1274 {
1275   ParallelCompactData& sd = summary_data();
1276   size_t left = sd.region(beg);
1277   size_t right = end > beg ? sd.region(end) - 1 : left;
1278 
1279   // Binary search.
1280   while (left < right) {
1281     // Equivalent to (left + right) / 2, but does not overflow.
1282     const size_t middle = left + (right - left) / 2;
1283     RegionData* const middle_ptr = sd.region(middle);
1284     HeapWord* const dest = middle_ptr->destination();
1285     HeapWord* const addr = sd.region_to_addr(middle);
1286     assert(dest != NULL, "sanity");
1287     assert(dest <= addr, "must move left");
1288 
1289     const size_t dead_to_left = pointer_delta(addr, dest);
1290     if (middle > left && dead_to_left > dead_words) {
1291       right = middle - 1;
1292     } else if (middle < right && dead_to_left < dead_words) {
1293       left = middle + 1;
1294     } else {
1295       return middle_ptr;
1296     }
1297   }
1298   return sd.region(left);
1299 }
1300 
1301 // The result is valid during the summary phase, after the initial summarization
1302 // of each space into itself, and before final summarization.
1303 inline double
1304 PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
1305                                    HeapWord* const bottom,
1306                                    HeapWord* const top,
1307                                    HeapWord* const new_top)
1308 {
1309   ParallelCompactData& sd = summary_data();
1310 
1311   assert(cp != NULL, "sanity");
1312   assert(bottom != NULL, "sanity");
1313   assert(top != NULL, "sanity");
1314   assert(new_top != NULL, "sanity");
1315   assert(top >= new_top, "summary data problem?");
1316   assert(new_top > bottom, "space is empty; should not be here");
1317   assert(new_top >= cp->destination(), "sanity");
1318   assert(top >= sd.region_to_addr(cp), "sanity");
1319 
1320   HeapWord* const destination = cp->destination();
1321   const size_t dense_prefix_live  = pointer_delta(destination, bottom);
1322   const size_t compacted_region_live = pointer_delta(new_top, destination);
1323   const size_t compacted_region_used = pointer_delta(top,
1324                                                      sd.region_to_addr(cp));
1325   const size_t reclaimable = compacted_region_used - compacted_region_live;
1326 
1327   const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
1328   return double(reclaimable) / divisor;
1329 }
1330 
1331 // Return the address of the end of the dense prefix, a.k.a. the start of the
1332 // compacted region.  The address is always on a region boundary.
1333 //
1334 // Completely full regions at the left are skipped, since no compaction can
1335 // occur in those regions.  Then the maximum amount of dead wood to allow is
1336 // computed, based on the density (amount live / capacity) of the generation;
1337 // the region with approximately that amount of dead space to the left is
1338 // identified as the limit region.  Regions between the last completely full
1339 // region and the limit region are scanned and the one that has the best
1340 // (maximum) reclaimed_ratio() is selected.
1341 HeapWord*
1342 PSParallelCompact::compute_dense_prefix(const SpaceId id,
1343                                         bool maximum_compaction)
1344 {
1345   const size_t region_size = ParallelCompactData::RegionSize;
1346   const ParallelCompactData& sd = summary_data();
1347 
1348   const MutableSpace* const space = _space_info[id].space();
1349   HeapWord* const top = space->top();
1350   HeapWord* const top_aligned_up = sd.region_align_up(top);
1351   HeapWord* const new_top = _space_info[id].new_top();
1352   HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
1353   HeapWord* const bottom = space->bottom();
1354   const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
1355   const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
1356   const RegionData* const new_top_cp =
1357     sd.addr_to_region_ptr(new_top_aligned_up);
1358 
1359   // Skip full regions at the beginning of the space--they are necessarily part
1360   // of the dense prefix.
1361   const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
1362   assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
1363          space->is_empty(), "no dead space allowed to the left");
1364   assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
1365          "region must have dead space");
1366 
1367   // The gc number is saved whenever a maximum compaction is done, and used to
1368   // determine when the maximum compaction interval has expired.  This avoids
1369   // successive max compactions for different reasons.
1370   assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1371   const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1372   const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
1373     total_invocations() == HeapFirstMaximumCompactionCount;
1374   if (maximum_compaction || full_cp == top_cp || interval_ended) {
1375     _maximum_compaction_gc_num = total_invocations();
1376     return sd.region_to_addr(full_cp);
1377   }
1378 
1379   const size_t space_live = pointer_delta(new_top, bottom);
1380   const size_t space_used = space->used_in_words();
1381   const size_t space_capacity = space->capacity_in_words();
1382 
1383   const double density = double(space_live) / double(space_capacity);
1384   const size_t min_percent_free = MarkSweepDeadRatio;
1385   const double limiter = dead_wood_limiter(density, min_percent_free);
1386   const size_t dead_wood_max = space_used - space_live;
1387   const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
1388                                       dead_wood_max);
1389 
1390   log_develop_debug(gc, compaction)(
1391       "space_live=" SIZE_FORMAT " space_used=" SIZE_FORMAT " "
1392       "space_cap=" SIZE_FORMAT,
1393       space_live, space_used,
1394       space_capacity);
1395   log_develop_debug(gc, compaction)(
1396       "dead_wood_limiter(%6.4f, " SIZE_FORMAT ")=%6.4f "
1397       "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
1398       density, min_percent_free, limiter,
1399       dead_wood_max, dead_wood_limit);
1400 
1401   // Locate the region with the desired amount of dead space to the left.
1402   const RegionData* const limit_cp =
1403     dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);
1404 
1405   // Scan from the first region with dead space to the limit region and find the
1406   // one with the best (largest) reclaimed ratio.
1407   double best_ratio = 0.0;
1408   const RegionData* best_cp = full_cp;
1409   for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
1410     double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
1411     if (tmp_ratio > best_ratio) {
1412       best_cp = cp;
1413       best_ratio = tmp_ratio;
1414     }
1415   }
1416 
1417   return sd.region_to_addr(best_cp);
1418 }
1419 
1420 void PSParallelCompact::summarize_spaces_quick()
1421 {
1422   for (unsigned int i = 0; i < last_space_id; ++i) {
1423     const MutableSpace* space = _space_info[i].space();
1424     HeapWord** nta = _space_info[i].new_top_addr();
1425     bool result = _summary_data.summarize(_space_info[i].split_info(),
1426                                           space->bottom(), space->top(), NULL,
1427                                           space->bottom(), space->end(), nta);
1428     assert(result, "space must fit into itself");
1429     _space_info[i].set_dense_prefix(space->bottom());
1430   }
1431 }
1432 
1433 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
1434 {
1435   HeapWord* const dense_prefix_end = dense_prefix(id);
1436   const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
1437   const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
1438   if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
1439     // Only enough dead space is filled so that any remaining dead space to the
1440     // left is larger than the minimum filler object.  (The remainder is filled
1441     // during the copy/update phase.)
1442     //
1443     // The size of the dead space to the right of the boundary is not a
1444     // concern, since compaction will be able to use whatever space is
1445     // available.
1446     //
1447     // Here '||' is the boundary, 'x' represents a don't care bit and a box
1448     // surrounds the space to be filled with an object.
1449     //
1450     // In the 32-bit VM, each bit represents two 32-bit words:
1451     //                              +---+
1452     // a) beg_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
1453     //    end_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
1454     //                              +---+
1455     //
1456     // In the 64-bit VM, each bit represents one 64-bit word:
1457     //                              +------------+
1458     // b) beg_bits:  ...  x   x   x | 0   ||   0 | x  x  ...
1459     //    end_bits:  ...  x   x   1 | 0   ||   0 | x  x  ...
1460     //                              +------------+
1461     //                          +-------+
1462     // c) beg_bits:  ...  x   x | 0   0 | ||   0   x  x  ...
1463     //    end_bits:  ...  x   1 | 0   0 | ||   0   x  x  ...
1464     //                          +-------+
1465     //                      +-----------+
1466     // d) beg_bits:  ...  x | 0   0   0 | ||   0   x  x  ...
1467     //    end_bits:  ...  1 | 0   0   0 | ||   0   x  x  ...
1468     //                      +-----------+
1469     //                          +-------+
1470     // e) beg_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
1471     //    end_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
1472     //                          +-------+
1473 
1474     // Initially assume case a, c or e will apply.
1475     size_t obj_len = CollectedHeap::min_fill_size();
1476     HeapWord* obj_beg = dense_prefix_end - obj_len;
1477 
1478 #ifdef  _LP64
1479     if (MinObjAlignment > 1) { // object alignment > heap word size
1480       // Cases a, c or e.
1481     } else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
1482       // Case b above.
1483       obj_beg = dense_prefix_end - 1;
1484     } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
1485                _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
1486       // Case d above.
1487       obj_beg = dense_prefix_end - 3;
1488       obj_len = 3;
1489     }
1490 #endif  // #ifdef _LP64
1491 
1492     CollectedHeap::fill_with_object(obj_beg, obj_len);
1493     _mark_bitmap.mark_obj(obj_beg, obj_len);
1494     _summary_data.add_obj(obj_beg, obj_len);
1495     assert(start_array(id) != NULL, "sanity");
1496     start_array(id)->allocate_block(obj_beg);
1497   }
1498 }
1499 
1500 void
1501 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
1502 {
1503   assert(id < last_space_id, "id out of range");
1504   assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
1505          "should have been reset in summarize_spaces_quick()");
1506 
1507   const MutableSpace* space = _space_info[id].space();
1508   if (_space_info[id].new_top() != space->bottom()) {
1509     HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
1510     _space_info[id].set_dense_prefix(dense_prefix_end);
1511 
1512 #ifndef PRODUCT
1513     if (log_is_enabled(Debug, gc, compaction)) {
1514       print_dense_prefix_stats("ratio", id, maximum_compaction,
1515                                dense_prefix_end);
1516       HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
1517       print_dense_prefix_stats("density", id, maximum_compaction, addr);
1518     }
1519 #endif  // #ifndef PRODUCT
1520 
1521     // Recompute the summary data, taking into account the dense prefix.  If
1522     // every last byte will be reclaimed, then the existing summary data which
1523     // compacts everything can be left in place.
1524     if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1525       // If dead space crosses the dense prefix boundary, it is (at least
1526       // partially) filled with a dummy object, marked live and added to the
1527       // summary data.  This simplifies the copy/update phase and must be done
1528       // before the final locations of objects are determined, to prevent
1529       // leaving a fragment of dead space that is too small to fill.
1530       fill_dense_prefix_end(id);
1531 
1532       // Compute the destination of each Region, and thus each object.
1533       _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1534       _summary_data.summarize(_space_info[id].split_info(),
1535                               dense_prefix_end, space->top(), NULL,
1536                               dense_prefix_end, space->end(),
1537                               _space_info[id].new_top_addr());
1538     }
1539   }
1540 
1541   if (log_develop_is_enabled(Trace, gc, compaction)) {
1542     const size_t region_size = ParallelCompactData::RegionSize;
1543     HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1544     const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
1545     const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1546     HeapWord* const new_top = _space_info[id].new_top();
1547     const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
1548     const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1549     log_develop_trace(gc, compaction)(
1550         "id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1551         "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1552         "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1553         id, space->capacity_in_words(), p2i(dense_prefix_end),
1554         dp_region, dp_words / region_size,
1555         cr_words / region_size, p2i(new_top));
1556   }
1557 }
1558 
1559 #ifndef PRODUCT
1560 void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
1561                                           HeapWord* dst_beg, HeapWord* dst_end,
1562                                           SpaceId src_space_id,
1563                                           HeapWord* src_beg, HeapWord* src_end)
1564 {
1565   log_develop_trace(gc, compaction)(
1566       "Summarizing %d [%s] into %d [%s]:  "
1567       "src=" PTR_FORMAT "-" PTR_FORMAT " "
1568       SIZE_FORMAT "-" SIZE_FORMAT " "
1569       "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1570       SIZE_FORMAT "-" SIZE_FORMAT,
1571       src_space_id, space_names[src_space_id],
1572       dst_space_id, space_names[dst_space_id],
1573       p2i(src_beg), p2i(src_end),
1574       _summary_data.addr_to_region_idx(src_beg),
1575       _summary_data.addr_to_region_idx(src_end),
1576       p2i(dst_beg), p2i(dst_end),
1577       _summary_data.addr_to_region_idx(dst_beg),
1578       _summary_data.addr_to_region_idx(dst_end));
1579 }
1580 #endif  // #ifndef PRODUCT
1581 
1582 void PSParallelCompact::summary_phase(bool maximum_compaction)
1583 {
1584   GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
1585 
1586   // Quick summarization of each space into itself, to see how much is live.
1587   summarize_spaces_quick();
1588 
1589   log_develop_trace(gc, compaction)("summary phase:  after summarizing each space to self");
1590   NOT_PRODUCT(print_region_ranges());
1591   NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1592 
1593   // The amount of live data that will end up in old space (assuming it fits).
1594   size_t old_space_total_live = 0;
1595   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1596     old_space_total_live += pointer_delta(_space_info[id].new_top(),
1597                                           _space_info[id].space()->bottom());
1598   }
1599 
1600   MutableSpace* const old_space = _space_info[old_space_id].space();
1601   const size_t old_capacity = old_space->capacity_in_words();
1602   if (old_space_total_live > old_capacity) {
1603     // XXX - should also try to expand
1604     maximum_compaction = true;
1605   }
1606 
1607   // Old generations.
1608   summarize_space(old_space_id, maximum_compaction);
1609 
1610   // Summarize the remaining spaces in the young gen.  The initial target space
1611   // is the old gen.  If a space does not fit entirely into the target, then the
1612   // remainder is compacted into the space itself and that space becomes the new
1613   // target.
1614   SpaceId dst_space_id = old_space_id;
1615   HeapWord* dst_space_end = old_space->end();
1616   HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
1617   for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
1618     const MutableSpace* space = _space_info[id].space();
1619     const size_t live = pointer_delta(_space_info[id].new_top(),
1620                                       space->bottom());
1621     const size_t available = pointer_delta(dst_space_end, *new_top_addr);
1622 
1623     NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
1624                                   SpaceId(id), space->bottom(), space->top());)
1625     if (live > 0 && live <= available) {
1626       // All the live data will fit.
1627       bool done = _summary_data.summarize(_space_info[id].split_info(),
1628                                           space->bottom(), space->top(),
1629                                           NULL,
1630                                           *new_top_addr, dst_space_end,
1631                                           new_top_addr);
1632       assert(done, "space must fit into old gen");
1633 
1634       // Reset the new_top value for the space.
1635       _space_info[id].set_new_top(space->bottom());
1636     } else if (live > 0) {
1637       // Attempt to fit part of the source space into the target space.
1638       HeapWord* next_src_addr = NULL;
1639       bool done = _summary_data.summarize(_space_info[id].split_info(),
1640                                           space->bottom(), space->top(),
1641                                           &next_src_addr,
1642                                           *new_top_addr, dst_space_end,
1643                                           new_top_addr);
1644       assert(!done, "space should not fit into old gen");
1645       assert(next_src_addr != NULL, "sanity");
1646 
1647       // The source space becomes the new target, so the remainder is compacted
1648       // within the space itself.
1649       dst_space_id = SpaceId(id);
1650       dst_space_end = space->end();
1651       new_top_addr = _space_info[id].new_top_addr();
1652       NOT_PRODUCT(summary_phase_msg(dst_space_id,
1653                                     space->bottom(), dst_space_end,
1654                                     SpaceId(id), next_src_addr, space->top());)
1655       done = _summary_data.summarize(_space_info[id].split_info(),
1656                                      next_src_addr, space->top(),
1657                                      NULL,
1658                                      space->bottom(), dst_space_end,
1659                                      new_top_addr);
1660       assert(done, "space must fit when compacted into itself");
1661       assert(*new_top_addr <= space->top(), "usage should not grow");
1662     }
1663   }
1664 
1665   log_develop_trace(gc, compaction)("Summary_phase:  after final summarization");
1666   NOT_PRODUCT(print_region_ranges());
1667   NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1668 }
1669 
1670 // This method should contain all heap-specific policy for invoking a full
1671 // collection.  invoke_no_policy() will only attempt to compact the heap; it
1672 // will do nothing further.  If we need to bail out for policy reasons, scavenge
1673 // before full gc, or any other specialized behavior, it needs to be added here.
1674 //
1675 // Note that this method should only be called from the vm_thread while at a
1676 // safepoint.
1677 //
1678 // Note that the all_soft_refs_clear flag in the soft ref policy
1679 // may be true because this method can be called without intervening
1680 // activity.  For example when the heap space is tight and full measure
1681 // are being taken to free space.
1682 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1683   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1684   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1685          "should be in vm thread");
1686 
1687   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1688   assert(!heap->is_gc_active(), "not reentrant");
1689 
1690   IsGCActiveMark mark;
1691 
1692   if (ScavengeBeforeFullGC) {
1693     PSScavenge::invoke_no_policy();
1694   }
1695 
1696   const bool clear_all_soft_refs =
1697     heap->soft_ref_policy()->should_clear_all_soft_refs();
1698 
1699   PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1700                                       maximum_heap_compaction);
1701 }
1702 
1703 // This method contains no policy. You should probably
1704 // be calling invoke() instead.
1705 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1706   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1707   assert(ref_processor() != NULL, "Sanity");
1708 
1709   if (GCLocker::check_active_before_gc()) {
1710     return false;
1711   }
1712 
1713   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1714 
1715   GCIdMark gc_id_mark;
1716   _gc_timer.register_gc_start();
1717   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
1718 
1719   GCCause::Cause gc_cause = heap->gc_cause();
1720   PSYoungGen* young_gen = heap->young_gen();
1721   PSOldGen* old_gen = heap->old_gen();
1722   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1723 
1724   // The scope of casr should end after code that can change
1725   // SoftRefPolicy::_should_clear_all_soft_refs.
1726   ClearedAllSoftRefs casr(maximum_heap_compaction,
1727                           heap->soft_ref_policy());
1728 
1729   if (ZapUnusedHeapArea) {
1730     // Save information needed to minimize mangling
1731     heap->record_gen_tops_before_GC();
1732   }
1733 
1734   // Make sure data structures are sane, make the heap parsable, and do other
1735   // miscellaneous bookkeeping.
1736   pre_compact();
1737 
1738   const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
1739 
1740   {
1741     const uint active_workers =
1742       WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
1743                                         ParallelScavengeHeap::heap()->workers().active_workers(),
1744                                         Threads::number_of_non_daemon_threads());
1745     ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);
1746 
1747     GCTraceCPUTime tcpu(&_gc_tracer);
1748     GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause, true);
1749 
1750     heap->pre_full_gc_dump(&_gc_timer);
1751 
1752     TraceCollectorStats tcs(counters());
1753     TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause);
1754 
1755     if (log_is_enabled(Debug, gc, heap, exit)) {
1756       accumulated_time()->start();
1757     }
1758 
1759     // Let the size policy know we're starting
1760     size_policy->major_collection_begin();
1761 
1762 #if COMPILER2_OR_JVMCI
1763     DerivedPointerTable::clear();
1764 #endif
1765 
1766     ref_processor()->start_discovery(maximum_heap_compaction);
1767 
1768     marking_phase(&_gc_tracer);
1769 
1770     bool max_on_system_gc = UseMaximumCompactionOnSystemGC
1771       && GCCause::is_user_requested_gc(gc_cause);
1772     summary_phase(maximum_heap_compaction || max_on_system_gc);
1773 
1774 #if COMPILER2_OR_JVMCI
1775     assert(DerivedPointerTable::is_active(), "Sanity");
1776     DerivedPointerTable::set_active(false);
1777 #endif
1778 
1779     // adjust_roots() updates Universe::_intArrayKlassObj which is
1780     // needed by the compaction for filling holes in the dense prefix.
1781     adjust_roots();
1782 
1783     compact();
1784 
1785     ParCompactionManager::verify_all_region_stack_empty();
1786 
1787     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
1788     // done before resizing.
1789     post_compact();
1790 
1791     // Let the size policy know we're done
1792     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1793 
1794     if (UseAdaptiveSizePolicy) {
1795       log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
1796       log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
1797                           old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
1798 
1799       // Don't check if the size_policy is ready here.  Let
1800       // the size_policy check that internally.
1801       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1802           AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
1803         // Swap the survivor spaces if from_space is empty. The
1804         // resize_young_gen() called below is normally used after
1805         // a successful young GC and swapping of survivor spaces;
1806         // otherwise, it will fail to resize the young gen with
1807         // the current implementation.
1808         if (young_gen->from_space()->is_empty()) {
1809           young_gen->from_space()->clear(SpaceDecorator::Mangle);
1810           young_gen->swap_spaces();
1811         }
1812 
1813         // Calculate optimal free space amounts
1814         assert(young_gen->max_gen_size() >
1815           young_gen->from_space()->capacity_in_bytes() +
1816           young_gen->to_space()->capacity_in_bytes(),
1817           "Sizes of space in young gen are out-of-bounds");
1818 
1819         size_t young_live = young_gen->used_in_bytes();
1820         size_t eden_live = young_gen->eden_space()->used_in_bytes();
1821         size_t old_live = old_gen->used_in_bytes();
1822         size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
1823         size_t max_old_gen_size = old_gen->max_gen_size();
1824         size_t max_eden_size = young_gen->max_gen_size() -
1825           young_gen->from_space()->capacity_in_bytes() -
1826           young_gen->to_space()->capacity_in_bytes();
1827 
1828         // Used for diagnostics
1829         size_policy->clear_generation_free_space_flags();
1830 
1831         size_policy->compute_generations_free_space(young_live,
1832                                                     eden_live,
1833                                                     old_live,
1834                                                     cur_eden,
1835                                                     max_old_gen_size,
1836                                                     max_eden_size,
1837                                                     true /* full gc*/);
1838 
1839         size_policy->check_gc_overhead_limit(eden_live,
1840                                              max_old_gen_size,
1841                                              max_eden_size,
1842                                              true /* full gc*/,
1843                                              gc_cause,
1844                                              heap->soft_ref_policy());
1845 
1846         size_policy->decay_supplemental_growth(true /* full gc*/);
1847 
1848         heap->resize_old_gen(
1849           size_policy->calculated_old_free_size_in_bytes());
1850 
1851         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1852                                size_policy->calculated_survivor_size_in_bytes());
1853       }
1854 
1855       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
1856     }
1857 
1858     if (UsePerfData) {
1859       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1860       counters->update_counters();
1861       counters->update_old_capacity(old_gen->capacity_in_bytes());
1862       counters->update_young_capacity(young_gen->capacity_in_bytes());
1863     }
1864 
1865     heap->resize_all_tlabs();
1866 
1867     // Resize the metaspace capacity after a collection
1868     MetaspaceGC::compute_new_size();
1869 
1870     if (log_is_enabled(Debug, gc, heap, exit)) {
1871       accumulated_time()->stop();
1872     }
1873 
1874     heap->print_heap_change(pre_gc_values);
1875 
1876     // Track memory usage and detect low memory
1877     MemoryService::track_memory_usage();
1878     heap->update_counters();
1879 
1880     heap->post_full_gc_dump(&_gc_timer);
1881   }
1882 
1883   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1884     Universe::verify("After GC");
1885   }
1886 
1887   // Re-verify object start arrays
1888   if (VerifyObjectStartArray &&
1889       VerifyAfterGC) {
1890     old_gen->verify_object_start_array();
1891   }
1892 
1893   if (ZapUnusedHeapArea) {
1894     old_gen->object_space()->check_mangled_unused_area_complete();
1895   }
1896 
1897   heap->print_heap_after_gc();
1898   heap->trace_heap_after_gc(&_gc_tracer);
1899 
1900   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
1901 
1902   _gc_timer.register_gc_end();
1903 
1904   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1905   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1906 
1907   return true;
1908 }
1909 
1910 class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
1911 private:
1912   uint _worker_id;
1913 
1914 public:
1915   PCAddThreadRootsMarkingTaskClosure(uint worker_id) : _worker_id(worker_id) { }
1916   void do_thread(Thread* thread) {
1917     assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
1918 
1919     ResourceMark rm;
1920 
1921     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(_worker_id);
1922 
1923     PCMarkAndPushClosure mark_and_push_closure(cm);
1924     MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations, true /* keepalive nmethods */);
1925 
1926     thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
1927 
1928     // Do the real work
1929     cm->follow_marking_stacks();
1930   }
1931 };
1932 
1933 void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
1934   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
1935 
1936   ParCompactionManager* cm =
1937     ParCompactionManager::gc_thread_compaction_manager(worker_id);
1938 
1939   do {
1940     oop obj = NULL;
1941     ObjArrayTask task;
1942     if (ParCompactionManager::steal_objarray(worker_id,  task)) {
1943       cm->follow_array((objArrayOop)task.obj(), task.index());
1944     } else if (ParCompactionManager::steal(worker_id, obj)) {
1945       cm->follow_contents(obj);
1946     }
1947     cm->follow_marking_stacks();
1948   } while (!terminator.offer_termination());
1949 }
1950 
1951 class MarkFromRootsTask : public WorkerTask {
1952   StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
1953   OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
1954   TaskTerminator _terminator;
1955   uint _active_workers;
1956 
1957 public:
1958   MarkFromRootsTask(uint active_workers) :
1959       WorkerTask("MarkFromRootsTask"),
1960       _strong_roots_scope(active_workers),
1961       _terminator(active_workers, ParCompactionManager::oop_task_queues()),
1962       _active_workers(active_workers) {}
1963 
1964   virtual void work(uint worker_id) {
1965     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1966     PCMarkAndPushClosure mark_and_push_closure(cm);
1967 
1968     {
1969       CLDToOopClosure cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_strong);
1970       ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
1971 
1972       // Do the real work
1973       cm->follow_marking_stacks();
1974     }
1975 
1976     PCAddThreadRootsMarkingTaskClosure closure(worker_id);
1977     Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
1978 
1979     // Mark from OopStorages
1980     {
1981       _oop_storage_set_par_state.oops_do(&mark_and_push_closure);
1982       // Do the real work
1983       cm->follow_marking_stacks();
1984     }
1985 
1986     if (_active_workers > 1) {
1987       steal_marking_work(_terminator, worker_id);
1988     }
1989   }
1990 };
1991 
1992 class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
1993   TaskTerminator _terminator;
1994 
1995 public:
1996   ParallelCompactRefProcProxyTask(uint max_workers)
1997     : RefProcProxyTask("ParallelCompactRefProcProxyTask", max_workers),
1998       _terminator(_max_workers, ParCompactionManager::oop_task_queues()) {}
1999 
2000   void work(uint worker_id) override {
2001     assert(worker_id < _max_workers, "sanity");
2002     ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
2003     PCMarkAndPushClosure keep_alive(cm);
2004     BarrierEnqueueDiscoveredFieldClosure enqueue;
2005     ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
2006     _rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &keep_alive, &enqueue, &complete_gc);
2007   }
2008 
2009   void prepare_run_task_hook() override {
2010     _terminator.reset_for_reuse(_queue_count);
2011   }
2012 };
2013 
2014 void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
2015   // Recursively traverse all live objects and mark them
2016   GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
2017 
2018   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
2019 
2020   // Need new claim bits before marking starts.
2021   ClassLoaderDataGraph::clear_claimed_marks();
2022 
2023   {
2024     GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
2025 
2026     MarkFromRootsTask task(active_gc_threads);
2027     ParallelScavengeHeap::heap()->workers().run_task(&task);
2028   }
2029 
2030   // Process reference objects found during marking
2031   {
2032     GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
2033 
2034     ReferenceProcessorStats stats;
2035     ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());
2036 
2037     ref_processor()->set_active_mt_degree(active_gc_threads);
2038     ParallelCompactRefProcProxyTask task(ref_processor()->max_num_queues());
2039     stats = ref_processor()->process_discovered_references(task, pt);
2040 
2041     gc_tracer->report_gc_reference_stats(stats);
2042     pt.print_all_references();
2043   }
2044 
2045   // This is the point where the entire marking should have completed.
2046   ParCompactionManager::verify_all_marking_stack_empty();
2047 
2048   {
2049     GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
2050     WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
2051                                 is_alive_closure(),
2052                                 &do_nothing_cl,
2053                                 1);
2054   }
2055 
2056   {
2057     GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);
2058     CodeCache::UnloadingScope scope(is_alive_closure());
2059 
2060     // Follow system dictionary roots and unload classes.
2061     bool purged_class = SystemDictionary::do_unloading(&_gc_timer);
2062 
2063     // Unload nmethods.
2064     CodeCache::do_unloading(purged_class);
2065 
2066     // Prune dead klasses from subklass/sibling/implementor lists.
2067     Klass::clean_weak_klass_links(purged_class);
2068 
2069     // Clean JVMCI metadata handles.
2070     JVMCI_ONLY(JVMCI::do_unloading(purged_class));
2071   }
2072 
2073   _gc_tracer.report_object_count_after_gc(is_alive_closure());
2074 #if TASKQUEUE_STATS
2075   ParCompactionManager::oop_task_queues()->print_and_reset_taskqueue_stats("Oop Queue");
2076   ParCompactionManager::_objarray_task_queues->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
2077 #endif
2078 }
2079 
2080 class PSAdjustTask final : public WorkerTask {
2081   SubTasksDone                               _sub_tasks;
2082   WeakProcessor::Task                        _weak_proc_task;
2083   OopStorageSetStrongParState<false, false>  _oop_storage_iter;
2084   uint                                       _nworkers;
2085 
2086   enum PSAdjustSubTask {
2087     PSAdjustSubTask_code_cache,
2088 
2089     PSAdjustSubTask_num_elements
2090   };
2091 
2092 public:
2093   PSAdjustTask(uint nworkers) :
2094     WorkerTask("PSAdjust task"),
2095     _sub_tasks(PSAdjustSubTask_num_elements),
2096     _weak_proc_task(nworkers),
2097     _nworkers(nworkers) {
2098     // Need new claim bits when tracing through and adjusting pointers.
2099     ClassLoaderDataGraph::clear_claimed_marks();
2100     if (nworkers > 1) {
2101       Threads::change_thread_claim_token();
2102     }
2103   }
2104 
2105   ~PSAdjustTask() {
2106     Threads::assert_all_threads_claimed();
2107   }
2108 
2109   void work(uint worker_id) {
2110     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
2111     PCAdjustPointerClosure adjust(cm);
2112     {
2113       ResourceMark rm;
2114       Threads::possibly_parallel_oops_do(_nworkers > 1, &adjust, nullptr);
2115     }
2116     _oop_storage_iter.oops_do(&adjust);
2117     {
2118       CLDToOopClosure cld_closure(&adjust, ClassLoaderData::_claim_strong);
2119       ClassLoaderDataGraph::cld_do(&cld_closure);
2120     }
2121     {
2122       AlwaysTrueClosure always_alive;
2123       _weak_proc_task.work(worker_id, &always_alive, &adjust);
2124     }
2125     if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
2126       CodeBlobToOopClosure adjust_code(&adjust, CodeBlobToOopClosure::FixRelocations);
2127       CodeCache::blobs_do(&adjust_code);
2128     }
2129     _sub_tasks.all_tasks_claimed();
2130   }
2131 };
2132 
2133 void PSParallelCompact::adjust_roots() {
2134   // Adjust the pointers to reflect the new locations
2135   GCTraceTime(Info, gc, phases) tm("Adjust Roots", &_gc_timer);
2136   uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
2137   PSAdjustTask task(nworkers);
2138   ParallelScavengeHeap::heap()->workers().run_task(&task);
2139 }
2140 
2141 // Helper class to print 8 region numbers per line and then print the total at the end.
2142 class FillableRegionLogger : public StackObj {
2143 private:
2144   Log(gc, compaction) log;
2145   static const int LineLength = 8;
2146   size_t _regions[LineLength];
2147   int _next_index;
2148   bool _enabled;
2149   size_t _total_regions;
2150 public:
2151   FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
2152   ~FillableRegionLogger() {
2153     log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
2154   }
2155 
2156   void print_line() {
2157     if (!_enabled || _next_index == 0) {
2158       return;
2159     }
2160     FormatBuffer<> line("Fillable: ");
2161     for (int i = 0; i < _next_index; i++) {
2162       line.append(" " SIZE_FORMAT_W(7), _regions[i]);
2163     }
2164     log.trace("%s", line.buffer());
2165     _next_index = 0;
2166   }
2167 
2168   void handle(size_t region) {
2169     if (!_enabled) {
2170       return;
2171     }
2172     _regions[_next_index++] = region;
2173     if (_next_index == LineLength) {
2174       print_line();
2175     }
2176     _total_regions++;
2177   }
2178 };
2179 
2180 void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
2181 {
2182   GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
2183 
2184   // Find the threads that are active
2185   uint worker_id = 0;
2186 
2187   // Find all regions that are available (can be filled immediately) and
2188   // distribute them to the thread stacks.  The iteration is done in reverse
2189   // order (high to low) so the regions will be removed in ascending order.
2190 
2191   const ParallelCompactData& sd = PSParallelCompact::summary_data();
2192 
2193   // id + 1 is used to test termination so unsigned  can
2194   // be used with an old_space_id == 0.
2195   FillableRegionLogger region_logger;
2196   for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
2197     SpaceInfo* const space_info = _space_info + id;
2198     HeapWord* const new_top = space_info->new_top();
2199 
2200     const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
2201     const size_t end_region =
2202       sd.addr_to_region_idx(sd.region_align_up(new_top));
2203 
2204     for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
2205       if (sd.region(cur)->claim_unsafe()) {
2206         ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
2207         bool result = sd.region(cur)->mark_normal();
2208         assert(result, "Must succeed at this point.");
2209         cm->region_stack()->push(cur);
2210         region_logger.handle(cur);
2211         // Assign regions to tasks in round-robin fashion.
2212         if (++worker_id == parallel_gc_threads) {
2213           worker_id = 0;
2214         }
2215       }
2216     }
2217     region_logger.print_line();
2218   }
2219 }
2220 
2221 class TaskQueue : StackObj {
2222   volatile uint _counter;
2223   uint _size;
2224   uint _insert_index;
2225   PSParallelCompact::UpdateDensePrefixTask* _backing_array;
2226 public:
2227   explicit TaskQueue(uint size) : _counter(0), _size(size), _insert_index(0), _backing_array(NULL) {
2228     _backing_array = NEW_C_HEAP_ARRAY(PSParallelCompact::UpdateDensePrefixTask, _size, mtGC);
2229   }
2230   ~TaskQueue() {
2231     assert(_counter >= _insert_index, "not all queue elements were claimed");
2232     FREE_C_HEAP_ARRAY(T, _backing_array);
2233   }
2234 
2235   void push(const PSParallelCompact::UpdateDensePrefixTask& value) {
2236     assert(_insert_index < _size, "too small backing array");
2237     _backing_array[_insert_index++] = value;
2238   }
2239 
2240   bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) {
2241     uint claimed = Atomic::fetch_and_add(&_counter, 1u);
2242     if (claimed < _insert_index) {
2243       reference = _backing_array[claimed];
2244       return true;
2245     } else {
2246       return false;
2247     }
2248   }
2249 };
2250 
2251 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2252 
2253 void PSParallelCompact::enqueue_dense_prefix_tasks(TaskQueue& task_queue,
2254                                                    uint parallel_gc_threads) {
2255   GCTraceTime(Trace, gc, phases) tm("Dense Prefix Task Setup", &_gc_timer);
2256 
2257   ParallelCompactData& sd = PSParallelCompact::summary_data();
2258 
2259   // Iterate over all the spaces adding tasks for updating
2260   // regions in the dense prefix.  Assume that 1 gc thread
2261   // will work on opening the gaps and the remaining gc threads
2262   // will work on the dense prefix.
2263   unsigned int space_id;
2264   for (space_id = old_space_id; space_id < last_space_id; ++ space_id) {
2265     HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2266     const MutableSpace* const space = _space_info[space_id].space();
2267 
2268     if (dense_prefix_end == space->bottom()) {
2269       // There is no dense prefix for this space.
2270       continue;
2271     }
2272 
2273     // The dense prefix is before this region.
2274     size_t region_index_end_dense_prefix =
2275         sd.addr_to_region_idx(dense_prefix_end);
2276     RegionData* const dense_prefix_cp =
2277       sd.region(region_index_end_dense_prefix);
2278     assert(dense_prefix_end == space->end() ||
2279            dense_prefix_cp->available() ||
2280            dense_prefix_cp->claimed(),
2281            "The region after the dense prefix should always be ready to fill");
2282 
2283     size_t region_index_start = sd.addr_to_region_idx(space->bottom());
2284 
2285     // Is there dense prefix work?
2286     size_t total_dense_prefix_regions =
2287       region_index_end_dense_prefix - region_index_start;
2288     // How many regions of the dense prefix should be given to
2289     // each thread?
2290     if (total_dense_prefix_regions > 0) {
2291       uint tasks_for_dense_prefix = 1;
2292       if (total_dense_prefix_regions <=
2293           (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
2294         // Don't over partition.  This assumes that
2295         // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
2296         // so there are not many regions to process.
2297         tasks_for_dense_prefix = parallel_gc_threads;
2298       } else {
2299         // Over partition
2300         tasks_for_dense_prefix = parallel_gc_threads *
2301           PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
2302       }
2303       size_t regions_per_thread = total_dense_prefix_regions /
2304         tasks_for_dense_prefix;
2305       // Give each thread at least 1 region.
2306       if (regions_per_thread == 0) {
2307         regions_per_thread = 1;
2308       }
2309 
2310       for (uint k = 0; k < tasks_for_dense_prefix; k++) {
2311         if (region_index_start >= region_index_end_dense_prefix) {
2312           break;
2313         }
2314         // region_index_end is not processed
2315         size_t region_index_end = MIN2(region_index_start + regions_per_thread,
2316                                        region_index_end_dense_prefix);
2317         task_queue.push(UpdateDensePrefixTask(SpaceId(space_id),
2318                                               region_index_start,
2319                                               region_index_end));
2320         region_index_start = region_index_end;
2321       }
2322     }
2323     // This gets any part of the dense prefix that did not
2324     // fit evenly.
2325     if (region_index_start < region_index_end_dense_prefix) {
2326       task_queue.push(UpdateDensePrefixTask(SpaceId(space_id),
2327                                             region_index_start,
2328                                             region_index_end_dense_prefix));
2329     }
2330   }
2331 }
2332 
2333 #ifdef ASSERT
2334 // Write a histogram of the number of times the block table was filled for a
2335 // region.
2336 void PSParallelCompact::write_block_fill_histogram()
2337 {
2338   if (!log_develop_is_enabled(Trace, gc, compaction)) {
2339     return;
2340   }
2341 
2342   Log(gc, compaction) log;
2343   ResourceMark rm;
2344   LogStream ls(log.trace());
2345   outputStream* out = &ls;
2346 
2347   typedef ParallelCompactData::RegionData rd_t;
2348   ParallelCompactData& sd = summary_data();
2349 
2350   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2351     MutableSpace* const spc = _space_info[id].space();
2352     if (spc->bottom() != spc->top()) {
2353       const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
2354       HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
2355       const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2356 
2357       size_t histo[5] = { 0, 0, 0, 0, 0 };
2358       const size_t histo_len = sizeof(histo) / sizeof(size_t);
2359       const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2360 
2361       for (const rd_t* cur = beg; cur < end; ++cur) {
2362         ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2363       }
2364       out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2365       for (size_t i = 0; i < histo_len; ++i) {
2366         out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2367                    histo[i], 100.0 * histo[i] / region_cnt);
2368       }
2369       out->cr();
2370     }
2371   }
2372 }
2373 #endif // #ifdef ASSERT
2374 
2375 static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
2376   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2377 
2378   ParCompactionManager* cm =
2379     ParCompactionManager::gc_thread_compaction_manager(worker_id);
2380 
2381   // Drain the stacks that have been preloaded with regions
2382   // that are ready to fill.
2383 
2384   cm->drain_region_stacks();
2385 
2386   guarantee(cm->region_stack()->is_empty(), "Not empty");
2387 
2388   size_t region_index = 0;
2389 
2390   while (true) {
2391     if (ParCompactionManager::steal(worker_id, region_index)) {
2392       PSParallelCompact::fill_and_update_region(cm, region_index);
2393       cm->drain_region_stacks();
2394     } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
2395       // Fill and update an unavailable region with the help of a shadow region
2396       PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
2397       cm->drain_region_stacks();
2398     } else {
2399       if (terminator->offer_termination()) {
2400         break;
2401       }
2402       // Go around again.
2403     }
2404   }
2405 }
2406 
2407 class UpdateDensePrefixAndCompactionTask: public WorkerTask {
2408   TaskQueue& _tq;
2409   TaskTerminator _terminator;
2410 
2411 public:
2412   UpdateDensePrefixAndCompactionTask(TaskQueue& tq, uint active_workers) :
2413       WorkerTask("UpdateDensePrefixAndCompactionTask"),
2414       _tq(tq),
2415       _terminator(active_workers, ParCompactionManager::region_task_queues()) {
2416   }
2417   virtual void work(uint worker_id) {
2418     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
2419 
2420     for (PSParallelCompact::UpdateDensePrefixTask task; _tq.try_claim(task); /* empty */) {
2421       PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
2422                                                              task._space_id,
2423                                                              task._region_index_start,
2424                                                              task._region_index_end);
2425     }
2426 
2427     // Once a thread has drained it's stack, it should try to steal regions from
2428     // other threads.
2429     compaction_with_stealing_work(&_terminator, worker_id);
2430 
2431     // At this point all regions have been compacted, so it's now safe
2432     // to update the deferred objects that cross region boundaries.
2433     cm->drain_deferred_objects();
2434   }
2435 };
2436 
2437 void PSParallelCompact::compact() {
2438   GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
2439 
2440   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2441   PSOldGen* old_gen = heap->old_gen();
2442   old_gen->start_array()->reset();
2443   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
2444 
2445   // for [0..last_space_id)
2446   //     for [0..active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)
2447   //         push
2448   //     push
2449   //
2450   // max push count is thus: last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1)
2451   TaskQueue task_queue(last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1));
2452   initialize_shadow_regions(active_gc_threads);
2453   prepare_region_draining_tasks(active_gc_threads);
2454   enqueue_dense_prefix_tasks(task_queue, active_gc_threads);
2455 
2456   {
2457     GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
2458 
2459     UpdateDensePrefixAndCompactionTask task(task_queue, active_gc_threads);
2460     ParallelScavengeHeap::heap()->workers().run_task(&task);
2461 
2462 #ifdef  ASSERT
2463     // Verify that all regions have been processed.
2464     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2465       verify_complete(SpaceId(id));
2466     }
2467 #endif
2468   }
2469 
2470   DEBUG_ONLY(write_block_fill_histogram());
2471 }
2472 
2473 #ifdef  ASSERT
2474 void PSParallelCompact::verify_complete(SpaceId space_id) {
2475   // All Regions between space bottom() to new_top() should be marked as filled
2476   // and all Regions between new_top() and top() should be available (i.e.,
2477   // should have been emptied).
2478   ParallelCompactData& sd = summary_data();
2479   SpaceInfo si = _space_info[space_id];
2480   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2481   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2482   const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2483   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2484   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2485 
2486   bool issued_a_warning = false;
2487 
2488   size_t cur_region;
2489   for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
2490     const RegionData* const c = sd.region(cur_region);
2491     if (!c->completed()) {
2492       log_warning(gc)("region " SIZE_FORMAT " not filled: destination_count=%u",
2493                       cur_region, c->destination_count());
2494       issued_a_warning = true;
2495     }
2496   }
2497 
2498   for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
2499     const RegionData* const c = sd.region(cur_region);
2500     if (!c->available()) {
2501       log_warning(gc)("region " SIZE_FORMAT " not empty: destination_count=%u",
2502                       cur_region, c->destination_count());
2503       issued_a_warning = true;
2504     }
2505   }
2506 
2507   if (issued_a_warning) {
2508     print_region_ranges();
2509   }
2510 }
2511 #endif  // #ifdef ASSERT
2512 
2513 inline void UpdateOnlyClosure::do_addr(HeapWord* addr) {
2514   _start_array->allocate_block(addr);
2515   compaction_manager()->update_contents(cast_to_oop(addr));
2516 }
2517 
2518 // Update interior oops in the ranges of regions [beg_region, end_region).
2519 void
2520 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
2521                                                        SpaceId space_id,
2522                                                        size_t beg_region,
2523                                                        size_t end_region) {
2524   ParallelCompactData& sd = summary_data();
2525   ParMarkBitMap* const mbm = mark_bitmap();
2526 
2527   HeapWord* beg_addr = sd.region_to_addr(beg_region);
2528   HeapWord* const end_addr = sd.region_to_addr(end_region);
2529   assert(beg_region <= end_region, "bad region range");
2530   assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
2531 
2532 #ifdef  ASSERT
2533   // Claim the regions to avoid triggering an assert when they are marked as
2534   // filled.
2535   for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
2536     assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
2537   }
2538 #endif  // #ifdef ASSERT
2539 
2540   if (beg_addr != space(space_id)->bottom()) {
2541     // Find the first live object or block of dead space that *starts* in this
2542     // range of regions.  If a partial object crosses onto the region, skip it;
2543     // it will be marked for 'deferred update' when the object head is
2544     // processed.  If dead space crosses onto the region, it is also skipped; it
2545     // will be filled when the prior region is processed.  If neither of those
2546     // apply, the first word in the region is the start of a live object or dead
2547     // space.
2548     assert(beg_addr > space(space_id)->bottom(), "sanity");
2549     const RegionData* const cp = sd.region(beg_region);
2550     if (cp->partial_obj_size() != 0) {
2551       beg_addr = sd.partial_obj_end(beg_region);
2552     } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
2553       beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
2554     }
2555   }
2556 
2557   if (beg_addr < end_addr) {
2558     // A live object or block of dead space starts in this range of Regions.
2559      HeapWord* const dense_prefix_end = dense_prefix(space_id);
2560 
2561     // Create closures and iterate.
2562     UpdateOnlyClosure update_closure(mbm, cm, space_id);
2563     FillClosure fill_closure(cm, space_id);
2564     ParMarkBitMap::IterationStatus status;
2565     status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
2566                           dense_prefix_end);
2567     if (status == ParMarkBitMap::incomplete) {
2568       update_closure.do_addr(update_closure.source());
2569     }
2570   }
2571 
2572   // Mark the regions as filled.
2573   RegionData* const beg_cp = sd.region(beg_region);
2574   RegionData* const end_cp = sd.region(end_region);
2575   for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
2576     cp->set_completed();
2577   }
2578 }
2579 
2580 // Return the SpaceId for the space containing addr.  If addr is not in the
2581 // heap, last_space_id is returned.  In debug mode it expects the address to be
2582 // in the heap and asserts such.
2583 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
2584   assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
2585 
2586   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2587     if (_space_info[id].space()->contains(addr)) {
2588       return SpaceId(id);
2589     }
2590   }
2591 
2592   assert(false, "no space contains the addr");
2593   return last_space_id;
2594 }
2595 
2596 void PSParallelCompact::update_deferred_object(ParCompactionManager* cm, HeapWord *addr) {
2597 #ifdef ASSERT
2598   ParallelCompactData& sd = summary_data();
2599   size_t region_idx = sd.addr_to_region_idx(addr);
2600   assert(sd.region(region_idx)->completed(), "first region must be completed before deferred updates");
2601   assert(sd.region(region_idx + 1)->completed(), "second region must be completed before deferred updates");
2602 #endif
2603 
2604   const SpaceInfo* const space_info = _space_info + space_id(addr);
2605   ObjectStartArray* const start_array = space_info->start_array();
2606   if (start_array != NULL) {
2607     start_array->allocate_block(addr);
2608   }
2609 
2610   cm->update_contents(cast_to_oop(addr));
2611   assert(oopDesc::is_oop(cast_to_oop(addr)), "Expected an oop at " PTR_FORMAT, p2i(cast_to_oop(addr)));
2612 }
2613 
2614 // Skip over count live words starting from beg, and return the address of the
2615 // next live word.  Unless marked, the word corresponding to beg is assumed to
2616 // be dead.  Callers must either ensure beg does not correspond to the middle of
2617 // an object, or account for those live words in some other way.  Callers must
2618 // also ensure that there are enough live words in the range [beg, end) to skip.
2619 HeapWord*
2620 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
2621 {
2622   assert(count > 0, "sanity");
2623 
2624   ParMarkBitMap* m = mark_bitmap();
2625   idx_t bits_to_skip = m->words_to_bits(count);
2626   idx_t cur_beg = m->addr_to_bit(beg);
2627   const idx_t search_end = m->align_range_end(m->addr_to_bit(end));
2628 
2629   do {
2630     cur_beg = m->find_obj_beg(cur_beg, search_end);
2631     idx_t cur_end = m->find_obj_end(cur_beg, search_end);
2632     const size_t obj_bits = cur_end - cur_beg + 1;
2633     if (obj_bits > bits_to_skip) {
2634       return m->bit_to_addr(cur_beg + bits_to_skip);
2635     }
2636     bits_to_skip -= obj_bits;
2637     cur_beg = cur_end + 1;
2638   } while (bits_to_skip > 0);
2639 
2640   // Skipping the desired number of words landed just past the end of an object.
2641   // Find the start of the next object.
2642   cur_beg = m->find_obj_beg(cur_beg, search_end);
2643   assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
2644   return m->bit_to_addr(cur_beg);
2645 }
2646 
2647 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
2648                                             SpaceId src_space_id,
2649                                             size_t src_region_idx)
2650 {
2651   assert(summary_data().is_region_aligned(dest_addr), "not aligned");
2652 
2653   const SplitInfo& split_info = _space_info[src_space_id].split_info();
2654   if (split_info.dest_region_addr() == dest_addr) {
2655     // The partial object ending at the split point contains the first word to
2656     // be copied to dest_addr.
2657     return split_info.first_src_addr();
2658   }
2659 
2660   const ParallelCompactData& sd = summary_data();
2661   ParMarkBitMap* const bitmap = mark_bitmap();
2662   const size_t RegionSize = ParallelCompactData::RegionSize;
2663 
2664   assert(sd.is_region_aligned(dest_addr), "not aligned");
2665   const RegionData* const src_region_ptr = sd.region(src_region_idx);
2666   const size_t partial_obj_size = src_region_ptr->partial_obj_size();
2667   HeapWord* const src_region_destination = src_region_ptr->destination();
2668 
2669   assert(dest_addr >= src_region_destination, "wrong src region");
2670   assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
2671 
2672   HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
2673   HeapWord* const src_region_end = src_region_beg + RegionSize;
2674 
2675   HeapWord* addr = src_region_beg;
2676   if (dest_addr == src_region_destination) {
2677     // Return the first live word in the source region.
2678     if (partial_obj_size == 0) {
2679       addr = bitmap->find_obj_beg(addr, src_region_end);
2680       assert(addr < src_region_end, "no objects start in src region");
2681     }
2682     return addr;
2683   }
2684 
2685   // Must skip some live data.
2686   size_t words_to_skip = dest_addr - src_region_destination;
2687   assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
2688 
2689   if (partial_obj_size >= words_to_skip) {
2690     // All the live words to skip are part of the partial object.
2691     addr += words_to_skip;
2692     if (partial_obj_size == words_to_skip) {
2693       // Find the first live word past the partial object.
2694       addr = bitmap->find_obj_beg(addr, src_region_end);
2695       assert(addr < src_region_end, "wrong src region");
2696     }
2697     return addr;
2698   }
2699 
2700   // Skip over the partial object (if any).
2701   if (partial_obj_size != 0) {
2702     words_to_skip -= partial_obj_size;
2703     addr += partial_obj_size;
2704   }
2705 
2706   // Skip over live words due to objects that start in the region.
2707   addr = skip_live_words(addr, src_region_end, words_to_skip);
2708   assert(addr < src_region_end, "wrong src region");
2709   return addr;
2710 }
2711 
2712 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2713                                                      SpaceId src_space_id,
2714                                                      size_t beg_region,
2715                                                      HeapWord* end_addr)
2716 {
2717   ParallelCompactData& sd = summary_data();
2718 
2719 #ifdef ASSERT
2720   MutableSpace* const src_space = _space_info[src_space_id].space();
2721   HeapWord* const beg_addr = sd.region_to_addr(beg_region);
2722   assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
2723          "src_space_id does not match beg_addr");
2724   assert(src_space->contains(end_addr) || end_addr == src_space->end(),
2725          "src_space_id does not match end_addr");
2726 #endif // #ifdef ASSERT
2727 
2728   RegionData* const beg = sd.region(beg_region);
2729   RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
2730 
2731   // Regions up to new_top() are enqueued if they become available.
2732   HeapWord* const new_top = _space_info[src_space_id].new_top();
2733   RegionData* const enqueue_end =
2734     sd.addr_to_region_ptr(sd.region_align_up(new_top));
2735 
2736   for (RegionData* cur = beg; cur < end; ++cur) {
2737     assert(cur->data_size() > 0, "region must have live data");
2738     cur->decrement_destination_count();
2739     if (cur < enqueue_end && cur->available() && cur->claim()) {
2740       if (cur->mark_normal()) {
2741         cm->push_region(sd.region(cur));
2742       } else if (cur->mark_copied()) {
2743         // Try to copy the content of the shadow region back to its corresponding
2744         // heap region if the shadow region is filled. Otherwise, the GC thread
2745         // fills the shadow region will copy the data back (see
2746         // MoveAndUpdateShadowClosure::complete_region).
2747         copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur));
2748         ParCompactionManager::push_shadow_region_mt_safe(cur->shadow_region());
2749         cur->set_completed();
2750       }
2751     }
2752   }
2753 }
2754 
2755 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
2756                                           SpaceId& src_space_id,
2757                                           HeapWord*& src_space_top,
2758                                           HeapWord* end_addr)
2759 {
2760   typedef ParallelCompactData::RegionData RegionData;
2761 
2762   ParallelCompactData& sd = PSParallelCompact::summary_data();
2763   const size_t region_size = ParallelCompactData::RegionSize;
2764 
2765   size_t src_region_idx = 0;
2766 
2767   // Skip empty regions (if any) up to the top of the space.
2768   HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
2769   RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
2770   HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
2771   const RegionData* const top_region_ptr =
2772     sd.addr_to_region_ptr(top_aligned_up);
2773   while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
2774     ++src_region_ptr;
2775   }
2776 
2777   if (src_region_ptr < top_region_ptr) {
2778     // The next source region is in the current space.  Update src_region_idx
2779     // and the source address to match src_region_ptr.
2780     src_region_idx = sd.region(src_region_ptr);
2781     HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
2782     if (src_region_addr > closure.source()) {
2783       closure.set_source(src_region_addr);
2784     }
2785     return src_region_idx;
2786   }
2787 
2788   // Switch to a new source space and find the first non-empty region.
2789   unsigned int space_id = src_space_id + 1;
2790   assert(space_id < last_space_id, "not enough spaces");
2791 
2792   HeapWord* const destination = closure.destination();
2793 
2794   do {
2795     MutableSpace* space = _space_info[space_id].space();
2796     HeapWord* const bottom = space->bottom();
2797     const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
2798 
2799     // Iterate over the spaces that do not compact into themselves.
2800     if (bottom_cp->destination() != bottom) {
2801       HeapWord* const top_aligned_up = sd.region_align_up(space->top());
2802       const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
2803 
2804       for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
2805         if (src_cp->live_obj_size() > 0) {
2806           // Found it.
2807           assert(src_cp->destination() == destination,
2808                  "first live obj in the space must match the destination");
2809           assert(src_cp->partial_obj_size() == 0,
2810                  "a space cannot begin with a partial obj");
2811 
2812           src_space_id = SpaceId(space_id);
2813           src_space_top = space->top();
2814           const size_t src_region_idx = sd.region(src_cp);
2815           closure.set_source(sd.region_to_addr(src_region_idx));
2816           return src_region_idx;
2817         } else {
2818           assert(src_cp->data_size() == 0, "sanity");
2819         }
2820       }
2821     }
2822   } while (++space_id < last_space_id);
2823 
2824   assert(false, "no source region was found");
2825   return 0;
2826 }
2827 
2828 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2829 {
2830   typedef ParMarkBitMap::IterationStatus IterationStatus;
2831   ParMarkBitMap* const bitmap = mark_bitmap();
2832   ParallelCompactData& sd = summary_data();
2833   RegionData* const region_ptr = sd.region(region_idx);
2834 
2835   // Get the source region and related info.
2836   size_t src_region_idx = region_ptr->source_region();
2837   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2838   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2839   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2840 
2841   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2842 
2843   // Adjust src_region_idx to prepare for decrementing destination counts (the
2844   // destination count is not decremented when a region is copied to itself).
2845   if (src_region_idx == region_idx) {
2846     src_region_idx += 1;
2847   }
2848 
2849   if (bitmap->is_unmarked(closure.source())) {
2850     // The first source word is in the middle of an object; copy the remainder
2851     // of the object or as much as will fit.  The fact that pointer updates were
2852     // deferred will be noted when the object header is processed.
2853     HeapWord* const old_src_addr = closure.source();
2854     closure.copy_partial_obj();
2855     if (closure.is_full()) {
2856       decrement_destination_counts(cm, src_space_id, src_region_idx,
2857                                    closure.source());
2858       closure.complete_region(cm, dest_addr, region_ptr);
2859       return;
2860     }
2861 
2862     HeapWord* const end_addr = sd.region_align_down(closure.source());
2863     if (sd.region_align_down(old_src_addr) != end_addr) {
2864       // The partial object was copied from more than one source region.
2865       decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2866 
2867       // Move to the next source region, possibly switching spaces as well.  All
2868       // args except end_addr may be modified.
2869       src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2870                                        end_addr);
2871     }
2872   }
2873 
2874   do {
2875     HeapWord* const cur_addr = closure.source();
2876     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2877                                     src_space_top);
2878     IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
2879 
2880     if (status == ParMarkBitMap::incomplete) {
2881       // The last obj that starts in the source region does not end in the
2882       // region.
2883       assert(closure.source() < end_addr, "sanity");
2884       HeapWord* const obj_beg = closure.source();
2885       HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
2886                                        src_space_top);
2887       HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
2888       if (obj_end < range_end) {
2889         // The end was found; the entire object will fit.
2890         status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
2891         assert(status != ParMarkBitMap::would_overflow, "sanity");
2892       } else {
2893         // The end was not found; the object will not fit.
2894         assert(range_end < src_space_top, "obj cannot cross space boundary");
2895         status = ParMarkBitMap::would_overflow;
2896       }
2897     }
2898 
2899     if (status == ParMarkBitMap::would_overflow) {
2900       // The last object did not fit.  Note that interior oop updates were
2901       // deferred, then copy enough of the object to fill the region.
2902       cm->push_deferred_object(closure.destination());
2903       status = closure.copy_until_full(); // copies from closure.source()
2904 
2905       decrement_destination_counts(cm, src_space_id, src_region_idx,
2906                                    closure.source());
2907       closure.complete_region(cm, dest_addr, region_ptr);
2908       return;
2909     }
2910 
2911     if (status == ParMarkBitMap::full) {
2912       decrement_destination_counts(cm, src_space_id, src_region_idx,
2913                                    closure.source());
2914       closure.complete_region(cm, dest_addr, region_ptr);
2915       return;
2916     }
2917 
2918     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2919 
2920     // Move to the next source region, possibly switching spaces as well.  All
2921     // args except end_addr may be modified.
2922     src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2923                                      end_addr);
2924   } while (true);
2925 }
2926 
2927 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2928 {
2929   MoveAndUpdateClosure cl(mark_bitmap(), cm, region_idx);
2930   fill_region(cm, cl, region_idx);
2931 }
2932 
2933 void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx)
2934 {
2935   // Get a shadow region first
2936   ParallelCompactData& sd = summary_data();
2937   RegionData* const region_ptr = sd.region(region_idx);
2938   size_t shadow_region = ParCompactionManager::pop_shadow_region_mt_safe(region_ptr);
2939   // The InvalidShadow return value indicates the corresponding heap region is available,
2940   // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
2941   // MoveAndUpdateShadowClosure to fill the acquired shadow region.
2942   if (shadow_region == ParCompactionManager::InvalidShadow) {
2943     MoveAndUpdateClosure cl(mark_bitmap(), cm, region_idx);
2944     region_ptr->shadow_to_normal();
2945     return fill_region(cm, cl, region_idx);
2946   } else {
2947     MoveAndUpdateShadowClosure cl(mark_bitmap(), cm, region_idx, shadow_region);
2948     return fill_region(cm, cl, region_idx);
2949   }
2950 }
2951 
2952 void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr)
2953 {
2954   Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
2955 }
2956 
2957 bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_t &region_idx)
2958 {
2959   size_t next = cm->next_shadow_region();
2960   ParallelCompactData& sd = summary_data();
2961   size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
2962   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
2963 
2964   while (next < old_new_top) {
2965     if (sd.region(next)->mark_shadow()) {
2966       region_idx = next;
2967       return true;
2968     }
2969     next = cm->move_next_shadow_region_by(active_gc_threads);
2970   }
2971 
2972   return false;
2973 }
2974 
2975 // The shadow region is an optimization to address region dependencies in full GC. The basic
2976 // idea is making more regions available by temporally storing their live objects in empty
2977 // shadow regions to resolve dependencies between them and the destination regions. Therefore,
2978 // GC threads need not wait destination regions to be available before processing sources.
2979 //
2980 // A typical workflow would be:
2981 // After draining its own stack and failing to steal from others, a GC worker would pick an
2982 // unavailable region (destination count > 0) and get a shadow region. Then the worker fills
2983 // the shadow region by copying live objects from source regions of the unavailable one. Once
2984 // the unavailable region becomes available, the data in the shadow region will be copied back.
2985 // Shadow regions are empty regions in the to-space and regions between top and end of other spaces.
2986 void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
2987 {
2988   const ParallelCompactData& sd = PSParallelCompact::summary_data();
2989 
2990   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2991     SpaceInfo* const space_info = _space_info + id;
2992     MutableSpace* const space = space_info->space();
2993 
2994     const size_t beg_region =
2995       sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
2996     const size_t end_region =
2997       sd.addr_to_region_idx(sd.region_align_down(space->end()));
2998 
2999     for (size_t cur = beg_region; cur < end_region; ++cur) {
3000       ParCompactionManager::push_shadow_region(cur);
3001     }
3002   }
3003 
3004   size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
3005   for (uint i = 0; i < parallel_gc_threads; i++) {
3006     ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
3007     cm->set_next_shadow_region(beg_region + i);
3008   }
3009 }
3010 
3011 void PSParallelCompact::fill_blocks(size_t region_idx)
3012 {
3013   // Fill in the block table elements for the specified region.  Each block
3014   // table element holds the number of live words in the region that are to the
3015   // left of the first object that starts in the block.  Thus only blocks in
3016   // which an object starts need to be filled.
3017   //
3018   // The algorithm scans the section of the bitmap that corresponds to the
3019   // region, keeping a running total of the live words.  When an object start is
3020   // found, if it's the first to start in the block that contains it, the
3021   // current total is written to the block table element.
3022   const size_t Log2BlockSize = ParallelCompactData::Log2BlockSize;
3023   const size_t Log2RegionSize = ParallelCompactData::Log2RegionSize;
3024   const size_t RegionSize = ParallelCompactData::RegionSize;
3025 
3026   ParallelCompactData& sd = summary_data();
3027   const size_t partial_obj_size = sd.region(region_idx)->partial_obj_size();
3028   if (partial_obj_size >= RegionSize) {
3029     return; // No objects start in this region.
3030   }
3031 
3032   // Ensure the first loop iteration decides that the block has changed.
3033   size_t cur_block = sd.block_count();
3034 
3035   const ParMarkBitMap* const bitmap = mark_bitmap();
3036 
3037   const size_t Log2BitsPerBlock = Log2BlockSize - LogMinObjAlignment;
3038   assert((size_t)1 << Log2BitsPerBlock ==
3039          bitmap->words_to_bits(ParallelCompactData::BlockSize), "sanity");
3040 
3041   size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize);
3042   const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize);
3043   size_t live_bits = bitmap->words_to_bits(partial_obj_size);
3044   beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end);
3045   while (beg_bit < range_end) {
3046     const size_t new_block = beg_bit >> Log2BitsPerBlock;
3047     if (new_block != cur_block) {
3048       cur_block = new_block;
3049       sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits));
3050     }
3051 
3052     const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end);
3053     if (end_bit < range_end - 1) {
3054       live_bits += end_bit - beg_bit + 1;
3055       beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end);
3056     } else {
3057       return;
3058     }
3059   }
3060 }
3061 
3062 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
3063 {
3064   if (source() != copy_destination()) {
3065     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3066     Copy::aligned_conjoint_words(source(), copy_destination(), words_remaining());
3067   }
3068   update_state(words_remaining());
3069   assert(is_full(), "sanity");
3070   return ParMarkBitMap::full;
3071 }
3072 
3073 void MoveAndUpdateClosure::copy_partial_obj()
3074 {
3075   size_t words = words_remaining();
3076 
3077   HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
3078   HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
3079   if (end_addr < range_end) {
3080     words = bitmap()->obj_size(source(), end_addr);
3081   }
3082 
3083   // This test is necessary; if omitted, the pointer updates to a partial object
3084   // that crosses the dense prefix boundary could be overwritten.
3085   if (source() != copy_destination()) {
3086     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3087     Copy::aligned_conjoint_words(source(), copy_destination(), words);
3088   }
3089   update_state(words);
3090 }
3091 
3092 void MoveAndUpdateClosure::complete_region(ParCompactionManager *cm, HeapWord *dest_addr,
3093                                            PSParallelCompact::RegionData *region_ptr) {
3094   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
3095   region_ptr->set_completed();
3096 }
3097 
3098 ParMarkBitMapClosure::IterationStatus
3099 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3100   assert(destination() != NULL, "sanity");
3101   assert(bitmap()->obj_size(addr) == words, "bad size");
3102 
3103   _source = addr;
3104   assert(PSParallelCompact::summary_data().calc_new_pointer(source(), compaction_manager()) ==
3105          destination(), "wrong destination");
3106 
3107   if (words > words_remaining()) {
3108     return ParMarkBitMap::would_overflow;
3109   }
3110 
3111   // The start_array must be updated even if the object is not moving.
3112   if (_start_array != NULL) {
3113     _start_array->allocate_block(destination());
3114   }
3115 
3116   if (copy_destination() != source()) {
3117     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3118     Copy::aligned_conjoint_words(source(), copy_destination(), words);
3119   }
3120 
3121   oop moved_oop = cast_to_oop(copy_destination());
3122   compaction_manager()->update_contents(moved_oop);
3123   assert(oopDesc::is_oop_or_null(moved_oop), "Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop));
3124 
3125   update_state(words);
3126   assert(copy_destination() == cast_from_oop<HeapWord*>(moved_oop) + moved_oop->size(), "sanity");
3127   return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
3128 }
3129 
3130 void MoveAndUpdateShadowClosure::complete_region(ParCompactionManager *cm, HeapWord *dest_addr,
3131                                                  PSParallelCompact::RegionData *region_ptr) {
3132   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
3133   // Record the shadow region index
3134   region_ptr->set_shadow_region(_shadow);
3135   // Mark the shadow region as filled to indicate the data is ready to be
3136   // copied back
3137   region_ptr->mark_filled();
3138   // Try to copy the content of the shadow region back to its corresponding
3139   // heap region if available; the GC thread that decreases the destination
3140   // count to zero will do the copying otherwise (see
3141   // PSParallelCompact::decrement_destination_counts).
3142   if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
3143     region_ptr->set_completed();
3144     PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
3145     ParCompactionManager::push_shadow_region_mt_safe(_shadow);
3146   }
3147 }
3148 
3149 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
3150                                      ParCompactionManager* cm,
3151                                      PSParallelCompact::SpaceId space_id) :
3152   ParMarkBitMapClosure(mbm, cm),
3153   _space_id(space_id),
3154   _start_array(PSParallelCompact::start_array(space_id))
3155 {
3156 }
3157 
3158 // Updates the references in the object to their new values.
3159 ParMarkBitMapClosure::IterationStatus
3160 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
3161   do_addr(addr);
3162   return ParMarkBitMap::incomplete;
3163 }
3164 
3165 FillClosure::FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
3166   ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
3167   _start_array(PSParallelCompact::start_array(space_id))
3168 {
3169   assert(space_id == PSParallelCompact::old_space_id,
3170          "cannot use FillClosure in the young gen");
3171 }
3172 
3173 ParMarkBitMapClosure::IterationStatus
3174 FillClosure::do_addr(HeapWord* addr, size_t size) {
3175   CollectedHeap::fill_with_objects(addr, size);
3176   HeapWord* const end = addr + size;
3177   do {
3178     _start_array->allocate_block(addr);
3179     addr += cast_to_oop(addr)->size();
3180   } while (addr < end);
3181   return ParMarkBitMap::incomplete;
3182 }