/*
 * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psStringDedup.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/access.inline.hpp"
#include "oops/instanceClassLoaderKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

#include <math.h>

// All sizes are in HeapWords.
const size_t ParallelCompactData::Log2RegionSize = 16; // 64K words
const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize;
static_assert(ParallelCompactData::RegionSize >= BitsPerWord, "region-start bit word-aligned");
const size_t ParallelCompactData::RegionSizeBytes = RegionSize << LogHeapWordSize;
const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_shift = 27;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::los_mask = ~dc_mask;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;

SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];

SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
ReferenceProcessor* PSParallelCompact::_ref_processor = nullptr;

void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
                       HeapWord* destination)
{
  assert(src_region_idx != 0, "invalid src_region_idx");
  assert(partial_obj_size != 0, "invalid partial_obj_size argument");
  assert(destination != nullptr, "invalid destination argument");

  _src_region_idx = src_region_idx;
  _partial_obj_size = partial_obj_size;
  _destination = destination;

  // These fields may not be updated below, so make sure they're clear.
  assert(_dest_region_addr == nullptr, "should have been cleared");
  assert(_first_src_addr == nullptr, "should have been cleared");

  // Determine the number of destination regions for the partial object.
  HeapWord* const last_word = destination + partial_obj_size - 1;
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  HeapWord* const beg_region_addr = sd.region_align_down(destination);
  HeapWord* const end_region_addr = sd.region_align_down(last_word);

  if (beg_region_addr == end_region_addr) {
    // One destination region.
    _destination_count = 1;
    if (end_region_addr == destination) {
      // The destination falls on a region boundary, thus the first word of the
      // partial object will be the first word copied to the destination region.
      _dest_region_addr = end_region_addr;
      _first_src_addr = sd.region_to_addr(src_region_idx);
    }
  } else {
    // Two destination regions. When copied, the partial object will cross a
    // destination region boundary, so a word somewhere within the partial
    // object will be the first word copied to the second destination region.
    _destination_count = 2;
    _dest_region_addr = end_region_addr;
    const size_t ofs = pointer_delta(end_region_addr, destination);
    assert(ofs < _partial_obj_size, "sanity");
    _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
  }
}

void SplitInfo::clear()
{
  _src_region_idx = 0;
  _partial_obj_size = 0;
  _destination = nullptr;
  _destination_count = 0;
  _dest_region_addr = nullptr;
  _first_src_addr = nullptr;
  assert(!is_valid(), "sanity");
}

#ifdef ASSERT
void SplitInfo::verify_clear()
{
  assert(_src_region_idx == 0, "not clear");
  assert(_partial_obj_size == 0, "not clear");
  assert(_destination == nullptr, "not clear");
  assert(_destination_count == 0, "not clear");
  assert(_dest_region_addr == nullptr, "not clear");
  assert(_first_src_addr == nullptr, "not clear");
}
#endif // #ifdef ASSERT


void PSParallelCompact::print_on_error(outputStream* st) {
  _mark_bitmap.print_on_error(st);
}

ParallelCompactData::ParallelCompactData() :
  _heap_start(nullptr),
  DEBUG_ONLY(_heap_end(nullptr) COMMA)
  _region_vspace(nullptr),
  _reserved_byte_size(0),
  _region_data(nullptr),
  _region_count(0) {}

bool ParallelCompactData::initialize(MemRegion reserved_heap)
{
  _heap_start = reserved_heap.start();
  const size_t heap_size = reserved_heap.word_size();
  DEBUG_ONLY(_heap_end = _heap_start + heap_size;)

  assert(region_align_down(_heap_start) == _heap_start,
         "region start not aligned");

  return initialize_region_data(heap_size);
}

PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));

  const size_t rs_align = page_sz == os::vm_page_size() ? 0 :
    MAX2(page_sz, granularity);
  ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
  os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
                       rs.size(), page_sz);

  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
  if (vspace != 0) {
    if (vspace->expand_by(_reserved_byte_size)) {
      return vspace;
    }
    delete vspace;
    // Release memory reserved in the space.
    rs.release();
  }

  return 0;
}

bool ParallelCompactData::initialize_region_data(size_t heap_size)
{
  assert(is_aligned(heap_size, RegionSize), "precondition");

  const size_t count = heap_size >> Log2RegionSize;
  _region_vspace = create_vspace(count, sizeof(RegionData));
  if (_region_vspace != 0) {
    _region_data = (RegionData*)_region_vspace->reserved_low_addr();
    _region_count = count;
    return true;
  }
  return false;
}

void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
  assert(beg_region <= _region_count, "beg_region out of range");
  assert(end_region <= _region_count, "end_region out of range");

  const size_t region_cnt = end_region - beg_region;
  memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
}

void
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
{
  assert(is_region_aligned(beg), "not RegionSize aligned");
  assert(is_region_aligned(end), "not RegionSize aligned");

  size_t cur_region = addr_to_region_idx(beg);
  const size_t end_region = addr_to_region_idx(end);
  HeapWord* addr = beg;
  while (cur_region < end_region) {
    _region_data[cur_region].set_destination(addr);
    _region_data[cur_region].set_destination_count(0);
    _region_data[cur_region].set_source_region(cur_region);

    // Update live_obj_size so the region appears completely full.
    size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
    _region_data[cur_region].set_live_obj_size(live_size);

    ++cur_region;
    addr += RegionSize;
  }
}

// Find the point at which a space can be split and, if necessary, record the
// split point.
//
// If the current src region (which overflowed the destination space) doesn't
// have a partial object, the split point is at the beginning of the current src
// region (an "easy" split, no extra bookkeeping required).
//
// If the current src region has a partial object, the split point is in the
// region where that partial object starts (call it the split_region). If
// split_region has a partial object, then the split point is just after that
// partial object (a "hard" split where we have to record the split data and
// zero the partial_obj_size field). With a "hard" split, we know that the
// partial_obj ends within split_region because the partial object that caused
// the overflow starts in split_region. If split_region doesn't have a partial
// obj, then the split is at the beginning of split_region (another "easy"
// split).
HeapWord*
ParallelCompactData::summarize_split_space(size_t src_region,
                                           SplitInfo& split_info,
                                           HeapWord* destination,
                                           HeapWord* target_end,
                                           HeapWord** target_next)
{
  assert(destination <= target_end, "sanity");
  assert(destination + _region_data[src_region].data_size() > target_end,
         "region should not fit into target space");
  assert(is_region_aligned(target_end), "sanity");

  size_t split_region = src_region;
  HeapWord* split_destination = destination;
  size_t partial_obj_size = _region_data[src_region].partial_obj_size();

  if (destination + partial_obj_size > target_end) {
    // The split point is just after the partial object (if any) in the
    // src_region that contains the start of the object that overflowed the
    // destination space.
    //
    // Find the start of the "overflow" object and set split_region to the
    // region containing it.
    HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
    split_region = addr_to_region_idx(overflow_obj);

    // Clear the source_region field of all destination regions whose first word
    // came from data after the split point (a non-null source_region field
    // implies a region must be filled).
    //
    // An alternative to the simple loop below: clear during post_compact(),
    // which uses memcpy instead of individual stores, and is easy to
    // parallelize. (The downside is that it clears the entire RegionData
    // object as opposed to just one field.)
    //
    // post_compact() would have to clear the summary data up to the highest
    // address that was written during the summary phase, which would be
    //
    //         max(top, max(new_top, clear_top))
    //
    // where clear_top is a new field in SpaceInfo. Would have to set clear_top
    // to target_end.
    const RegionData* const sr = region(split_region);
    const size_t beg_idx =
      addr_to_region_idx(region_align_up(sr->destination() +
                                         sr->partial_obj_size()));
    const size_t end_idx = addr_to_region_idx(target_end);

    log_develop_trace(gc, compaction)("split: clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
    for (size_t idx = beg_idx; idx < end_idx; ++idx) {
      _region_data[idx].set_source_region(0);
    }

    // Set split_destination and partial_obj_size to reflect the split region.
    split_destination = sr->destination();
    partial_obj_size = sr->partial_obj_size();
  }

  // The split is recorded only if a partial object extends onto the region.
  if (partial_obj_size != 0) {
    _region_data[split_region].set_partial_obj_size(0);
    split_info.record(split_region, partial_obj_size, split_destination);
  }

  // Setup the continuation addresses.
  *target_next = split_destination + partial_obj_size;
  HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;

  if (log_develop_is_enabled(Trace, gc, compaction)) {
    const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
    log_develop_trace(gc, compaction)("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
                                      split_type, p2i(source_next), split_region, partial_obj_size);
    log_develop_trace(gc, compaction)("%s split: dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
                                      split_type, p2i(split_destination),
                                      addr_to_region_idx(split_destination),
                                      p2i(*target_next));

    if (partial_obj_size != 0) {
      HeapWord* const po_beg = split_info.destination();
      HeapWord* const po_end = po_beg + split_info.partial_obj_size();
      log_develop_trace(gc, compaction)("%s split: po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
                                        split_type,
                                        p2i(po_beg), addr_to_region_idx(po_beg),
                                        p2i(po_end), addr_to_region_idx(po_end));
    }
  }

  return source_next;
}

size_t ParallelCompactData::live_words_in_space(const MutableSpace* space,
                                                HeapWord** full_region_prefix_end) {
  size_t cur_region = addr_to_region_idx(space->bottom());
  const size_t end_region = addr_to_region_idx(region_align_up(space->top()));
  size_t live_words = 0;
  if (full_region_prefix_end == nullptr) {
    for (/* empty */; cur_region < end_region; ++cur_region) {
      live_words += _region_data[cur_region].data_size();
    }
  } else {
    bool first_set = false;
    for (/* empty */; cur_region < end_region; ++cur_region) {
      size_t live_words_in_region = _region_data[cur_region].data_size();
      if (!first_set && live_words_in_region < RegionSize) {
        *full_region_prefix_end = region_to_addr(cur_region);
        first_set = true;
      }
      live_words += live_words_in_region;
    }
    if (!first_set) {
      // All regions are full of live objs.
      assert(is_region_aligned(space->top()), "inv");
      *full_region_prefix_end = space->top();
    }
    assert(*full_region_prefix_end != nullptr, "postcondition");
    assert(is_region_aligned(*full_region_prefix_end), "inv");
    assert(*full_region_prefix_end >= space->bottom(), "in-range");
    assert(*full_region_prefix_end <= space->top(), "in-range");
  }
  return live_words;
}

bool ParallelCompactData::summarize(SplitInfo& split_info,
                                    HeapWord* source_beg, HeapWord* source_end,
                                    HeapWord** source_next,
                                    HeapWord* target_beg, HeapWord* target_end,
                                    HeapWord** target_next)
{
  HeapWord* const source_next_val = source_next == nullptr ? nullptr : *source_next;
  log_develop_trace(gc, compaction)(
      "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
      "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
      p2i(source_beg), p2i(source_end), p2i(source_next_val),
      p2i(target_beg), p2i(target_end), p2i(*target_next));

  size_t cur_region = addr_to_region_idx(source_beg);
  const size_t end_region = addr_to_region_idx(region_align_up(source_end));

  HeapWord *dest_addr = target_beg;
  while (cur_region < end_region) {
    // The destination must be set even if the region has no data.
    _region_data[cur_region].set_destination(dest_addr);

    size_t words = _region_data[cur_region].data_size();
    if (words > 0) {
      // If cur_region does not fit entirely into the target space, find a point
      // at which the source space can be 'split' so that part is copied to the
      // target space and the rest is copied elsewhere.
      if (dest_addr + words > target_end) {
        assert(source_next != nullptr, "source_next is null when splitting");
        *source_next = summarize_split_space(cur_region, split_info, dest_addr,
                                             target_end, target_next);
        return false;
      }

      // Compute the destination_count for cur_region, and if necessary, update
      // source_region for a destination region. The source_region field is
      // updated if cur_region is the first (left-most) region to be copied to a
      // destination region.
      //
      // The destination_count calculation is a bit subtle. A region that has
      // data that compacts into itself does not count itself as a destination.
      // This maintains the invariant that a zero count means the region is
      // available and can be claimed and then filled.
      uint destination_count = 0;
      if (split_info.is_split(cur_region)) {
        // The current region has been split: the partial object will be copied
        // to one destination space and the remaining data will be copied to
        // another destination space. Adjust the initial destination_count and,
        // if necessary, set the source_region field if the partial object will
        // cross a destination region boundary.
        destination_count = split_info.destination_count();
        if (destination_count == 2) {
          size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
          _region_data[dest_idx].set_source_region(cur_region);
        }
      }

      HeapWord* const last_addr = dest_addr + words - 1;
      const size_t dest_region_1 = addr_to_region_idx(dest_addr);
      const size_t dest_region_2 = addr_to_region_idx(last_addr);

      // Initially assume that the destination regions will be the same and
      // adjust the value below if necessary. Under this assumption, if
      // cur_region == dest_region_2, then cur_region will be compacted
      // completely into itself.
      destination_count += cur_region == dest_region_2 ? 0 : 1;
      if (dest_region_1 != dest_region_2) {
        // Destination regions differ; adjust destination_count.
        destination_count += 1;
        // Data from cur_region will be copied to the start of dest_region_2.
        _region_data[dest_region_2].set_source_region(cur_region);
      } else if (is_region_aligned(dest_addr)) {
        // Data from cur_region will be copied to the start of the destination
        // region.
        _region_data[dest_region_1].set_source_region(cur_region);
      }

      _region_data[cur_region].set_destination_count(destination_count);
      dest_addr += words;
    }

    ++cur_region;
  }

  *target_next = dest_addr;
  return true;
}

#ifdef ASSERT
void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
{
  const size_t* const beg = (const size_t*)vspace->committed_low_addr();
  const size_t* const end = (const size_t*)vspace->committed_high_addr();
  for (const size_t* p = beg; p < end; ++p) {
    assert(*p == 0, "not zero");
  }
}

void ParallelCompactData::verify_clear()
{
  verify_clear(_region_vspace);
}
#endif // #ifdef ASSERT

STWGCTimer          PSParallelCompact::_gc_timer;
ParallelOldTracer   PSParallelCompact::_gc_tracer;
elapsedTimer        PSParallelCompact::_accumulated_time;
unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
CollectorCounters*  PSParallelCompact::_counters = nullptr;
ParMarkBitMap       PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

class PCAdjustPointerClosure: public BasicOopIterateClosure {
  template <typename T>
  void do_oop_work(T* p) { PSParallelCompact::adjust_pointer(p); }

public:
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }

  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
};

static PCAdjustPointerClosure pc_adjust_pointer_closure;

bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _span_based_discoverer.set_span(heap->reserved_region());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,   // mt processing degree
                           ParallelGCThreads,   // mt discovery degree
                           false,               // concurrent_discovery
                           &_is_alive_closure); // non-header is alive closure

  _counters = new CollectorCounters("Parallel full collection pauses", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}

bool PSParallelCompact::initialize_aux_data() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();
  assert(mr.byte_size() != 0, "heap should be reserved");

  initialize_space_info();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
              "garbage collection for the requested " SIZE_FORMAT "KB heap.",
              _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  if (!_summary_data.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate " SIZE_FORMAT "KB card tables for parallel "
              "garbage collection for the requested " SIZE_FORMAT "KB heap.",
              _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  return true;
}

void PSParallelCompact::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  _space_info[old_space_id].set_space(heap->old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
}

void
PSParallelCompact::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that will
  // be set at the end of GC. The marking bitmap is cleared to top; nothing
  // should be marked above top. The summary data is cleared to the larger of
  // top & new_top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();
  HeapWord* const max_top = MAX2(top, _space_info[id].new_top());

  _mark_bitmap.clear_range(bot, top);

  const size_t beg_region = _summary_data.addr_to_region_idx(bot);
  const size_t end_region =
    _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
  _summary_data.clear_range(beg_region, end_region);

  // Clear the data used to 'split' regions.
  SplitInfo& split_info = _space_info[id].split_info();
  if (split_info.is_valid()) {
    split_info.clear();
  }
  DEBUG_ONLY(split_info.verify_clear();)
}

void PSParallelCompact::pre_compact()
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc. Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of young
  // collections will have swapped the spaces an unknown number of times.
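  // For instance (purely illustrative, not a scenario taken from this code):
  // after an odd number of successful young GCs since the last full GC,
  // young_gen->from_space() is the MutableSpace that _space_info[to_space_id]
  // recorded earlier, so both entries are refreshed here before any summary
  // data is computed.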
  GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  // Increment the invocation count
  heap->increment_total_collections(true);

  CodeCache::on_gc_marking_cycle_start();

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  // Fill in TLABs
  heap->ensure_parsability(true); // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)
}

void PSParallelCompact::post_compact()
{
  GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
  ParCompactionManager::remove_all_shadow_regions();

  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap, summary data and split info.
    clear_data_covering_space(SpaceId(id));
    {
      MutableSpace* space = _space_info[id].space();
      HeapWord* top = space->top();
      HeapWord* new_top = _space_info[id].new_top();
      if (ZapUnusedHeapArea && new_top < top) {
        space->mangle_region(MemRegion(new_top, top));
      }
      // Update top(). Must be done after clearing the bitmap and summary data.
      space->set_top(new_top);
    }
  }

  ParCompactionManager::flush_all_string_dedup_requests();

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  bool eden_empty = eden_space->is_empty();

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
    to_space->is_empty();

  PSCardTable* ct = heap->card_table();
  MemRegion old_mr = heap->old_gen()->committed();
  if (young_gen_empty) {
    ct->clear_MemRegion(old_mr);
  } else {
    ct->dirty_MemRegion(old_mr);
  }

  {
    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
    DEBUG_ONLY(MetaspaceUtils::verify();)
  }

  // Need to clear claim bits for the next mark.
  ClassLoaderDataGraph::clear_claimed_marks();

  heap->prune_scavengable_nmethods();

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();
}

HeapWord* PSParallelCompact::compute_dense_prefix_for_old_space(MutableSpace* old_space,
                                                                HeapWord* full_region_prefix_end) {
  const size_t region_size = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();

  // Iteration starts with the region *after* the full-region-prefix-end.
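  // (Illustrative numbers only, not taken from this code: if MarkSweepDeadRatio
  // is 5 and the old space capacity is 10M words, max_waste below starts at
  // 500K words; regions keep being appended to the dense prefix until their
  // accumulated dead space would exceed that budget.)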
  const RegionData* const start_region = sd.addr_to_region_ptr(full_region_prefix_end);
  // If the final region is not full, iteration stops before that region,
  // because fill_dense_prefix_end assumes that prefix_end <= top.
  const RegionData* const end_region = sd.addr_to_region_ptr(old_space->top());
  assert(start_region <= end_region, "inv");

  size_t max_waste = old_space->capacity_in_words() * (MarkSweepDeadRatio / 100.0);
  const RegionData* cur_region = start_region;
  for (/* empty */; cur_region < end_region; ++cur_region) {
    assert(region_size >= cur_region->data_size(), "inv");
    size_t dead_size = region_size - cur_region->data_size();
    if (max_waste < dead_size) {
      break;
    }
    max_waste -= dead_size;
  }

  HeapWord* const prefix_end = sd.region_to_addr(cur_region);
  assert(sd.is_region_aligned(prefix_end), "postcondition");
  assert(prefix_end >= full_region_prefix_end, "in-range");
  assert(prefix_end <= old_space->top(), "in-range");
  return prefix_end;
}

void PSParallelCompact::fill_dense_prefix_end(SpaceId id) {
  // Comparing two sizes to decide if filling is required:
  //
  // The size of the filler (min-obj-size) is 2 heap words with the default
  // MinObjAlignment, since both markword and klass take 1 heap word.
  //
  // The size of the gap (if any) right before dense-prefix-end is
  // MinObjAlignment.
  //
  // Need to fill in the gap only if it's smaller than min-obj-size, and the
  // filler obj will extend to next region.

  // Note: If min-fill-size decreases to 1, this whole method becomes redundant.
  assert(CollectedHeap::min_fill_size() >= 2, "inv");
#ifndef _LP64
  // On 32-bit systems, each heap word is 4 bytes, so MinObjAlignment == 2.
  // The gap is always equal to min-fill-size, so nothing to do.
  return;
#endif
  if (MinObjAlignment > 1) {
    return;
  }
  assert(CollectedHeap::min_fill_size() == 2, "inv");
  HeapWord* const dense_prefix_end = dense_prefix(id);
  assert(_summary_data.is_region_aligned(dense_prefix_end), "precondition");
  assert(dense_prefix_end <= space(id)->top(), "precondition");
  if (dense_prefix_end == space(id)->top()) {
    // Must not have single-word gap right before prefix-end/top.
    return;
  }
  RegionData* const region_after_dense_prefix = _summary_data.addr_to_region_ptr(dense_prefix_end);

  if (region_after_dense_prefix->partial_obj_size() != 0 ||
      _mark_bitmap.is_marked(dense_prefix_end)) {
    // The region after the dense prefix starts with live bytes.
    return;
  }

  HeapWord* block_start = start_array(id)->block_start_reaching_into_card(dense_prefix_end);
  if (block_start == dense_prefix_end - 1) {
    assert(!_mark_bitmap.is_marked(block_start), "inv");
    // There is exactly one heap word gap right before the dense prefix end, so we need a filler object.
    // The filler object will extend into region_after_dense_prefix.
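    // An illustrative sketch (layout is assumed, not taken from a real heap):
    // if the region-aligned dense_prefix_end is some address D and the last
    // live object below it ends at D - 1, the single-word gap [D - 1, D) is
    // plugged with a 2-word filler whose second word spills into the following
    // region, which is why that region's partial_obj_size is set to 1 below.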
    const size_t obj_len = 2; // min-fill-size
    HeapWord* const obj_beg = dense_prefix_end - 1;
    CollectedHeap::fill_with_object(obj_beg, obj_len);
    _mark_bitmap.mark_obj(obj_beg);
    _summary_data.addr_to_region_ptr(obj_beg)->add_live_obj(1);
    region_after_dense_prefix->set_partial_obj_size(1);
    region_after_dense_prefix->set_partial_obj_addr(obj_beg);
    assert(start_array(id) != nullptr, "sanity");
    start_array(id)->update_for_block(obj_beg, obj_beg + obj_len);
  }
}

bool PSParallelCompact::reassess_maximum_compaction(bool maximum_compaction,
                                                    size_t total_live_words,
                                                    MutableSpace* const old_space,
                                                    HeapWord* full_region_prefix_end) {
  // Check if the total live data exceeds the capacity of old-gen.
  const bool is_old_gen_overflowing = (total_live_words > old_space->capacity_in_words());

  // JVM flags
  const uint total_invocations = ParallelScavengeHeap::heap()->total_full_collections();
  assert(total_invocations >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations - _maximum_compaction_gc_num;
  const bool is_interval_ended = gcs_since_max > HeapMaximumCompactionInterval;

  // If all regions in old-gen are full
  const bool is_region_full =
    full_region_prefix_end >= _summary_data.region_align_down(old_space->top());

  if (maximum_compaction || is_old_gen_overflowing || is_interval_ended || is_region_full) {
    _maximum_compaction_gc_num = total_invocations;
    return true;
  }

  return false;
}

void PSParallelCompact::summary_phase(bool maximum_compaction)
{
  GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);

  MutableSpace* const old_space = _space_info[old_space_id].space();
  {
    size_t total_live_words = 0;
    HeapWord* full_region_prefix_end = nullptr;
    {
      // old-gen
      size_t live_words = _summary_data.live_words_in_space(old_space,
                                                            &full_region_prefix_end);
      total_live_words += live_words;
    }
    // young-gen
    for (uint i = eden_space_id; i < last_space_id; ++i) {
      const MutableSpace* space = _space_info[i].space();
      size_t live_words = _summary_data.live_words_in_space(space);
      total_live_words += live_words;
      _space_info[i].set_new_top(space->bottom() + live_words);
      _space_info[i].set_dense_prefix(space->bottom());
    }

    maximum_compaction = reassess_maximum_compaction(maximum_compaction,
                                                     total_live_words,
                                                     old_space,
                                                     full_region_prefix_end);
    HeapWord* dense_prefix_end =
      maximum_compaction ? full_region_prefix_end
                         : compute_dense_prefix_for_old_space(old_space,
                                                              full_region_prefix_end);
    SpaceId id = old_space_id;
    _space_info[id].set_dense_prefix(dense_prefix_end);

    if (dense_prefix_end != old_space->bottom()) {
      fill_dense_prefix_end(id);
      _summary_data.summarize_dense_prefix(old_space->bottom(), dense_prefix_end);
    }
    _summary_data.summarize(_space_info[id].split_info(),
                            dense_prefix_end, old_space->top(), nullptr,
                            dense_prefix_end, old_space->end(),
                            _space_info[id].new_top_addr());
  }

  // Summarize the remaining spaces in the young gen. The initial target space
  // is the old gen. If a space does not fit entirely into the target, then the
  // remainder is compacted into the space itself and that space becomes the new
  // target.
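  // An illustrative walk-through (sizes are assumed): if the old gen has room
  // for 1M more words after its own summarization and eden holds 1.5M live
  // words, roughly the first 1M words' worth of eden objects are summarized
  // into the old gen, eden is split at the returned next_src_addr, the
  // remaining live words are summarized to eden's own bottom, and eden then
  // becomes the target for the from and to spaces.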
  SpaceId dst_space_id = old_space_id;
  HeapWord* dst_space_end = old_space->end();
  HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
  for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    const size_t live = pointer_delta(_space_info[id].new_top(),
                                      space->bottom());
    const size_t available = pointer_delta(dst_space_end, *new_top_addr);

    if (live > 0 && live <= available) {
      // All the live data will fit.
      bool done = _summary_data.summarize(_space_info[id].split_info(),
                                          space->bottom(), space->top(),
                                          nullptr,
                                          *new_top_addr, dst_space_end,
                                          new_top_addr);
      assert(done, "space must fit into old gen");

      // Reset the new_top value for the space.
      _space_info[id].set_new_top(space->bottom());
    } else if (live > 0) {
      // Attempt to fit part of the source space into the target space.
      HeapWord* next_src_addr = nullptr;
      bool done = _summary_data.summarize(_space_info[id].split_info(),
                                          space->bottom(), space->top(),
                                          &next_src_addr,
                                          *new_top_addr, dst_space_end,
                                          new_top_addr);
      assert(!done, "space should not fit into old gen");
      assert(next_src_addr != nullptr, "sanity");

      // The source space becomes the new target, so the remainder is compacted
      // within the space itself.
      dst_space_id = SpaceId(id);
      dst_space_end = space->end();
      new_top_addr = _space_info[id].new_top_addr();
      done = _summary_data.summarize(_space_info[id].split_info(),
                                     next_src_addr, space->top(),
                                     nullptr,
                                     space->bottom(), dst_space_end,
                                     new_top_addr);
      assert(done, "space must fit when compacted into itself");
      assert(*new_top_addr <= space->top(), "usage should not grow");
    }
  }
}

// This method should contain all heap-specific policy for invoking a full
// collection. invoke_no_policy() will only attempt to compact the heap; it
// will do nothing further. If we need to bail out for policy reasons, scavenge
// before full gc, or any other specialized behavior, it needs to be added here.
//
// Note that this method should only be called from the vm_thread while at a
// safepoint.
//
// Note that the all_soft_refs_clear flag in the soft ref policy
// may be true because this method can be called without intervening
// activity. For example when the heap space is tight and full measures
// are being taken to free space.
bool PSParallelCompact::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(),
         "should be in vm thread");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  assert(!heap->is_stw_gc_active(), "not reentrant");

  IsSTWGCActiveMark mark;

  const bool clear_all_soft_refs =
    heap->soft_ref_policy()->should_clear_all_soft_refs();

  return PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
                                             maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != nullptr, "Sanity");

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  GCIdMark gc_id_mark;
  _gc_timer.register_gc_start();
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  GCCause::Cause gc_cause = heap->gc_cause();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // SoftRefPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(maximum_heap_compaction,
                          heap->soft_ref_policy());

  // Make sure data structures are sane, make the heap parsable, and do other
  // miscellaneous bookkeeping.
  pre_compact();

  const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

  {
    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);

    heap->pre_full_gc_dump(&_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->start_discovery(maximum_heap_compaction);

    ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                              false /* unregister_nmethods_during_purge */,
                              false /* lock_nmethod_free_separately */);

    marking_phase(&_gc_tracer);

    bool max_on_system_gc = UseMaximumCompactionOnSystemGC
      && GCCause::is_user_requested_gc(gc_cause);
    summary_phase(maximum_heap_compaction || max_on_system_gc);

#if COMPILER2_OR_JVMCI
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    forward_to_new_addr();

    adjust_pointers();

    compact();

    ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());

    ParCompactionManager::verify_all_region_stack_empty();

    // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
    // done before resizing.
    post_compact();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {
      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
      log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

      // Don't check if the size_policy is ready here. Let
      // the size_policy check that internally.
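      // A rough worked example of the sizing inputs gathered below (values are
      // assumed, not measured): with a 64M young gen whose survivor spaces are
      // 8M each, max_eden_size = 64M - 8M - 8M = 48M, and that value is passed
      // to compute_generations_free_space() together with the live and capacity
      // figures of both generations.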
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_gen_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_gen_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->soft_ref_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }

      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    if (UsePerfData) {
      PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
      counters->update_counters();
      counters->update_old_capacity(old_gen->capacity_in_bytes());
      counters->update_young_capacity(young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // Resize the metaspace capacity after a collection
    MetaspaceGC::compute_new_size();

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(&_gc_timer);
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return true;
}

class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
private:
  uint _worker_id;

public:
  PCAddThreadRootsMarkingTaskClosure(uint worker_id) : _worker_id(worker_id) { }
  void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    ResourceMark rm;

    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(_worker_id);

    PCMarkAndPushClosure mark_and_push_closure(cm);
    MarkingNMethodClosure mark_and_push_in_blobs(&mark_and_push_closure, !NMethodToOopClosure::FixRelocations, true /* keepalive nmethods */);

    thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);

    // Do the real work
    cm->follow_marking_stacks();
  }
};

void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(worker_id);

  do {
    oop obj = nullptr;
    ObjArrayTask task;
    if (ParCompactionManager::steal_objarray(worker_id, task)) {
      cm->follow_array((objArrayOop)task.obj(), task.index());
    } else if (ParCompactionManager::steal(worker_id, obj)) {
      cm->follow_contents(obj);
    }
    cm->follow_marking_stacks();
  } while (!terminator.offer_termination());
}

class MarkFromRootsTask : public WorkerTask {
  StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
  TaskTerminator _terminator;
  uint _active_workers;

public:
  MarkFromRootsTask(uint active_workers) :
    WorkerTask("MarkFromRootsTask"),
    _strong_roots_scope(active_workers),
    _terminator(active_workers, ParCompactionManager::oop_task_queues()),
    _active_workers(active_workers) {}

  virtual void work(uint worker_id) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
    cm->create_marking_stats_cache();
    PCMarkAndPushClosure mark_and_push_closure(cm);

    {
      CLDToOopClosure cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
      ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);

      // Do the real work
      cm->follow_marking_stacks();
    }

    PCAddThreadRootsMarkingTaskClosure closure(worker_id);
    Threads::possibly_parallel_threads_do(true /* is_par */, &closure);

    // Mark from OopStorages
    {
      _oop_storage_set_par_state.oops_do(&mark_and_push_closure);
      // Do the real work
      cm->follow_marking_stacks();
    }

    if (_active_workers > 1) {
      steal_marking_work(_terminator, worker_id);
    }
  }
};

class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  ParallelCompactRefProcProxyTask(uint max_workers)
    : RefProcProxyTask("ParallelCompactRefProcProxyTask", max_workers),
      _terminator(_max_workers, ParCompactionManager::oop_task_queues()) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
    PCMarkAndPushClosure keep_alive(cm);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
    _rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &keep_alive, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

static void flush_marking_stats_cache(const uint num_workers) {
  for (uint i = 0; i < num_workers; ++i) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(i);
    cm->flush_and_destroy_marking_stats_cache();
  }
}

void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);

  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
  {
    GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);

    MarkFromRootsTask task(active_gc_threads);
    ParallelScavengeHeap::heap()->workers().run_task(&task);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

    ReferenceProcessorStats stats;
    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());

    ref_processor()->set_active_mt_degree(active_gc_threads);
    ParallelCompactRefProcProxyTask task(ref_processor()->max_num_queues());
    stats = ref_processor()->process_discovered_references(task, pt);

    gc_tracer->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  {
    GCTraceTime(Debug, gc, phases) tm("Flush Marking Stats", &_gc_timer);

    flush_marking_stats_cache(active_gc_threads);
  }

  // This is the point where the entire marking should have completed.
  ParCompactionManager::verify_all_marking_stack_empty();

  {
    GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
    WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
                                is_alive_closure(),
                                &do_nothing_cl,
                                1);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(is_alive_closure());

      // Follow system dictionary roots and unload classes.
      unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethod's memory.
      ctx->purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
      ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx->free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
1368 JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred)); 1369 } 1370 1371 { 1372 GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer); 1373 _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers()); 1374 } 1375 #if TASKQUEUE_STATS 1376 ParCompactionManager::oop_task_queues()->print_and_reset_taskqueue_stats("Oop Queue"); 1377 ParCompactionManager::_objarray_task_queues->print_and_reset_taskqueue_stats("ObjArrayOop Queue"); 1378 #endif 1379 } 1380 1381 template<typename Func> 1382 void PSParallelCompact::adjust_in_space_helper(SpaceId id, volatile uint* claim_counter, Func&& on_stripe) { 1383 MutableSpace* sp = PSParallelCompact::space(id); 1384 HeapWord* const bottom = sp->bottom(); 1385 HeapWord* const top = sp->top(); 1386 if (bottom == top) { 1387 return; 1388 } 1389 1390 const uint num_regions_per_stripe = 2; 1391 const size_t region_size = ParallelCompactData::RegionSize; 1392 const size_t stripe_size = num_regions_per_stripe * region_size; 1393 1394 while (true) { 1395 uint counter = Atomic::fetch_then_add(claim_counter, num_regions_per_stripe); 1396 HeapWord* cur_stripe = bottom + counter * region_size; 1397 if (cur_stripe >= top) { 1398 break; 1399 } 1400 HeapWord* stripe_end = MIN2(cur_stripe + stripe_size, top); 1401 on_stripe(cur_stripe, stripe_end); 1402 } 1403 } 1404 1405 void PSParallelCompact::adjust_in_old_space(volatile uint* claim_counter) { 1406 // Regions in old-space shouldn't be split. 1407 assert(!_space_info[old_space_id].split_info().is_valid(), "inv"); 1408 1409 auto scan_obj_with_limit = [&] (HeapWord* obj_start, HeapWord* left, HeapWord* right) { 1410 assert(mark_bitmap()->is_marked(obj_start), "inv"); 1411 oop obj = cast_to_oop(obj_start); 1412 return obj->oop_iterate_size(&pc_adjust_pointer_closure, MemRegion(left, right)); 1413 }; 1414 1415 adjust_in_space_helper(old_space_id, claim_counter, [&] (HeapWord* stripe_start, HeapWord* stripe_end) { 1416 assert(_summary_data.is_region_aligned(stripe_start), "inv"); 1417 RegionData* cur_region = _summary_data.addr_to_region_ptr(stripe_start); 1418 HeapWord* obj_start; 1419 if (cur_region->partial_obj_size() != 0) { 1420 obj_start = cur_region->partial_obj_addr(); 1421 obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end); 1422 } else { 1423 obj_start = stripe_start; 1424 } 1425 1426 while (obj_start < stripe_end) { 1427 obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end); 1428 if (obj_start >= stripe_end) { 1429 break; 1430 } 1431 obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end); 1432 } 1433 }); 1434 } 1435 1436 void PSParallelCompact::adjust_in_young_space(SpaceId id, volatile uint* claim_counter) { 1437 adjust_in_space_helper(id, claim_counter, [](HeapWord* stripe_start, HeapWord* stripe_end) { 1438 HeapWord* obj_start = stripe_start; 1439 while (obj_start < stripe_end) { 1440 obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end); 1441 if (obj_start >= stripe_end) { 1442 break; 1443 } 1444 oop obj = cast_to_oop(obj_start); 1445 obj_start += obj->oop_iterate_size(&pc_adjust_pointer_closure); 1446 } 1447 }); 1448 } 1449 1450 void PSParallelCompact::adjust_pointers_in_spaces(uint worker_id, volatile uint* claim_counters) { 1451 auto start_time = Ticks::now(); 1452 adjust_in_old_space(&claim_counters[0]); 1453 for (uint id = eden_space_id; id < last_space_id; ++id) { 1454 adjust_in_young_space(SpaceId(id), &claim_counters[id]); 1455 } 1456 log_trace(gc, phases)("adjust_pointers_in_spaces worker %u: 
%.3f ms", worker_id, (Ticks::now() - start_time).seconds() * 1000); 1457 } 1458 1459 class PSAdjustTask final : public WorkerTask { 1460 SubTasksDone _sub_tasks; 1461 WeakProcessor::Task _weak_proc_task; 1462 OopStorageSetStrongParState<false, false> _oop_storage_iter; 1463 uint _nworkers; 1464 volatile uint _claim_counters[PSParallelCompact::last_space_id] = {}; 1465 1466 enum PSAdjustSubTask { 1467 PSAdjustSubTask_code_cache, 1468 1469 PSAdjustSubTask_num_elements 1470 }; 1471 1472 public: 1473 PSAdjustTask(uint nworkers) : 1474 WorkerTask("PSAdjust task"), 1475 _sub_tasks(PSAdjustSubTask_num_elements), 1476 _weak_proc_task(nworkers), 1477 _nworkers(nworkers) { 1478 1479 ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust); 1480 if (nworkers > 1) { 1481 Threads::change_thread_claim_token(); 1482 } 1483 } 1484 1485 ~PSAdjustTask() { 1486 Threads::assert_all_threads_claimed(); 1487 } 1488 1489 void work(uint worker_id) { 1490 ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id); 1491 cm->preserved_marks()->adjust_during_full_gc(); 1492 { 1493 // adjust pointers in all spaces 1494 PSParallelCompact::adjust_pointers_in_spaces(worker_id, _claim_counters); 1495 } 1496 { 1497 ResourceMark rm; 1498 Threads::possibly_parallel_oops_do(_nworkers > 1, &pc_adjust_pointer_closure, nullptr); 1499 } 1500 _oop_storage_iter.oops_do(&pc_adjust_pointer_closure); 1501 { 1502 CLDToOopClosure cld_closure(&pc_adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust); 1503 ClassLoaderDataGraph::cld_do(&cld_closure); 1504 } 1505 { 1506 AlwaysTrueClosure always_alive; 1507 _weak_proc_task.work(worker_id, &always_alive, &pc_adjust_pointer_closure); 1508 } 1509 if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) { 1510 NMethodToOopClosure adjust_code(&pc_adjust_pointer_closure, NMethodToOopClosure::FixRelocations); 1511 CodeCache::nmethods_do(&adjust_code); 1512 } 1513 _sub_tasks.all_tasks_claimed(); 1514 } 1515 }; 1516 1517 void PSParallelCompact::adjust_pointers() { 1518 // Adjust the pointers to reflect the new locations 1519 GCTraceTime(Info, gc, phases) tm("Adjust Pointers", &_gc_timer); 1520 uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers(); 1521 PSAdjustTask task(nworkers); 1522 ParallelScavengeHeap::heap()->workers().run_task(&task); 1523 } 1524 1525 // Split [start, end) evenly for a number of workers and return the 1526 // range for worker_id. 1527 static void split_regions_for_worker(size_t start, size_t end, 1528 uint worker_id, uint num_workers, 1529 size_t* worker_start, size_t* worker_end) { 1530 assert(start < end, "precondition"); 1531 assert(num_workers > 0, "precondition"); 1532 assert(worker_id < num_workers, "precondition"); 1533 1534 size_t num_regions = end - start; 1535 size_t num_regions_per_worker = num_regions / num_workers; 1536 size_t remainder = num_regions % num_workers; 1537 // The first few workers will get one extra. 1538 *worker_start = start + worker_id * num_regions_per_worker 1539 + MIN2(checked_cast<size_t>(worker_id), remainder); 1540 *worker_end = *worker_start + num_regions_per_worker 1541 + (worker_id < remainder ? 
}

void PSParallelCompact::forward_to_new_addr() {
  GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
  uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();

  struct ForwardTask final : public WorkerTask {
    uint _num_workers;

    explicit ForwardTask(uint num_workers) :
      WorkerTask("PSForward task"),
      _num_workers(num_workers) {}

    void work(uint worker_id) override {
      ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
      for (uint id = old_space_id; id < last_space_id; ++id) {
        MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
        HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
        HeapWord* top = sp->top();

        if (dense_prefix_addr == top) {
          continue;
        }

        size_t dense_prefix_region = _summary_data.addr_to_region_idx(dense_prefix_addr);
        size_t top_region = _summary_data.addr_to_region_idx(_summary_data.region_align_up(top));
        size_t start_region;
        size_t end_region;
        split_regions_for_worker(dense_prefix_region, top_region,
                                 worker_id, _num_workers,
                                 &start_region, &end_region);
        for (size_t cur_region = start_region; cur_region < end_region; ++cur_region) {
          RegionData* region_ptr = _summary_data.region(cur_region);
          size_t live_words = region_ptr->partial_obj_size();

          if (live_words == ParallelCompactData::RegionSize) {
            // No obj-start
            continue;
          }

          HeapWord* region_start = _summary_data.region_to_addr(cur_region);
          HeapWord* region_end = region_start + ParallelCompactData::RegionSize;

          HeapWord* cur_addr = region_start + live_words;

          HeapWord* destination = region_ptr->destination();
          while (cur_addr < region_end) {
            cur_addr = mark_bitmap()->find_obj_beg(cur_addr, region_end);
            if (cur_addr >= region_end) {
              break;
            }
            assert(mark_bitmap()->is_marked(cur_addr), "inv");
            HeapWord* new_addr = destination + live_words;
            oop obj = cast_to_oop(cur_addr);
            if (new_addr != cur_addr) {
              cm->preserved_marks()->push_if_necessary(obj, obj->mark());
              obj->forward_to(cast_to_oop(new_addr));
            }
            size_t obj_size = obj->size();
            live_words += obj_size;
            cur_addr += obj_size;
          }
        }
      }
    }
  } task(nworkers);

  ParallelScavengeHeap::heap()->workers().run_task(&task);
  debug_only(verify_forward();)
}

#ifdef ASSERT
void PSParallelCompact::verify_forward() {
  HeapWord* old_dense_prefix_addr = dense_prefix(SpaceId(old_space_id));
  RegionData* old_region = _summary_data.region(_summary_data.addr_to_region_idx(old_dense_prefix_addr));
  HeapWord* bump_ptr = old_region->partial_obj_size() != 0
                       ? old_dense_prefix_addr + old_region->partial_obj_size()
                       : old_dense_prefix_addr;
  SpaceId bump_ptr_space = old_space_id;

  for (uint id = old_space_id; id < last_space_id; ++id) {
    MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
    HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
    HeapWord* top = sp->top();
    HeapWord* cur_addr = dense_prefix_addr;

    while (cur_addr < top) {
      cur_addr = mark_bitmap()->find_obj_beg(cur_addr, top);
      if (cur_addr >= top) {
        break;
      }
      assert(mark_bitmap()->is_marked(cur_addr), "inv");
      // Move to the space containing cur_addr
      if (bump_ptr == _space_info[bump_ptr_space].new_top()) {
        bump_ptr = space(space_id(cur_addr))->bottom();
        bump_ptr_space = space_id(bump_ptr);
      }
      oop obj = cast_to_oop(cur_addr);
      if (cur_addr != bump_ptr) {
        assert(obj->forwardee() == cast_to_oop(bump_ptr), "inv");
      }
      bump_ptr += obj->size();
      cur_addr += obj->size();
    }
  }
}
#endif

// Helper class to print 8 region numbers per line and then print the total at the end.
class FillableRegionLogger : public StackObj {
private:
  Log(gc, compaction) log;
  static const int LineLength = 8;
  size_t _regions[LineLength];
  int _next_index;
  bool _enabled;
  size_t _total_regions;
public:
  FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
  ~FillableRegionLogger() {
    log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
  }

  void print_line() {
    if (!_enabled || _next_index == 0) {
      return;
    }
    FormatBuffer<> line("Fillable: ");
    for (int i = 0; i < _next_index; i++) {
      line.append(" " SIZE_FORMAT_W(7), _regions[i]);
    }
    log.trace("%s", line.buffer());
    _next_index = 0;
  }

  void handle(size_t region) {
    if (!_enabled) {
      return;
    }
    _regions[_next_index++] = region;
    if (_next_index == LineLength) {
      print_line();
    }
    _total_regions++;
  }
};

void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
{
  GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);

  // Find the threads that are active
  uint worker_id = 0;

  // Find all regions that are available (can be filled immediately) and
  // distribute them to the thread stacks.  The iteration is done in reverse
  // order (high to low) so the regions will be removed in ascending order.

  const ParallelCompactData& sd = PSParallelCompact::summary_data();

  // id + 1 is used to test termination so unsigned can
  // be used with an old_space_id == 0.
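  // (When id is 0 and gets decremented it wraps around to UINT_MAX, so id + 1
  // overflows to 0 and the "id + 1 > old_space_id" test in the loop below
  // fails, terminating the loop.)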
  FillableRegionLogger region_logger;
  for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
    SpaceInfo* const space_info = _space_info + id;
    HeapWord* const new_top = space_info->new_top();

    const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
    const size_t end_region =
      sd.addr_to_region_idx(sd.region_align_up(new_top));

    for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
      if (sd.region(cur)->claim_unsafe()) {
        ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
        bool result = sd.region(cur)->mark_normal();
        assert(result, "Must succeed at this point.");
        cm->region_stack()->push(cur);
        region_logger.handle(cur);
        // Assign regions to tasks in round-robin fashion.
        if (++worker_id == parallel_gc_threads) {
          worker_id = 0;
        }
      }
    }
    region_logger.print_line();
  }
}

static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(worker_id);

  // Drain the stacks that have been preloaded with regions
  // that are ready to fill.

  cm->drain_region_stacks();

  guarantee(cm->region_stack()->is_empty(), "Not empty");

  size_t region_index = 0;

  while (true) {
    if (ParCompactionManager::steal(worker_id, region_index)) {
      PSParallelCompact::fill_and_update_region(cm, region_index);
      cm->drain_region_stacks();
    } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
      // Fill and update an unavailable region with the help of a shadow region
      PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
      cm->drain_region_stacks();
    } else {
      if (terminator->offer_termination()) {
        break;
      }
      // Go around again.
    }
  }
}

class FillDensePrefixAndCompactionTask: public WorkerTask {
  uint _num_workers;
  TaskTerminator _terminator;

public:
  FillDensePrefixAndCompactionTask(uint active_workers) :
    WorkerTask("FillDensePrefixAndCompactionTask"),
    _num_workers(active_workers),
    _terminator(active_workers, ParCompactionManager::region_task_queues()) {
  }

  virtual void work(uint worker_id) {
    {
      auto start = Ticks::now();
      PSParallelCompact::fill_dead_objs_in_dense_prefix(worker_id, _num_workers);
      log_trace(gc, phases)("Fill dense prefix by worker %u: %.3f ms", worker_id, (Ticks::now() - start).seconds() * 1000);
    }
    compaction_with_stealing_work(&_terminator, worker_id);
  }
};

void PSParallelCompact::fill_range_in_dense_prefix(HeapWord* start, HeapWord* end) {
#ifdef ASSERT
  {
    assert(start < end, "precondition");
    assert(mark_bitmap()->find_obj_beg(start, end) == end, "precondition");
    HeapWord* bottom = _space_info[old_space_id].space()->bottom();
    if (start != bottom) {
      HeapWord* obj_start = mark_bitmap()->find_obj_beg_reverse(bottom, start);
      HeapWord* after_obj = obj_start + cast_to_oop(obj_start)->size();
      assert(after_obj == start, "precondition");
    }
  }
#endif

  CollectedHeap::fill_with_objects(start, pointer_delta(end, start));
  HeapWord* addr = start;
  do {
    size_t size = cast_to_oop(addr)->size();
    start_array(old_space_id)->update_for_block(addr, addr + size);
    addr += size;
  } while (addr < end);
}

void PSParallelCompact::fill_dead_objs_in_dense_prefix(uint worker_id, uint num_workers) {
  ParMarkBitMap* bitmap = mark_bitmap();

  HeapWord* const bottom = _space_info[old_space_id].space()->bottom();
  HeapWord* const prefix_end = dense_prefix(old_space_id);

  if (bottom == prefix_end) {
    return;
  }

  size_t bottom_region = _summary_data.addr_to_region_idx(bottom);
  size_t prefix_end_region = _summary_data.addr_to_region_idx(prefix_end);

  size_t start_region;
  size_t end_region;
  split_regions_for_worker(bottom_region, prefix_end_region,
                           worker_id, num_workers,
                           &start_region, &end_region);

  if (start_region == end_region) {
    return;
  }

  HeapWord* const start_addr = _summary_data.region_to_addr(start_region);
  HeapWord* const end_addr = _summary_data.region_to_addr(end_region);

  // Skip live partial obj (if any) from previous region.
  HeapWord* cur_addr;
  RegionData* start_region_ptr = _summary_data.region(start_region);
  if (start_region_ptr->partial_obj_size() != 0) {
    HeapWord* partial_obj_start = start_region_ptr->partial_obj_addr();
    assert(bitmap->is_marked(partial_obj_start), "inv");
    cur_addr = partial_obj_start + cast_to_oop(partial_obj_start)->size();
  } else {
    cur_addr = start_addr;
  }

  // end_addr is inclusive to handle regions starting with dead space.
  while (cur_addr <= end_addr) {
    // Use prefix_end to handle trailing obj in each worker region-chunk.
    HeapWord* live_start = bitmap->find_obj_beg(cur_addr, prefix_end);
    if (cur_addr != live_start) {
      // Only worker 0 handles preceding dead space.
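      // (Dead space at a chunk's start_addr, other than at the heap bottom,
      // is filled by the previous worker, whose loop runs through its
      // end_addr inclusively.)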
      if (cur_addr != start_addr || worker_id == 0) {
        fill_range_in_dense_prefix(cur_addr, live_start);
      }
    }
    if (live_start >= end_addr) {
      break;
    }
    assert(bitmap->is_marked(live_start), "inv");
    cur_addr = live_start + cast_to_oop(live_start)->size();
  }
}

void PSParallelCompact::compact() {
  GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);

  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  initialize_shadow_regions(active_gc_threads);
  prepare_region_draining_tasks(active_gc_threads);

  {
    GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);

    FillDensePrefixAndCompactionTask task(active_gc_threads);
    ParallelScavengeHeap::heap()->workers().run_task(&task);

#ifdef ASSERT
    verify_filler_in_dense_prefix();

    // Verify that all regions have been processed.
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      verify_complete(SpaceId(id));
    }
#endif
  }
}

#ifdef ASSERT
void PSParallelCompact::verify_filler_in_dense_prefix() {
  HeapWord* bottom = _space_info[old_space_id].space()->bottom();
  HeapWord* dense_prefix_end = dense_prefix(old_space_id);
  HeapWord* cur_addr = bottom;
  while (cur_addr < dense_prefix_end) {
    oop obj = cast_to_oop(cur_addr);
    oopDesc::verify(obj);
    if (!mark_bitmap()->is_marked(cur_addr)) {
      Klass* k = cast_to_oop(cur_addr)->klass_without_asserts();
      assert(k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass(), "inv");
    }
    cur_addr += obj->size();
  }
}

void PSParallelCompact::verify_complete(SpaceId space_id) {
  // All Regions served as compaction targets, from dense_prefix() to
  // new_top(), should be marked as filled and all Regions between new_top()
  // and top() should be available (i.e., should have been emptied).
  ParallelCompactData& sd = summary_data();
  SpaceInfo si = _space_info[space_id];
  HeapWord* new_top_addr = sd.region_align_up(si.new_top());
  HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
  const size_t beg_region = sd.addr_to_region_idx(si.dense_prefix());
  const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
  const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);

  size_t cur_region;
  for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
    const RegionData* const c = sd.region(cur_region);
    if (!c->completed()) {
      log_warning(gc)("region " SIZE_FORMAT " not filled: destination_count=%u",
                      cur_region, c->destination_count());
    }
  }

  for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
    const RegionData* const c = sd.region(cur_region);
    if (!c->available()) {
      log_warning(gc)("region " SIZE_FORMAT " not empty: destination_count=%u",
                      cur_region, c->destination_count());
    }
  }
}
#endif // #ifdef ASSERT

// Return the SpaceId for the space containing addr.  If addr is not in the
// heap, last_space_id is returned.  In debug mode it expects the address to be
// in the heap and asserts such.
PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
  assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    if (_space_info[id].space()->contains(addr)) {
      return SpaceId(id);
    }
  }

  assert(false, "no space contains the addr");
  return last_space_id;
}

// Skip over count live words starting from beg, and return the address of the
// next live word.  Unless marked, the word corresponding to beg is assumed to
// be dead.  Callers must either ensure beg does not correspond to the middle of
// an object, or account for those live words in some other way.  Callers must
// also ensure that there are enough live words in the range [beg, end) to skip.
HeapWord*
PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
{
  assert(count > 0, "sanity");

  ParMarkBitMap* m = mark_bitmap();
  HeapWord* cur_addr = beg;
  while (true) {
    cur_addr = m->find_obj_beg(cur_addr, end);
    assert(cur_addr < end, "inv");
    size_t obj_size = cast_to_oop(cur_addr)->size();
    // Strictly greater-than
    if (obj_size > count) {
      return cur_addr + count;
    }
    count -= obj_size;
    cur_addr += obj_size;
  }
}

HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
                                            SpaceId src_space_id,
                                            size_t src_region_idx)
{
  assert(summary_data().is_region_aligned(dest_addr), "not aligned");

  const SplitInfo& split_info = _space_info[src_space_id].split_info();
  if (split_info.dest_region_addr() == dest_addr) {
    // The partial object ending at the split point contains the first word to
    // be copied to dest_addr.
    return split_info.first_src_addr();
  }

  const ParallelCompactData& sd = summary_data();
  ParMarkBitMap* const bitmap = mark_bitmap();
  const size_t RegionSize = ParallelCompactData::RegionSize;

  assert(sd.is_region_aligned(dest_addr), "not aligned");
  const RegionData* const src_region_ptr = sd.region(src_region_idx);
  const size_t partial_obj_size = src_region_ptr->partial_obj_size();
  HeapWord* const src_region_destination = src_region_ptr->destination();

  assert(dest_addr >= src_region_destination, "wrong src region");
  assert(src_region_ptr->data_size() > 0, "src region cannot be empty");

  HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
  HeapWord* const src_region_end = src_region_beg + RegionSize;

  HeapWord* addr = src_region_beg;
  if (dest_addr == src_region_destination) {
    // Return the first live word in the source region.
    if (partial_obj_size == 0) {
      addr = bitmap->find_obj_beg(addr, src_region_end);
      assert(addr < src_region_end, "no objects start in src region");
    }
    return addr;
  }

  // Must skip some live data.
  size_t words_to_skip = dest_addr - src_region_destination;
  assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");

  if (partial_obj_size >= words_to_skip) {
    // All the live words to skip are part of the partial object.
    addr += words_to_skip;
    if (partial_obj_size == words_to_skip) {
      // Find the first live word past the partial object.
      addr = bitmap->find_obj_beg(addr, src_region_end);
      assert(addr < src_region_end, "wrong src region");
    }
    return addr;
  }

  // Skip over the partial object (if any).
  if (partial_obj_size != 0) {
    words_to_skip -= partial_obj_size;
    addr += partial_obj_size;
  }

  // Skip over live words due to objects that start in the region.
  addr = skip_live_words(addr, src_region_end, words_to_skip);
  assert(addr < src_region_end, "wrong src region");
  return addr;
}

void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
                                                     SpaceId src_space_id,
                                                     size_t beg_region,
                                                     HeapWord* end_addr)
{
  ParallelCompactData& sd = summary_data();

#ifdef ASSERT
  MutableSpace* const src_space = _space_info[src_space_id].space();
  HeapWord* const beg_addr = sd.region_to_addr(beg_region);
  assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
         "src_space_id does not match beg_addr");
  assert(src_space->contains(end_addr) || end_addr == src_space->end(),
         "src_space_id does not match end_addr");
#endif // #ifdef ASSERT

  RegionData* const beg = sd.region(beg_region);
  RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));

  // Regions up to new_top() are enqueued if they become available.
  HeapWord* const new_top = _space_info[src_space_id].new_top();
  RegionData* const enqueue_end =
    sd.addr_to_region_ptr(sd.region_align_up(new_top));

  for (RegionData* cur = beg; cur < end; ++cur) {
    assert(cur->data_size() > 0, "region must have live data");
    cur->decrement_destination_count();
    if (cur < enqueue_end && cur->available() && cur->claim()) {
      if (cur->mark_normal()) {
        cm->push_region(sd.region(cur));
      } else if (cur->mark_copied()) {
        // Try to copy the content of the shadow region back to its corresponding
        // heap region if the shadow region is filled. Otherwise, the GC thread
        // that fills the shadow region will copy the data back (see
        // MoveAndUpdateShadowClosure::complete_region).
        copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur));
        ParCompactionManager::push_shadow_region_mt_safe(cur->shadow_region());
        cur->set_completed();
      }
    }
  }
}

size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
                                          SpaceId& src_space_id,
                                          HeapWord*& src_space_top,
                                          HeapWord* end_addr)
{
  typedef ParallelCompactData::RegionData RegionData;

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  const size_t region_size = ParallelCompactData::RegionSize;

  size_t src_region_idx = 0;

  // Skip empty regions (if any) up to the top of the space.
  HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
  RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
  HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
  const RegionData* const top_region_ptr =
    sd.addr_to_region_ptr(top_aligned_up);
  while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
    ++src_region_ptr;
  }

  if (src_region_ptr < top_region_ptr) {
    // The next source region is in the current space.  Update src_region_idx
    // and the source address to match src_region_ptr.
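    // (sd.region(RegionData*) converts the region descriptor back to its
    // index in the summary data.)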
    src_region_idx = sd.region(src_region_ptr);
    HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
    if (src_region_addr > closure.source()) {
      closure.set_source(src_region_addr);
    }
    return src_region_idx;
  }

  // Switch to a new source space and find the first non-empty region.
  unsigned int space_id = src_space_id + 1;
  assert(space_id < last_space_id, "not enough spaces");

  HeapWord* const destination = closure.destination();

  do {
    MutableSpace* space = _space_info[space_id].space();
    HeapWord* const bottom = space->bottom();
    const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);

    // Iterate over the spaces that do not compact into themselves.
    if (bottom_cp->destination() != bottom) {
      HeapWord* const top_aligned_up = sd.region_align_up(space->top());
      const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);

      for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
        if (src_cp->live_obj_size() > 0) {
          // Found it.
          assert(src_cp->destination() == destination,
                 "first live obj in the space must match the destination");
          assert(src_cp->partial_obj_size() == 0,
                 "a space cannot begin with a partial obj");

          src_space_id = SpaceId(space_id);
          src_space_top = space->top();
          const size_t src_region_idx = sd.region(src_cp);
          closure.set_source(sd.region_to_addr(src_region_idx));
          return src_region_idx;
        } else {
          assert(src_cp->data_size() == 0, "sanity");
        }
      }
    }
  } while (++space_id < last_space_id);

  assert(false, "no source region was found");
  return 0;
}

HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
  ParallelCompactData& sd = summary_data();
  assert(sd.is_region_aligned(region_start_addr), "precondition");

  // Use per-region partial_obj_size to locate the end of the obj that extends to region_start_addr.
  SplitInfo& split_info = _space_info[space_id(region_start_addr)].split_info();
  size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
  size_t end_region_idx = sd.region_count();
  size_t accumulated_size = 0;
  for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
    if (split_info.is_split(region_idx)) {
      accumulated_size += split_info.partial_obj_size();
      break;
    }
    size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
    accumulated_size += cur_partial_obj_size;
    if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
      break;
    }
  }
  return region_start_addr + accumulated_size;
}

void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
{
  ParMarkBitMap* const bitmap = mark_bitmap();
  ParallelCompactData& sd = summary_data();
  RegionData* const region_ptr = sd.region(region_idx);

  // Get the source region and related info.
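  // (source_region() was recorded during the summary phase; it is the first
  // region containing data destined for this destination region.)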
  size_t src_region_idx = region_ptr->source_region();
  SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
  HeapWord* src_space_top = _space_info[src_space_id].space()->top();
  HeapWord* dest_addr = sd.region_to_addr(region_idx);

  closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));

  // Adjust src_region_idx to prepare for decrementing destination counts (the
  // destination count is not decremented when a region is copied to itself).
  if (src_region_idx == region_idx) {
    src_region_idx += 1;
  }

  if (bitmap->is_unmarked(closure.source())) {
    // The first source word is in the middle of an object; copy the remainder
    // of the object or as much as will fit.  The fact that pointer updates were
    // deferred will be noted when the object header is processed.
    HeapWord* const old_src_addr = closure.source();
    {
      HeapWord* region_start = sd.region_align_down(closure.source());
      HeapWord* obj_start = bitmap->find_obj_beg_reverse(region_start, closure.source());
      HeapWord* obj_end;
      if (bitmap->is_marked(obj_start)) {
        HeapWord* next_region_start = region_start + ParallelCompactData::RegionSize;
        HeapWord* partial_obj_start = (next_region_start >= src_space_top)
                                      ? nullptr
                                      : sd.addr_to_region_ptr(next_region_start)->partial_obj_addr();
        if (partial_obj_start == obj_start) {
          // This obj extends to next region.
          obj_end = partial_obj_end(next_region_start);
        } else {
          // Completely contained in this region; safe to use size().
          obj_end = obj_start + cast_to_oop(obj_start)->size();
        }
      } else {
        // This obj extends to current region.
        obj_end = partial_obj_end(region_start);
      }
      size_t partial_obj_size = pointer_delta(obj_end, closure.source());
      closure.copy_partial_obj(partial_obj_size);
    }

    if (closure.is_full()) {
      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
      closure.complete_region(dest_addr, region_ptr);
      return;
    }

    HeapWord* const end_addr = sd.region_align_down(closure.source());
    if (sd.region_align_down(old_src_addr) != end_addr) {
      // The partial object was copied from more than one source region.
      decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

      // Move to the next source region, possibly switching spaces as well.  All
      // args except end_addr may be modified.
      src_region_idx = next_src_region(closure, src_space_id, src_space_top,
                                       end_addr);
    }
  }

  do {
    HeapWord* cur_addr = closure.source();
    HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
                                    src_space_top);
    HeapWord* partial_obj_start = (end_addr == src_space_top)
                                  ? nullptr
                                  : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
    // apply closure on objs inside [cur_addr, end_addr)
    do {
      cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
      if (cur_addr == end_addr) {
        break;
      }
      size_t obj_size;
      if (partial_obj_start == cur_addr) {
        obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
      } else {
        // This obj doesn't extend into next region; size() is safe to use.
        obj_size = cast_to_oop(cur_addr)->size();
      }
      closure.do_addr(cur_addr, obj_size);
      cur_addr += obj_size;
    } while (cur_addr < end_addr && !closure.is_full());

    if (closure.is_full()) {
      decrement_destination_counts(cm, src_space_id, src_region_idx,
                                   closure.source());
      closure.complete_region(dest_addr, region_ptr);
      return;
    }

    decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

    // Move to the next source region, possibly switching spaces as well.  All
    // args except end_addr may be modified.
    src_region_idx = next_src_region(closure, src_space_id, src_space_top,
                                     end_addr);
  } while (true);
}

void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
{
  MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
  fill_region(cm, cl, region_idx);
}

void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx)
{
  // Get a shadow region first
  ParallelCompactData& sd = summary_data();
  RegionData* const region_ptr = sd.region(region_idx);
  size_t shadow_region = ParCompactionManager::pop_shadow_region_mt_safe(region_ptr);
  // The InvalidShadow return value indicates the corresponding heap region is available,
  // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
  // MoveAndUpdateShadowClosure to fill the acquired shadow region.
  if (shadow_region == ParCompactionManager::InvalidShadow) {
    MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
    region_ptr->shadow_to_normal();
    return fill_region(cm, cl, region_idx);
  } else {
    MoveAndUpdateShadowClosure cl(mark_bitmap(), region_idx, shadow_region);
    return fill_region(cm, cl, region_idx);
  }
}

void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr)
{
  Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
}

bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_t& region_idx)
{
  size_t next = cm->next_shadow_region();
  ParallelCompactData& sd = summary_data();
  size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  while (next < old_new_top) {
    if (sd.region(next)->mark_shadow()) {
      region_idx = next;
      return true;
    }
    next = cm->move_next_shadow_region_by(active_gc_threads);
  }

  return false;
}

// The shadow region is an optimization to address region dependencies in full GC. The basic
// idea is making more regions available by temporarily storing their live objects in empty
// shadow regions to resolve dependencies between them and the destination regions. Therefore,
// GC threads need not wait for destination regions to be available before processing sources.
//
// A typical workflow would be:
// After draining its own stack and failing to steal from others, a GC worker would pick an
// unavailable region (destination count > 0) and get a shadow region. Then the worker fills
// the shadow region by copying live objects from source regions of the unavailable one. Once
// the unavailable region becomes available, the data in the shadow region will be copied back.
// Shadow regions are empty regions in the to-space and regions between top and end of other spaces.
void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
{
  const ParallelCompactData& sd = PSParallelCompact::summary_data();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    SpaceInfo* const space_info = _space_info + id;
    MutableSpace* const space = space_info->space();

    const size_t beg_region =
      sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
    const size_t end_region =
      sd.addr_to_region_idx(sd.region_align_down(space->end()));

    for (size_t cur = beg_region; cur < end_region; ++cur) {
      ParCompactionManager::push_shadow_region(cur);
    }
  }

  size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
  for (uint i = 0; i < parallel_gc_threads; i++) {
    ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
    cm->set_next_shadow_region(beg_region + i);
  }
}

void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
{
  size_t words = MIN2(partial_obj_size, words_remaining());

  // This test is necessary; if omitted, the pointer updates to a partial object
  // that crosses the dense prefix boundary could be overwritten.
  if (source() != copy_destination()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
  }
  update_state(words);
}

void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
  region_ptr->set_completed();
}

void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  assert(destination() != nullptr, "sanity");
  _source = addr;

  // The start_array must be updated even if the object is not moving.
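  // (_start_array is null unless the destination is in the old generation,
  // the only space that keeps an ObjectStartArray.)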
  if (_start_array != nullptr) {
    _start_array->update_for_block(destination(), destination() + words);
  }

  // Avoid overflow
  words = MIN2(words, words_remaining());
  assert(words > 0, "inv");

  if (copy_destination() != source()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    assert(source() != destination(), "inv");
    assert(cast_to_oop(source())->is_forwarded(), "inv");
    assert(cast_to_oop(source())->forwardee() == cast_to_oop(destination()), "inv");
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
    cast_to_oop(copy_destination())->init_mark();
  }

  update_state(words);
}

void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
  // Record the shadow region index
  region_ptr->set_shadow_region(_shadow);
  // Mark the shadow region as filled to indicate the data is ready to be
  // copied back
  region_ptr->mark_filled();
  // Try to copy the content of the shadow region back to its corresponding
  // heap region if available; the GC thread that decreases the destination
  // count to zero will do the copying otherwise (see
  // PSParallelCompact::decrement_destination_counts).
  if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
    region_ptr->set_completed();
    PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
    ParCompactionManager::push_shadow_region_mt_safe(_shadow);
  }
}