/*
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psStringDedup.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/access.inline.hpp"
#include "oops/instanceClassLoaderKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
"oops/instanceMirrorKlass.inline.hpp" 81 #include "oops/methodData.hpp" 82 #include "oops/objArrayKlass.inline.hpp" 83 #include "oops/oop.inline.hpp" 84 #include "runtime/atomic.hpp" 85 #include "runtime/handles.inline.hpp" 86 #include "runtime/java.hpp" 87 #include "runtime/safepoint.hpp" 88 #include "runtime/threads.hpp" 89 #include "runtime/vmThread.hpp" 90 #include "services/memoryService.hpp" 91 #include "utilities/align.hpp" 92 #include "utilities/debug.hpp" 93 #include "utilities/events.hpp" 94 #include "utilities/formatBuffer.hpp" 95 #include "utilities/macros.hpp" 96 #include "utilities/stack.inline.hpp" 97 #if INCLUDE_JVMCI 98 #include "jvmci/jvmci.hpp" 99 #endif 100 101 #include <math.h> 102 103 // All sizes are in HeapWords. 104 const size_t ParallelCompactData::Log2RegionSize = 16; // 64K words 105 const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize; 106 static_assert(ParallelCompactData::RegionSize >= BitsPerWord, "region-start bit word-aligned"); 107 const size_t ParallelCompactData::RegionSizeBytes = 108 RegionSize << LogHeapWordSize; 109 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1; 110 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1; 111 const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask; 112 113 const ParallelCompactData::RegionData::region_sz_t 114 ParallelCompactData::RegionData::dc_shift = 27; 115 116 const ParallelCompactData::RegionData::region_sz_t 117 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift; 118 119 const ParallelCompactData::RegionData::region_sz_t 120 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift; 121 122 const ParallelCompactData::RegionData::region_sz_t 123 ParallelCompactData::RegionData::los_mask = ~dc_mask; 124 125 const ParallelCompactData::RegionData::region_sz_t 126 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift; 127 128 const ParallelCompactData::RegionData::region_sz_t 129 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift; 130 131 bool ParallelCompactData::RegionData::is_clear() { 132 return (_destination == nullptr) && 133 (_source_region == 0) && 134 (_partial_obj_addr == nullptr) && 135 (_partial_obj_size == 0) && 136 (_dc_and_los == 0) && 137 (_shadow_state == 0); 138 } 139 140 #ifdef ASSERT 141 void ParallelCompactData::RegionData::verify_clear() { 142 assert(_destination == nullptr, "inv"); 143 assert(_source_region == 0, "inv"); 144 assert(_partial_obj_addr == nullptr, "inv"); 145 assert(_partial_obj_size == 0, "inv"); 146 assert(_dc_and_los == 0, "inv"); 147 assert(_shadow_state == 0, "inv"); 148 } 149 #endif 150 151 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id]; 152 153 SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer; 154 ReferenceProcessor* PSParallelCompact::_ref_processor = nullptr; 155 156 void SplitInfo::record(size_t split_region_idx, HeapWord* split_point, size_t preceding_live_words) { 157 assert(split_region_idx != 0, "precondition"); 158 159 // Obj denoted by split_point will be deferred to the next space. 
  assert(split_point != nullptr, "precondition");

  const ParallelCompactData& sd = PSParallelCompact::summary_data();

  PSParallelCompact::RegionData* split_region_ptr = sd.region(split_region_idx);
  assert(preceding_live_words < split_region_ptr->data_size(), "inv");

  HeapWord* preceding_destination = split_region_ptr->destination();
  assert(preceding_destination != nullptr, "inv");

  // How many regions does the preceding part occupy
  uint preceding_destination_count;
  if (preceding_live_words == 0) {
    preceding_destination_count = 0;
  } else {
    // -1 so that the ending address doesn't fall on the region-boundary
    if (sd.region_align_down(preceding_destination) ==
        sd.region_align_down(preceding_destination + preceding_live_words - 1)) {
      preceding_destination_count = 1;
    } else {
      preceding_destination_count = 2;
    }
  }

  _split_region_idx = split_region_idx;
  _split_point = split_point;
  _preceding_live_words = preceding_live_words;
  _preceding_destination = preceding_destination;
  _preceding_destination_count = preceding_destination_count;
}

void SplitInfo::clear()
{
  _split_region_idx = 0;
  _split_point = nullptr;
  _preceding_live_words = 0;
  _preceding_destination = nullptr;
  _preceding_destination_count = 0;
  assert(!is_valid(), "sanity");
}

#ifdef ASSERT
void SplitInfo::verify_clear()
{
  assert(_split_region_idx == 0, "not clear");
  assert(_split_point == nullptr, "not clear");
  assert(_preceding_live_words == 0, "not clear");
  assert(_preceding_destination == nullptr, "not clear");
  assert(_preceding_destination_count == 0, "not clear");
}
#endif // #ifdef ASSERT


void PSParallelCompact::print_on_error(outputStream* st) {
  _mark_bitmap.print_on_error(st);
}

ParallelCompactData::ParallelCompactData() :
  _heap_start(nullptr),
  DEBUG_ONLY(_heap_end(nullptr) COMMA)
  _region_vspace(nullptr),
  _reserved_byte_size(0),
  _region_data(nullptr),
  _region_count(0) {}

bool ParallelCompactData::initialize(MemRegion reserved_heap)
{
  _heap_start = reserved_heap.start();
  const size_t heap_size = reserved_heap.word_size();
  DEBUG_ONLY(_heap_end = _heap_start + heap_size;)

  assert(region_align_down(_heap_start) == _heap_start,
         "region start not aligned");

  return initialize_region_data(heap_size);
}

PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  const size_t rs_align = MAX2(page_sz, granularity);

  _reserved_byte_size = align_up(raw_bytes, rs_align);

  ReservedSpace rs = MemoryReserver::reserve(_reserved_byte_size,
                                             rs_align,
                                             page_sz);

  if (!rs.is_reserved()) {
    // Failed to reserve memory.
    return nullptr;
  }

  os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
                       rs.size(), page_sz);

  MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);

  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);

  if (!vspace->expand_by(_reserved_byte_size)) {
    // Failed to commit memory.

    delete vspace;

    // Release memory reserved in the space.
    MemoryReserver::release(rs);

    return nullptr;
  }

  return vspace;
}

bool ParallelCompactData::initialize_region_data(size_t heap_size)
{
  assert(is_aligned(heap_size, RegionSize), "precondition");

  const size_t count = heap_size >> Log2RegionSize;
  _region_vspace = create_vspace(count, sizeof(RegionData));
  if (_region_vspace != nullptr) {
    _region_data = (RegionData*)_region_vspace->reserved_low_addr();
    _region_count = count;
    return true;
  }
  return false;
}

void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
  assert(beg_region <= _region_count, "beg_region out of range");
  assert(end_region <= _region_count, "end_region out of range");

  const size_t region_cnt = end_region - beg_region;
  memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
}

void
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
{
  assert(is_region_aligned(beg), "not RegionSize aligned");
  assert(is_region_aligned(end), "not RegionSize aligned");

  size_t cur_region = addr_to_region_idx(beg);
  const size_t end_region = addr_to_region_idx(end);
  HeapWord* addr = beg;
  while (cur_region < end_region) {
    _region_data[cur_region].set_destination(addr);
    _region_data[cur_region].set_destination_count(0);
    _region_data[cur_region].set_source_region(cur_region);

    // Update live_obj_size so the region appears completely full.
    size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
    _region_data[cur_region].set_live_obj_size(live_size);

    ++cur_region;
    addr += RegionSize;
  }
}

// The total live words on src_region would overflow the target space, so find
// the overflowing object and record the split point. The invariant is that an
// obj should not cross space boundary.
HeapWord* ParallelCompactData::summarize_split_space(size_t src_region,
                                                     SplitInfo& split_info,
                                                     HeapWord* const destination,
                                                     HeapWord* const target_end,
                                                     HeapWord** target_next) {
  assert(destination <= target_end, "sanity");
  assert(destination + _region_data[src_region].data_size() > target_end,
         "region should not fit into target space");
  assert(is_region_aligned(target_end), "sanity");

  size_t partial_obj_size = _region_data[src_region].partial_obj_size();

  if (destination + partial_obj_size > target_end) {
    assert(partial_obj_size > 0, "inv");
    // The overflowing obj is from a previous region.
    //
    // source-regions:
    //
    // ***************
    // |     A|AA    |
    // ***************
    //        ^
    //        | split-point
    //
    // dest-region:
    //
    // ********
    // |~~~~A |
    // ********
    //       ^^
    //       || target-space-end
    //       |
    //       | destination
    //
    // AAA would overflow target-space.
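    // The split point is therefore the start of the partial obj (A), which
    // lives on an earlier region; record how many live words on that region
    // precede it.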
    //
    HeapWord* overflowing_obj = _region_data[src_region].partial_obj_addr();
    size_t split_region = addr_to_region_idx(overflowing_obj);

    // The number of live words before the overflowing object on this split region
    size_t preceding_live_words;
    if (is_region_aligned(overflowing_obj)) {
      preceding_live_words = 0;
    } else {
      // Words accounted by the overflowing object on the split region
      size_t overflowing_size = pointer_delta(region_align_up(overflowing_obj), overflowing_obj);
      preceding_live_words = region(split_region)->data_size() - overflowing_size;
    }

    split_info.record(split_region, overflowing_obj, preceding_live_words);

    // The [overflowing_obj, src_region_start) part has been accounted for, so
    // must move back the new_top, now that this overflowing obj is deferred.
    HeapWord* new_top = destination - pointer_delta(region_to_addr(src_region), overflowing_obj);

    // If the overflowing obj was relocated to its original destination,
    // those destination regions would have their source_region set. Now that
    // this overflowing obj is relocated somewhere else, reset the
    // source_region.
    {
      size_t range_start = addr_to_region_idx(region_align_up(new_top));
      size_t range_end = addr_to_region_idx(region_align_up(destination));
      for (size_t i = range_start; i < range_end; ++i) {
        region(i)->set_source_region(0);
      }
    }

    // Update new top of target space
    *target_next = new_top;

    return overflowing_obj;
  }

  // Obj-iteration to locate the overflowing obj
  HeapWord* region_start = region_to_addr(src_region);
  HeapWord* region_end = region_start + RegionSize;
  HeapWord* cur_addr = region_start + partial_obj_size;
  size_t live_words = partial_obj_size;

  while (true) {
    assert(cur_addr < region_end, "inv");
    cur_addr = PSParallelCompact::mark_bitmap()->find_obj_beg(cur_addr, region_end);
    // There must be an overflowing obj in this region
    assert(cur_addr < region_end, "inv");

    oop obj = cast_to_oop(cur_addr);
    size_t obj_size = obj->size();
    if (destination + live_words + obj_size > target_end) {
      // Found the overflowing obj
      split_info.record(src_region, cur_addr, live_words);
      *target_next = destination + live_words;
      return cur_addr;
    }

    live_words += obj_size;
    cur_addr += obj_size;
  }
}

size_t ParallelCompactData::live_words_in_space(const MutableSpace* space,
                                                HeapWord** full_region_prefix_end) {
  size_t cur_region = addr_to_region_idx(space->bottom());
  const size_t end_region = addr_to_region_idx(region_align_up(space->top()));
  size_t live_words = 0;
  if (full_region_prefix_end == nullptr) {
    for (/* empty */; cur_region < end_region; ++cur_region) {
      live_words += _region_data[cur_region].data_size();
    }
  } else {
    bool first_set = false;
    for (/* empty */; cur_region < end_region; ++cur_region) {
      size_t live_words_in_region = _region_data[cur_region].data_size();
      if (!first_set && live_words_in_region < RegionSize) {
        *full_region_prefix_end = region_to_addr(cur_region);
        first_set = true;
      }
      live_words += live_words_in_region;
    }
    if (!first_set) {
      // All regions are full of live objs.
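      // The full-region prefix extends all the way to space->top().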
      assert(is_region_aligned(space->top()), "inv");
      *full_region_prefix_end = space->top();
    }
    assert(*full_region_prefix_end != nullptr, "postcondition");
    assert(is_region_aligned(*full_region_prefix_end), "inv");
    assert(*full_region_prefix_end >= space->bottom(), "in-range");
    assert(*full_region_prefix_end <= space->top(), "in-range");
  }
  return live_words;
}

bool ParallelCompactData::summarize(SplitInfo& split_info,
                                    HeapWord* source_beg, HeapWord* source_end,
                                    HeapWord** source_next,
                                    HeapWord* target_beg, HeapWord* target_end,
                                    HeapWord** target_next)
{
  HeapWord* const source_next_val = source_next == nullptr ? nullptr : *source_next;
  log_develop_trace(gc, compaction)(
      "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
      " tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
      p2i(source_beg), p2i(source_end), p2i(source_next_val),
      p2i(target_beg), p2i(target_end), p2i(*target_next));

  size_t cur_region = addr_to_region_idx(source_beg);
  const size_t end_region = addr_to_region_idx(region_align_up(source_end));

  HeapWord* dest_addr = target_beg;
  for (/* empty */; cur_region < end_region; cur_region++) {
    size_t words = _region_data[cur_region].data_size();

    // Skip empty ones
    if (words == 0) {
      continue;
    }

    if (split_info.is_split(cur_region)) {
      assert(words > split_info.preceding_live_words(), "inv");
      words -= split_info.preceding_live_words();
    }

    _region_data[cur_region].set_destination(dest_addr);

    // If cur_region does not fit entirely into the target space, find a point
    // at which the source space can be 'split' so that part is copied to the
    // target space and the rest is copied elsewhere.
    if (dest_addr + words > target_end) {
      assert(source_next != nullptr, "source_next is null when splitting");
      *source_next = summarize_split_space(cur_region, split_info, dest_addr,
                                           target_end, target_next);
      return false;
    }

    uint destination_count = split_info.is_split(cur_region)
                             ? split_info.preceding_destination_count()
                             : 0;

    HeapWord* const last_addr = dest_addr + words - 1;
    const size_t dest_region_1 = addr_to_region_idx(dest_addr);
    const size_t dest_region_2 = addr_to_region_idx(last_addr);

    // Initially assume that the destination regions will be the same and
    // adjust the value below if necessary. Under this assumption, if
    // cur_region == dest_region_2, then cur_region will be compacted
    // completely into itself.
    destination_count += cur_region == dest_region_2 ? 0 : 1;
    if (dest_region_1 != dest_region_2) {
      // Destination regions differ; adjust destination_count.
      destination_count += 1;
      // Data from cur_region will be copied to the start of dest_region_2.
      _region_data[dest_region_2].set_source_region(cur_region);
    } else if (is_region_aligned(dest_addr)) {
      // Data from cur_region will be copied to the start of the destination
      // region.
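      // (A destination region records which source region fills its start,
      // so the compaction phase can locate where copying into it begins.)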
      _region_data[dest_region_1].set_source_region(cur_region);
    }

    _region_data[cur_region].set_destination_count(destination_count);
    dest_addr += words;
  }

  *target_next = dest_addr;
  return true;
}

#ifdef ASSERT
void ParallelCompactData::verify_clear() {
  for (uint cur_idx = 0; cur_idx < region_count(); ++cur_idx) {
    if (!region(cur_idx)->is_clear()) {
      log_warning(gc)("Uncleared Region: %u", cur_idx);
      region(cur_idx)->verify_clear();
    }
  }
}
#endif // #ifdef ASSERT

STWGCTimer          PSParallelCompact::_gc_timer;
ParallelOldTracer   PSParallelCompact::_gc_tracer;
elapsedTimer        PSParallelCompact::_accumulated_time;
unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
CollectorCounters*  PSParallelCompact::_counters = nullptr;
ParMarkBitMap       PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

class PCAdjustPointerClosure: public BasicOopIterateClosure {
  template <typename T>
  void do_oop_work(T* p) { PSParallelCompact::adjust_pointer(p); }

public:
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }

  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
};

static PCAdjustPointerClosure pc_adjust_pointer_closure;

bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _span_based_discoverer.set_span(heap->reserved_region());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,   // mt processing degree
                           ParallelGCThreads,   // mt discovery degree
                           false,               // concurrent_discovery
                           &_is_alive_closure); // non-header is alive closure

  _counters = new CollectorCounters("Parallel full collection pauses", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}

bool PSParallelCompact::initialize_aux_data() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();
  assert(mr.byte_size() != 0, "heap should be reserved");

  initialize_space_info();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate %zuKB bitmaps for parallel "
              "garbage collection for the requested %zuKB heap.",
              _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  if (!_summary_data.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate %zuKB card tables for parallel "
              "garbage collection for the requested %zuKB heap.",
              _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  return true;
}

void PSParallelCompact::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  _space_info[old_space_id].set_space(heap->old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
}

void
PSParallelCompact::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that will
  // be set at the end of GC. The marking bitmap is cleared to top; nothing
  // should be marked above top. The summary data is cleared to the larger of
  // top & new_top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();
  HeapWord* const max_top = MAX2(top, _space_info[id].new_top());

  _mark_bitmap.clear_range(bot, top);

  const size_t beg_region = _summary_data.addr_to_region_idx(bot);
  const size_t end_region =
    _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
  _summary_data.clear_range(beg_region, end_region);

  // Clear the data used to 'split' regions.
  SplitInfo& split_info = _space_info[id].split_info();
  if (split_info.is_valid()) {
    split_info.clear();
  }
  DEBUG_ONLY(split_info.verify_clear();)
}

void PSParallelCompact::pre_compact()
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc. Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of young
  // collections will have swapped the spaces an unknown number of times.
  GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  // Increment the invocation count
  heap->increment_total_collections(true);

  CodeCache::on_gc_marking_cycle_start();

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)
}

void PSParallelCompact::post_compact()
{
  GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
  ParCompactionManager::remove_all_shadow_regions();

  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap, summary data and split info.
    clear_data_covering_space(SpaceId(id));
    {
      MutableSpace* space = _space_info[id].space();
      HeapWord* top = space->top();
      HeapWord* new_top = _space_info[id].new_top();
      if (ZapUnusedHeapArea && new_top < top) {
        space->mangle_region(MemRegion(new_top, top));
      }
      // Update top(). Must be done after clearing the bitmap and summary data.
      space->set_top(new_top);
    }
  }

#ifdef ASSERT
  {
    mark_bitmap()->verify_clear();
    summary_data().verify_clear();
  }
#endif

  ParCompactionManager::flush_all_string_dedup_requests();

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  bool eden_empty = eden_space->is_empty();

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
                         to_space->is_empty();

  PSCardTable* ct = heap->card_table();
  MemRegion old_mr = heap->old_gen()->committed();
  if (young_gen_empty) {
    ct->clear_MemRegion(old_mr);
  } else {
    ct->dirty_MemRegion(old_mr);
  }

  {
    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
    DEBUG_ONLY(MetaspaceUtils::verify();)
  }

  // Need to clear claim bits for the next mark.
  ClassLoaderDataGraph::clear_claimed_marks();

  heap->prune_scavengable_nmethods();

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();
}

HeapWord* PSParallelCompact::compute_dense_prefix_for_old_space(MutableSpace* old_space,
                                                                HeapWord* full_region_prefix_end) {
  const size_t region_size = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();

  // Iteration starts with the region *after* the full-region-prefix-end.
  const RegionData* const start_region = sd.addr_to_region_ptr(full_region_prefix_end);
  // If final region is not full, iteration stops before that region,
  // because fill_dense_prefix_end assumes that prefix_end <= top.
  const RegionData* const end_region = sd.addr_to_region_ptr(old_space->top());
  assert(start_region <= end_region, "inv");

  size_t max_waste = old_space->capacity_in_words() * (MarkSweepDeadRatio / 100.0);
  const RegionData* cur_region = start_region;
  for (/* empty */; cur_region < end_region; ++cur_region) {
    assert(region_size >= cur_region->data_size(), "inv");
    size_t dead_size = region_size - cur_region->data_size();
    if (max_waste < dead_size) {
      break;
    }
    max_waste -= dead_size;
  }

  HeapWord* const prefix_end = sd.region_to_addr(cur_region);
  assert(sd.is_region_aligned(prefix_end), "postcondition");
  assert(prefix_end >= full_region_prefix_end, "in-range");
  assert(prefix_end <= old_space->top(), "in-range");
  return prefix_end;
}

void PSParallelCompact::fill_dense_prefix_end(SpaceId id) {
  // Comparing two sizes to decide if filling is required:
  //
  // The size of the filler (min-obj-size) is 2 heap words with the default
  // MinObjAlignment, since both markword and klass take 1 heap word.
  // With +UseCompactObjectHeaders, the minimum filler size is only one word,
  // because the Klass* gets encoded in the mark-word.
  //
  // The size of the gap (if any) right before dense-prefix-end is
  // MinObjAlignment.
  //
  // Need to fill in the gap only if it's smaller than min-obj-size, and the
  // filler obj will extend to next region.

  if (MinObjAlignment >= checked_cast<int>(CollectedHeap::min_fill_size())) {
    return;
  }

  assert(!UseCompactObjectHeaders, "Compact headers can allocate small objects");
  assert(CollectedHeap::min_fill_size() == 2, "inv");
  HeapWord* const dense_prefix_end = dense_prefix(id);
  assert(_summary_data.is_region_aligned(dense_prefix_end), "precondition");
  assert(dense_prefix_end <= space(id)->top(), "precondition");
  if (dense_prefix_end == space(id)->top()) {
    // Must not have single-word gap right before prefix-end/top.
    return;
  }
  RegionData* const region_after_dense_prefix = _summary_data.addr_to_region_ptr(dense_prefix_end);

  if (region_after_dense_prefix->partial_obj_size() != 0 ||
      _mark_bitmap.is_marked(dense_prefix_end)) {
    // The region after the dense prefix starts with live bytes.
    return;
  }

  HeapWord* block_start = start_array(id)->block_start_reaching_into_card(dense_prefix_end);
  if (block_start == dense_prefix_end - 1) {
    assert(!_mark_bitmap.is_marked(block_start), "inv");
    // There is exactly one heap word gap right before the dense prefix end, so we need a filler object.
    // The filler object will extend into region_after_dense_prefix.
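    // The 2-word filler straddles the region boundary: one word is accounted
    // to the region before dense_prefix_end (add_live_obj below), and the
    // word extending past the boundary becomes the partial obj of
    // region_after_dense_prefix.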
    const size_t obj_len = 2; // min-fill-size
    HeapWord* const obj_beg = dense_prefix_end - 1;
    CollectedHeap::fill_with_object(obj_beg, obj_len);
    _mark_bitmap.mark_obj(obj_beg);
    _summary_data.addr_to_region_ptr(obj_beg)->add_live_obj(1);
    region_after_dense_prefix->set_partial_obj_size(1);
    region_after_dense_prefix->set_partial_obj_addr(obj_beg);
    assert(start_array(id) != nullptr, "sanity");
    start_array(id)->update_for_block(obj_beg, obj_beg + obj_len);
  }
}

bool PSParallelCompact::check_maximum_compaction(size_t total_live_words,
                                                 MutableSpace* const old_space,
                                                 HeapWord* full_region_prefix_end) {

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Check System.GC
  bool is_max_on_system_gc = UseMaximumCompactionOnSystemGC
                          && GCCause::is_user_requested_gc(heap->gc_cause());

  // Check if all live objs, taken together, are larger than old-gen.
  const bool is_old_gen_overflowing = (total_live_words > old_space->capacity_in_words());

  // JVM flags
  const uint total_invocations = heap->total_full_collections();
  assert(total_invocations >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations - _maximum_compaction_gc_num;
  const bool is_interval_ended = gcs_since_max > HeapMaximumCompactionInterval;

  // If all regions in old-gen are full
  const bool is_region_full =
    full_region_prefix_end >= _summary_data.region_align_down(old_space->top());

  if (is_max_on_system_gc || is_old_gen_overflowing || is_interval_ended || is_region_full) {
    _maximum_compaction_gc_num = total_invocations;
    return true;
  }

  return false;
}

void PSParallelCompact::summary_phase()
{
  GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);

  MutableSpace* const old_space = _space_info[old_space_id].space();
  {
    size_t total_live_words = 0;
    HeapWord* full_region_prefix_end = nullptr;
    {
      // old-gen
      size_t live_words = _summary_data.live_words_in_space(old_space,
                                                            &full_region_prefix_end);
      total_live_words += live_words;
    }
    // young-gen
    for (uint i = eden_space_id; i < last_space_id; ++i) {
      const MutableSpace* space = _space_info[i].space();
      size_t live_words = _summary_data.live_words_in_space(space);
      total_live_words += live_words;
      _space_info[i].set_new_top(space->bottom() + live_words);
      _space_info[i].set_dense_prefix(space->bottom());
    }

    bool maximum_compaction = check_maximum_compaction(total_live_words,
                                                       old_space,
                                                       full_region_prefix_end);
    HeapWord* dense_prefix_end = maximum_compaction
                                 ? full_region_prefix_end
                                 : compute_dense_prefix_for_old_space(old_space,
                                                                      full_region_prefix_end);
    SpaceId id = old_space_id;
    _space_info[id].set_dense_prefix(dense_prefix_end);

    if (dense_prefix_end != old_space->bottom()) {
      fill_dense_prefix_end(id);
      _summary_data.summarize_dense_prefix(old_space->bottom(), dense_prefix_end);
    }

    // Compacting objs in [dense_prefix_end, old_space->top())
    _summary_data.summarize(_space_info[id].split_info(),
                            dense_prefix_end, old_space->top(), nullptr,
                            dense_prefix_end, old_space->end(),
                            _space_info[id].new_top_addr());
  }

  // Summarize the remaining spaces in the young gen. The initial target space
  // is the old gen. If a space does not fit entirely into the target, then the
  // remainder is compacted into the space itself and that space becomes the
  // new target.
  SpaceId dst_space_id = old_space_id;
  HeapWord* dst_space_end = old_space->end();
  HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
  for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    const size_t live = pointer_delta(_space_info[id].new_top(),
                                      space->bottom());
    const size_t available = pointer_delta(dst_space_end, *new_top_addr);

    if (live > 0 && live <= available) {
      // All the live data will fit.
      bool done = _summary_data.summarize(_space_info[id].split_info(),
                                          space->bottom(), space->top(),
                                          nullptr,
                                          *new_top_addr, dst_space_end,
                                          new_top_addr);
      assert(done, "space must fit into old gen");

      // Reset the new_top value for the space.
      _space_info[id].set_new_top(space->bottom());
    } else if (live > 0) {
      // Attempt to fit part of the source space into the target space.
      HeapWord* next_src_addr = nullptr;
      bool done = _summary_data.summarize(_space_info[id].split_info(),
                                          space->bottom(), space->top(),
                                          &next_src_addr,
                                          *new_top_addr, dst_space_end,
                                          new_top_addr);
      assert(!done, "space should not fit into old gen");
      assert(next_src_addr != nullptr, "sanity");

      // The source space becomes the new target, so the remainder is compacted
      // within the space itself.
      dst_space_id = SpaceId(id);
      dst_space_end = space->end();
      new_top_addr = _space_info[id].new_top_addr();
      done = _summary_data.summarize(_space_info[id].split_info(),
                                     next_src_addr, space->top(),
                                     nullptr,
                                     space->bottom(), dst_space_end,
                                     new_top_addr);
      assert(done, "space must fit when compacted into itself");
      assert(*new_top_addr <= space->top(), "usage should not grow");
    }
  }
}

// This method should contain all heap-specific policy for invoking a full
// collection. invoke_no_policy() will only attempt to compact the heap; it
// will do nothing further. If we need to bail out for policy reasons, scavenge
// before full gc, or perform any other specialized behavior, it needs to be
// added here.
//
// Note that this method should only be called from the vm_thread while at a
// safepoint.
//
// Note that the all_soft_refs_clear flag in the soft ref policy
// may be true because this method can be called without intervening
// activity. For example when the heap space is tight and full measures
// are being taken to free space.
bool PSParallelCompact::invoke(bool clear_all_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(),
         "should be in vm thread");

  SvcGCMarker sgcm(SvcGCMarker::FULL);
  IsSTWGCActiveMark mark;

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  clear_all_soft_refs = clear_all_soft_refs
                     || heap->soft_ref_policy()->should_clear_all_soft_refs();

  return PSParallelCompact::invoke_no_policy(clear_all_soft_refs);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
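// Returns false if the collection was skipped because the GCLocker was
// active; returns true once a full collection has completed.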
bool PSParallelCompact::invoke_no_policy(bool clear_all_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != nullptr, "Sanity");

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  GCIdMark gc_id_mark;
  _gc_timer.register_gc_start();
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  GCCause::Cause gc_cause = heap->gc_cause();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // SoftRefPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_soft_refs,
                          heap->soft_ref_policy());

  // Make sure data structures are sane, make the heap parsable, and do other
  // miscellaneous bookkeeping.
  pre_compact();

  const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

  {
    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);

    heap->pre_full_gc_dump(&_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->start_discovery(clear_all_soft_refs);

    ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                              false /* unregister_nmethods_during_purge */,
                              false /* lock_nmethod_free_separately */);

    marking_phase(&_gc_tracer);

    summary_phase();

#if COMPILER2_OR_JVMCI
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    FullGCForwarding::begin();

    forward_to_new_addr();

    adjust_pointers();

    compact();

    FullGCForwarding::end();

    ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());

    ParCompactionManager::verify_all_region_stack_empty();

    // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
    // done before resizing.
    post_compact();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {
      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
      log_trace(gc, ergo)("old_gen_capacity: %zu young_gen_capacity: %zu",
                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

      // Don't check if the size_policy is ready here. Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_gen_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_gen_size() -
                               young_gen->from_space()->capacity_in_bytes() -
                               young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->soft_ref_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(
          size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }

      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    if (UsePerfData) {
      PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
      counters->update_counters();
      counters->update_old_capacity(old_gen->capacity_in_bytes());
      counters->update_young_capacity(young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // Resize the metaspace capacity after a collection
    MetaspaceGC::compute_new_size();

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(&_gc_timer);
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return true;
}

class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
private:
  uint _worker_id;

public:
  PCAddThreadRootsMarkingTaskClosure(uint worker_id) : _worker_id(worker_id) { }
  void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    ResourceMark rm;

    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(_worker_id);

    MarkingNMethodClosure mark_and_push_in_blobs(&cm->_mark_and_push_closure,
                                                 !NMethodToOopClosure::FixRelocations,
                                                 true /* keepalive nmethods */);

    thread->oops_do(&cm->_mark_and_push_closure, &mark_and_push_in_blobs);

    // Do the real work
    cm->follow_marking_stacks();
  }
};

void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(worker_id);

  do {
    ScannerTask task;
    if (ParCompactionManager::steal(worker_id, task)) {
      cm->follow_contents(task, true);
    }
    cm->follow_marking_stacks();
  } while (!terminator.offer_termination());
}

class MarkFromRootsTask : public WorkerTask {
  StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
  TaskTerminator _terminator;
  uint _active_workers;

public:
  MarkFromRootsTask(uint active_workers) :
    WorkerTask("MarkFromRootsTask"),
    _strong_roots_scope(active_workers),
    _terminator(active_workers, ParCompactionManager::marking_stacks()),
    _active_workers(active_workers) {}

  virtual void work(uint worker_id) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
    cm->create_marking_stats_cache();
    {
      CLDToOopClosure cld_closure(&cm->_mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
      ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);

      // Do the real work
      cm->follow_marking_stacks();
    }

    {
      PCAddThreadRootsMarkingTaskClosure closure(worker_id);
      Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &closure);
    }

    // Mark from OopStorages
    {
      _oop_storage_set_par_state.oops_do(&cm->_mark_and_push_closure);
      // Do the real work
      cm->follow_marking_stacks();
    }

    if (_active_workers > 1) {
      steal_marking_work(_terminator, worker_id);
    }
  }
};

class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  ParallelCompactRefProcProxyTask(uint max_workers)
    : RefProcProxyTask("ParallelCompactRefProcProxyTask", max_workers),
      _terminator(_max_workers, ParCompactionManager::marking_stacks()) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
    _rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &cm->_mark_and_push_closure, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

static void flush_marking_stats_cache(const uint num_workers) {
  for (uint i = 0; i < num_workers; ++i) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(i);
    cm->flush_and_destroy_marking_stats_cache();
  }
}

void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);

  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
  {
    GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);

    MarkFromRootsTask task(active_gc_threads);
    ParallelScavengeHeap::heap()->workers().run_task(&task);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

    ReferenceProcessorStats stats;
    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());

    ref_processor()->set_active_mt_degree(active_gc_threads);
    ParallelCompactRefProcProxyTask task(ref_processor()->max_num_queues());
    stats = ref_processor()->process_discovered_references(task, pt);

    gc_tracer->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  {
    GCTraceTime(Debug, gc, phases) tm("Flush Marking Stats", &_gc_timer);

    flush_marking_stats_cache(active_gc_threads);
  }

  // This is the point where the entire marking should have completed.
  ParCompactionManager::verify_all_marking_stack_empty();

  {
    GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
    WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
                                is_alive_closure(),
                                &do_nothing_cl,
                                1);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);

    ClassUnloadingContext* ctx = ClassUnloadingContext::context();

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(is_alive_closure());

      // Follow system dictionary roots and unload classes.
      unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethod's memory.
      ctx->purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
      ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx->free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
  }

  {
    GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
    _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
  }
#if TASKQUEUE_STATS
  ParCompactionManager::print_and_reset_taskqueue_stats();
#endif
}

template<typename Func>
void PSParallelCompact::adjust_in_space_helper(SpaceId id, volatile uint* claim_counter, Func&& on_stripe) {
  MutableSpace* sp = PSParallelCompact::space(id);
  HeapWord* const bottom = sp->bottom();
  HeapWord* const top = sp->top();
  if (bottom == top) {
    return;
  }

  const uint num_regions_per_stripe = 2;
  const size_t region_size = ParallelCompactData::RegionSize;
  const size_t stripe_size = num_regions_per_stripe * region_size;

  while (true) {
    uint counter = Atomic::fetch_then_add(claim_counter, num_regions_per_stripe);
    HeapWord* cur_stripe = bottom + counter * region_size;
    if (cur_stripe >= top) {
      break;
    }
    HeapWord* stripe_end = MIN2(cur_stripe + stripe_size, top);
    on_stripe(cur_stripe, stripe_end);
  }
}

void PSParallelCompact::adjust_in_old_space(volatile uint* claim_counter) {
  // Regions in old-space shouldn't be split.
  assert(!_space_info[old_space_id].split_info().is_valid(), "inv");

  auto scan_obj_with_limit = [&] (HeapWord* obj_start, HeapWord* left, HeapWord* right) {
    assert(mark_bitmap()->is_marked(obj_start), "inv");
    oop obj = cast_to_oop(obj_start);
    return obj->oop_iterate_size(&pc_adjust_pointer_closure, MemRegion(left, right));
  };

  adjust_in_space_helper(old_space_id, claim_counter, [&] (HeapWord* stripe_start, HeapWord* stripe_end) {
    assert(_summary_data.is_region_aligned(stripe_start), "inv");
    RegionData* cur_region = _summary_data.addr_to_region_ptr(stripe_start);
    HeapWord* obj_start;
    if (cur_region->partial_obj_size() != 0) {
      obj_start = cur_region->partial_obj_addr();
      obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
    } else {
      obj_start = stripe_start;
    }

    while (obj_start < stripe_end) {
      obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
      if (obj_start >= stripe_end) {
        break;
      }
      obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
    }
  });
}

void PSParallelCompact::adjust_in_young_space(SpaceId id, volatile uint* claim_counter) {
  adjust_in_space_helper(id, claim_counter, [](HeapWord* stripe_start, HeapWord* stripe_end) {
    HeapWord* obj_start = stripe_start;
    while (obj_start < stripe_end) {
      obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
      if (obj_start >= stripe_end) {
        break;
      }
      oop obj = cast_to_oop(obj_start);
      obj_start += obj->oop_iterate_size(&pc_adjust_pointer_closure);
    }
  });
}

void PSParallelCompact::adjust_pointers_in_spaces(uint worker_id, volatile uint* claim_counters) {
  auto start_time = Ticks::now();
  adjust_in_old_space(&claim_counters[0]);
  for (uint id = eden_space_id; id < last_space_id; ++id) {
    adjust_in_young_space(SpaceId(id), &claim_counters[id]);
  }
  log_trace(gc, phases)("adjust_pointers_in_spaces worker %u: %.3f ms", worker_id, (Ticks::now() - start_time).seconds() * 1000);
}

class PSAdjustTask final : public WorkerTask {
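  // Updates every oop location to refer to its forwardee: heap spaces
  // (claimed per-space via _claim_counters), thread stacks, OopStorages,
  // class-loader data, weak roots, and nmethods in the code cache.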
  SubTasksDone _sub_tasks;
  WeakProcessor::Task _weak_proc_task;
  OopStorageSetStrongParState<false, false> _oop_storage_iter;
  uint _nworkers;
  volatile uint _claim_counters[PSParallelCompact::last_space_id] = {};

  enum PSAdjustSubTask {
    PSAdjustSubTask_code_cache,

    PSAdjustSubTask_num_elements
  };

public:
  PSAdjustTask(uint nworkers) :
    WorkerTask("PSAdjust task"),
    _sub_tasks(PSAdjustSubTask_num_elements),
    _weak_proc_task(nworkers),
    _nworkers(nworkers) {

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
    if (nworkers > 1) {
      Threads::change_thread_claim_token();
    }
  }

  ~PSAdjustTask() {
    Threads::assert_all_threads_claimed();
  }

  void work(uint worker_id) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
    cm->preserved_marks()->adjust_during_full_gc();
    {
      // adjust pointers in all spaces
      PSParallelCompact::adjust_pointers_in_spaces(worker_id, _claim_counters);
    }
    {
      ResourceMark rm;
      Threads::possibly_parallel_oops_do(_nworkers > 1, &pc_adjust_pointer_closure, nullptr);
    }
    _oop_storage_iter.oops_do(&pc_adjust_pointer_closure);
    {
      CLDToOopClosure cld_closure(&pc_adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
      ClassLoaderDataGraph::cld_do(&cld_closure);
    }
    {
      AlwaysTrueClosure always_alive;
      _weak_proc_task.work(worker_id, &always_alive, &pc_adjust_pointer_closure);
    }
    if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
      NMethodToOopClosure adjust_code(&pc_adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
      CodeCache::nmethods_do(&adjust_code);
    }
    _sub_tasks.all_tasks_claimed();
  }
};

void PSParallelCompact::adjust_pointers() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Adjust Pointers", &_gc_timer);
  uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
  PSAdjustTask task(nworkers);
  ParallelScavengeHeap::heap()->workers().run_task(&task);
}

// Split [start, end) evenly for a number of workers and return the
// range for worker_id.
static void split_regions_for_worker(size_t start, size_t end,
                                     uint worker_id, uint num_workers,
                                     size_t* worker_start, size_t* worker_end) {
  assert(start < end, "precondition");
  assert(num_workers > 0, "precondition");
  assert(worker_id < num_workers, "precondition");

  size_t num_regions = end - start;
  size_t num_regions_per_worker = num_regions / num_workers;
  size_t remainder = num_regions % num_workers;
  // The first few workers will get one extra.
  *worker_start = start + worker_id * num_regions_per_worker
                  + MIN2(checked_cast<size_t>(worker_id), remainder);
  *worker_end = *worker_start + num_regions_per_worker
1 : 0); 1552 } 1553 1554 void PSParallelCompact::forward_to_new_addr() { 1555 GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer); 1556 uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers(); 1557 1558 struct ForwardTask final : public WorkerTask { 1559 uint _num_workers; 1560 1561 explicit ForwardTask(uint num_workers) : 1562 WorkerTask("PSForward task"), 1563 _num_workers(num_workers) {} 1564 1565 static void forward_objs_in_range(ParCompactionManager* cm, 1566 HeapWord* start, 1567 HeapWord* end, 1568 HeapWord* destination) { 1569 HeapWord* cur_addr = start; 1570 HeapWord* new_addr = destination; 1571 1572 while (cur_addr < end) { 1573 cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end); 1574 if (cur_addr >= end) { 1575 return; 1576 } 1577 assert(mark_bitmap()->is_marked(cur_addr), "inv"); 1578 oop obj = cast_to_oop(cur_addr); 1579 if (new_addr != cur_addr) { 1580 cm->preserved_marks()->push_if_necessary(obj, obj->mark()); 1581 FullGCForwarding::forward_to(obj, cast_to_oop(new_addr)); 1582 } 1583 size_t obj_size = obj->size(); 1584 new_addr += obj_size; 1585 cur_addr += obj_size; 1586 } 1587 } 1588 1589 void work(uint worker_id) override { 1590 ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id); 1591 for (uint id = old_space_id; id < last_space_id; ++id) { 1592 MutableSpace* sp = PSParallelCompact::space(SpaceId(id)); 1593 HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id)); 1594 HeapWord* top = sp->top(); 1595 1596 if (dense_prefix_addr == top) { 1597 continue; 1598 } 1599 1600 const SplitInfo& split_info = _space_info[SpaceId(id)].split_info(); 1601 1602 size_t dense_prefix_region = _summary_data.addr_to_region_idx(dense_prefix_addr); 1603 size_t top_region = _summary_data.addr_to_region_idx(_summary_data.region_align_up(top)); 1604 size_t start_region; 1605 size_t end_region; 1606 split_regions_for_worker(dense_prefix_region, top_region, 1607 worker_id, _num_workers, 1608 &start_region, &end_region); 1609 for (size_t cur_region = start_region; cur_region < end_region; ++cur_region) { 1610 RegionData* region_ptr = _summary_data.region(cur_region); 1611 size_t partial_obj_size = region_ptr->partial_obj_size(); 1612 1613 if (partial_obj_size == ParallelCompactData::RegionSize) { 1614 // No obj-start 1615 continue; 1616 } 1617 1618 HeapWord* region_start = _summary_data.region_to_addr(cur_region); 1619 HeapWord* region_end = region_start + ParallelCompactData::RegionSize; 1620 1621 if (split_info.is_split(cur_region)) { 1622 // Part 1: will be relocated to space-1 1623 HeapWord* preceding_destination = split_info.preceding_destination(); 1624 HeapWord* split_point = split_info.split_point(); 1625 forward_objs_in_range(cm, region_start + partial_obj_size, split_point, preceding_destination + partial_obj_size); 1626 1627 // Part 2: will be relocated to space-2 1628 HeapWord* destination = region_ptr->destination(); 1629 forward_objs_in_range(cm, split_point, region_end, destination); 1630 } else { 1631 HeapWord* destination = region_ptr->destination(); 1632 forward_objs_in_range(cm, region_start + partial_obj_size, region_end, destination + partial_obj_size); 1633 } 1634 } 1635 } 1636 } 1637 } task(nworkers); 1638 1639 ParallelScavengeHeap::heap()->workers().run_task(&task); 1640 debug_only(verify_forward();) 1641 } 1642 1643 #ifdef ASSERT 1644 void PSParallelCompact::verify_forward() { 1645 HeapWord* old_dense_prefix_addr = dense_prefix(SpaceId(old_space_id)); 1646 RegionData* old_region = 
_summary_data.region(_summary_data.addr_to_region_idx(old_dense_prefix_addr)); 1647 HeapWord* bump_ptr = old_region->partial_obj_size() != 0 1648 ? old_dense_prefix_addr + old_region->partial_obj_size() 1649 : old_dense_prefix_addr; 1650 SpaceId bump_ptr_space = old_space_id; 1651 1652 for (uint id = old_space_id; id < last_space_id; ++id) { 1653 MutableSpace* sp = PSParallelCompact::space(SpaceId(id)); 1654 HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id)); 1655 HeapWord* top = sp->top(); 1656 HeapWord* cur_addr = dense_prefix_addr; 1657 1658 while (cur_addr < top) { 1659 cur_addr = mark_bitmap()->find_obj_beg(cur_addr, top); 1660 if (cur_addr >= top) { 1661 break; 1662 } 1663 assert(mark_bitmap()->is_marked(cur_addr), "inv"); 1664 assert(bump_ptr <= _space_info[bump_ptr_space].new_top(), "inv"); 1665 // Move to the space containing cur_addr 1666 if (bump_ptr == _space_info[bump_ptr_space].new_top()) { 1667 bump_ptr = space(space_id(cur_addr))->bottom(); 1668 bump_ptr_space = space_id(bump_ptr); 1669 } 1670 oop obj = cast_to_oop(cur_addr); 1671 if (cur_addr == bump_ptr) { 1672 assert(!FullGCForwarding::is_forwarded(obj), "inv"); 1673 } else { 1674 assert(FullGCForwarding::forwardee(obj) == cast_to_oop(bump_ptr), "inv"); 1675 } 1676 bump_ptr += obj->size(); 1677 cur_addr += obj->size(); 1678 } 1679 } 1680 } 1681 #endif 1682 1683 // Helper class to print 8 region numbers per line and then print the total at the end. 1684 class FillableRegionLogger : public StackObj { 1685 private: 1686 Log(gc, compaction) log; 1687 static const int LineLength = 8; 1688 size_t _regions[LineLength]; 1689 int _next_index; 1690 bool _enabled; 1691 size_t _total_regions; 1692 public: 1693 FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { } 1694 ~FillableRegionLogger() { 1695 log.trace("%zu initially fillable regions", _total_regions); 1696 } 1697 1698 void print_line() { 1699 if (!_enabled || _next_index == 0) { 1700 return; 1701 } 1702 FormatBuffer<> line("Fillable: "); 1703 for (int i = 0; i < _next_index; i++) { 1704 line.append(" %7zu", _regions[i]); 1705 } 1706 log.trace("%s", line.buffer()); 1707 _next_index = 0; 1708 } 1709 1710 void handle(size_t region) { 1711 if (!_enabled) { 1712 return; 1713 } 1714 _regions[_next_index++] = region; 1715 if (_next_index == LineLength) { 1716 print_line(); 1717 } 1718 _total_regions++; 1719 } 1720 }; 1721 1722 void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads) 1723 { 1724 GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer); 1725 1726 // Find the threads that are active 1727 uint worker_id = 0; 1728 1729 // Find all regions that are available (can be filled immediately) and 1730 // distribute them to the thread stacks. The iteration is done in reverse 1731 // order (high to low) so the regions will be removed in ascending order. 1732 1733 const ParallelCompactData& sd = PSParallelCompact::summary_data(); 1734 1735 // id + 1 is used to test termination so unsigned can 1736 // be used with an old_space_id == 0. 
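// E.g. with old_space_id == 0: after the id == 0 iteration, --id wraps to UINT_MAX and 'id + 1' wraps to 0, so the test 'id + 1 > old_space_id' becomes '0 > 0' and the loop exits; a plain 'id >= old_space_id' would be vacuously true for an unsigned id.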
1737 FillableRegionLogger region_logger; 1738 for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) { 1739 SpaceInfo* const space_info = _space_info + id; 1740 HeapWord* const new_top = space_info->new_top(); 1741 1742 const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix()); 1743 const size_t end_region = 1744 sd.addr_to_region_idx(sd.region_align_up(new_top)); 1745 1746 for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) { 1747 if (sd.region(cur)->claim_unsafe()) { 1748 ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id); 1749 bool result = sd.region(cur)->mark_normal(); 1750 assert(result, "Must succeed at this point."); 1751 cm->region_stack()->push(cur); 1752 region_logger.handle(cur); 1753 // Assign regions to tasks in round-robin fashion. 1754 if (++worker_id == parallel_gc_threads) { 1755 worker_id = 0; 1756 } 1757 } 1758 } 1759 region_logger.print_line(); 1760 } 1761 } 1762 1763 static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) { 1764 assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc"); 1765 1766 ParCompactionManager* cm = 1767 ParCompactionManager::gc_thread_compaction_manager(worker_id); 1768 1769 // Drain the stacks that have been preloaded with regions 1770 // that are ready to fill. 1771 1772 cm->drain_region_stacks(); 1773 1774 guarantee(cm->region_stack()->is_empty(), "Not empty"); 1775 1776 size_t region_index = 0; 1777 1778 while (true) { 1779 if (ParCompactionManager::steal(worker_id, region_index)) { 1780 PSParallelCompact::fill_and_update_region(cm, region_index); 1781 cm->drain_region_stacks(); 1782 } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) { 1783 // Fill and update an unavailable region with the help of a shadow region 1784 PSParallelCompact::fill_and_update_shadow_region(cm, region_index); 1785 cm->drain_region_stacks(); 1786 } else { 1787 if (terminator->offer_termination()) { 1788 break; 1789 } 1790 // Go around again. 
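// An unsuccessful termination offer means some other worker may still be producing stealable regions, so loop back and retry the steal.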
1791 } 1792 } 1793 } 1794 1795 class FillDensePrefixAndCompactionTask: public WorkerTask { 1796 uint _num_workers; 1797 TaskTerminator _terminator; 1798 1799 public: 1800 FillDensePrefixAndCompactionTask(uint active_workers) : 1801 WorkerTask("FillDensePrefixAndCompactionTask"), 1802 _num_workers(active_workers), 1803 _terminator(active_workers, ParCompactionManager::region_task_queues()) { 1804 } 1805 1806 virtual void work(uint worker_id) { 1807 { 1808 auto start = Ticks::now(); 1809 PSParallelCompact::fill_dead_objs_in_dense_prefix(worker_id, _num_workers); 1810 log_trace(gc, phases)("Fill dense prefix by worker %u: %.3f ms", worker_id, (Ticks::now() - start).seconds() * 1000); 1811 } 1812 compaction_with_stealing_work(&_terminator, worker_id); 1813 } 1814 }; 1815 1816 void PSParallelCompact::fill_range_in_dense_prefix(HeapWord* start, HeapWord* end) { 1817 #ifdef ASSERT 1818 { 1819 assert(start < end, "precondition"); 1820 assert(mark_bitmap()->find_obj_beg(start, end) == end, "precondition"); 1821 HeapWord* bottom = _space_info[old_space_id].space()->bottom(); 1822 if (start != bottom) { 1823 HeapWord* obj_start = mark_bitmap()->find_obj_beg_reverse(bottom, start); 1824 HeapWord* after_obj = obj_start + cast_to_oop(obj_start)->size(); 1825 assert(after_obj == start, "precondition"); 1826 } 1827 } 1828 #endif 1829 1830 CollectedHeap::fill_with_objects(start, pointer_delta(end, start)); 1831 HeapWord* addr = start; 1832 do { 1833 size_t size = cast_to_oop(addr)->size(); 1834 start_array(old_space_id)->update_for_block(addr, addr + size); 1835 addr += size; 1836 } while (addr < end); 1837 } 1838 1839 void PSParallelCompact::fill_dead_objs_in_dense_prefix(uint worker_id, uint num_workers) { 1840 ParMarkBitMap* bitmap = mark_bitmap(); 1841 1842 HeapWord* const bottom = _space_info[old_space_id].space()->bottom(); 1843 HeapWord* const prefix_end = dense_prefix(old_space_id); 1844 1845 if (bottom == prefix_end) { 1846 return; 1847 } 1848 1849 size_t bottom_region = _summary_data.addr_to_region_idx(bottom); 1850 size_t prefix_end_region = _summary_data.addr_to_region_idx(prefix_end); 1851 1852 size_t start_region; 1853 size_t end_region; 1854 split_regions_for_worker(bottom_region, prefix_end_region, 1855 worker_id, num_workers, 1856 &start_region, &end_region); 1857 1858 if (start_region == end_region) { 1859 return; 1860 } 1861 1862 HeapWord* const start_addr = _summary_data.region_to_addr(start_region); 1863 HeapWord* const end_addr = _summary_data.region_to_addr(end_region); 1864 1865 // Skip live partial obj (if any) from previous region. 1866 HeapWord* cur_addr; 1867 RegionData* start_region_ptr = _summary_data.region(start_region); 1868 if (start_region_ptr->partial_obj_size() != 0) { 1869 HeapWord* partial_obj_start = start_region_ptr->partial_obj_addr(); 1870 assert(bitmap->is_marked(partial_obj_start), "inv"); 1871 cur_addr = partial_obj_start + cast_to_oop(partial_obj_start)->size(); 1872 } else { 1873 cur_addr = start_addr; 1874 } 1875 1876 // end_addr is inclusive to handle regions starting with dead space. 1877 while (cur_addr <= end_addr) { 1878 // Use prefix_end to handle trailing obj in each worker region-chunk. 1879 HeapWord* live_start = bitmap->find_obj_beg(cur_addr, prefix_end); 1880 if (cur_addr != live_start) { 1881 // Only worker 0 handles preceding dead space.
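// Dead space at the start of any other worker's chunk is filled by the previous worker, whose scan runs through end_addr inclusively; worker 0 has no predecessor, so it must fill its own leading dead space.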
1882 if (cur_addr != start_addr || worker_id == 0) { 1883 fill_range_in_dense_prefix(cur_addr, live_start); 1884 } 1885 } 1886 if (live_start >= end_addr) { 1887 break; 1888 } 1889 assert(bitmap->is_marked(live_start), "inv"); 1890 cur_addr = live_start + cast_to_oop(live_start)->size(); 1891 } 1892 } 1893 1894 void PSParallelCompact::compact() { 1895 GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer); 1896 1897 uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers(); 1898 1899 initialize_shadow_regions(active_gc_threads); 1900 prepare_region_draining_tasks(active_gc_threads); 1901 1902 { 1903 GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer); 1904 1905 FillDensePrefixAndCompactionTask task(active_gc_threads); 1906 ParallelScavengeHeap::heap()->workers().run_task(&task); 1907 1908 #ifdef ASSERT 1909 verify_filler_in_dense_prefix(); 1910 1911 // Verify that all regions have been processed. 1912 for (unsigned int id = old_space_id; id < last_space_id; ++id) { 1913 verify_complete(SpaceId(id)); 1914 } 1915 #endif 1916 } 1917 } 1918 1919 #ifdef ASSERT 1920 void PSParallelCompact::verify_filler_in_dense_prefix() { 1921 HeapWord* bottom = _space_info[old_space_id].space()->bottom(); 1922 HeapWord* dense_prefix_end = dense_prefix(old_space_id); 1923 HeapWord* cur_addr = bottom; 1924 while (cur_addr < dense_prefix_end) { 1925 oop obj = cast_to_oop(cur_addr); 1926 oopDesc::verify(obj); 1927 if (!mark_bitmap()->is_marked(cur_addr)) { 1928 Klass* k = cast_to_oop(cur_addr)->klass(); 1929 assert(k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass(), "inv"); 1930 } 1931 cur_addr += obj->size(); 1932 } 1933 } 1934 1935 void PSParallelCompact::verify_complete(SpaceId space_id) { 1936 // All Regions served as compaction targets, from dense_prefix() to 1937 // new_top(), should be marked as filled and all Regions between new_top() 1938 // and top() should be available (i.e., should have been emptied). 1939 ParallelCompactData& sd = summary_data(); 1940 SpaceInfo si = _space_info[space_id]; 1941 HeapWord* new_top_addr = sd.region_align_up(si.new_top()); 1942 HeapWord* old_top_addr = sd.region_align_up(si.space()->top()); 1943 const size_t beg_region = sd.addr_to_region_idx(si.dense_prefix()); 1944 const size_t new_top_region = sd.addr_to_region_idx(new_top_addr); 1945 const size_t old_top_region = sd.addr_to_region_idx(old_top_addr); 1946 1947 size_t cur_region; 1948 for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) { 1949 const RegionData* const c = sd.region(cur_region); 1950 assert(c->completed(), "region %zu not filled: destination_count=%u", 1951 cur_region, c->destination_count()); 1952 } 1953 1954 for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) { 1955 const RegionData* const c = sd.region(cur_region); 1956 assert(c->available(), "region %zu not empty: destination_count=%u", 1957 cur_region, c->destination_count()); 1958 } 1959 } 1960 #endif // #ifdef ASSERT 1961 1962 // Return the SpaceId for the space containing addr. If addr is not in the 1963 // heap, last_space_id is returned. In debug mode it expects the address to be 1964 // in the heap and asserts such. 
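// The lookup is a simple linear scan; with only a handful of spaces (old plus the young-gen spaces) the cost is negligible.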
1965 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) { 1966 assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap"); 1967 1968 for (unsigned int id = old_space_id; id < last_space_id; ++id) { 1969 if (_space_info[id].space()->contains(addr)) { 1970 return SpaceId(id); 1971 } 1972 } 1973 1974 assert(false, "no space contains the addr"); 1975 return last_space_id; 1976 } 1977 1978 // Skip over count live words starting from beg, and return the address of the 1979 // next live word. Callers must ensure that there are enough live words in 1980 // the range [beg, end) to skip. 1981 HeapWord* PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count) 1982 { 1983 ParMarkBitMap* m = mark_bitmap(); 1984 HeapWord* cur_addr = beg; 1985 while (true) { 1986 cur_addr = m->find_obj_beg(cur_addr, end); 1987 assert(cur_addr < end, "inv"); 1988 size_t obj_size = cast_to_oop(cur_addr)->size(); 1989 // Strictly greater-than 1990 if (obj_size > count) { 1991 return cur_addr + count; 1992 } 1993 count -= obj_size; 1994 cur_addr += obj_size; 1995 } 1996 } 1997 1998 // On starting to fill a destination region (dest-region), we need to know the 1999 // location of the word that will be at the start of the dest-region after 2000 // compaction. A dest-region can have one or more source regions, but only the 2001 // first source-region contains this location. This location is retrieved by 2002 // calling `first_src_addr` on a dest-region. 2003 // Conversely, a source-region has a dest-region which holds the destination of 2004 // the first live word on this source-region, based on which the destination 2005 // for the rest of the live words can be derived. 2006 // 2007 // Note: 2008 // There is some complication due to space-boundary-fragmentation (an obj can't 2009 // cross space-boundary) -- a source-region may be split and behave like two 2010 // distinct regions with their own dest-region, as depicted below. 2011 // 2012 // source-region: region-n 2013 // 2014 // ********************** 2015 // | A|A~~~~B|B | 2016 // ********************** 2017 // n-1 n n+1 2018 // 2019 // AA, BB denote two live objs. ~~~~ denotes unknown number of live objs. 2020 // 2021 // Assuming the dest-region for region-n is the final region before 2022 // old-space-end and its first-live-word is the middle of AA, the heap content 2023 // will look like the following after compaction: 2024 // 2025 // ************** ************* 2026 // A|A~~~~ | |BB | 2027 // ************** ************* 2028 // ^ ^ 2029 // | old-space-end | eden-space-start 2030 // 2031 // Therefore, in this example, region-n will have two dest-regions: 2032 // 1. the final region in old-space 2033 // 2. the first region in eden-space. 2034 // To handle this special case, we introduce the concept of split-region, whose 2035 // contents are relocated to two spaces. `SplitInfo` captures all necessary 2036 // info about the split: the first part, the splitting point, and the second part.
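// In the diagram above, the split point is the address of the first word of BB: live words before it (part 1) relocate toward old-space-end via preceding_destination(), while BB (part 2) relocates to eden-space-start via the region's destination() (as used in forward_to_new_addr() above).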
2037 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr, 2038 SpaceId src_space_id, 2039 size_t src_region_idx) 2040 { 2041 const size_t RegionSize = ParallelCompactData::RegionSize; 2042 const ParallelCompactData& sd = summary_data(); 2043 assert(sd.is_region_aligned(dest_addr), "precondition"); 2044 2045 const RegionData* const src_region_ptr = sd.region(src_region_idx); 2046 assert(src_region_ptr->data_size() > 0, "src region cannot be empty"); 2047 2048 const size_t partial_obj_size = src_region_ptr->partial_obj_size(); 2049 HeapWord* const src_region_destination = src_region_ptr->destination(); 2050 2051 HeapWord* const region_start = sd.region_to_addr(src_region_idx); 2052 HeapWord* const region_end = sd.region_to_addr(src_region_idx) + RegionSize; 2053 2054 // Identify the actual destination for the first live words on this region, 2055 // taking split-region into account. 2056 HeapWord* region_start_destination; 2057 const SplitInfo& split_info = _space_info[src_space_id].split_info(); 2058 if (split_info.is_split(src_region_idx)) { 2059 // The second part of this split region; use the recorded split point. 2060 if (dest_addr == src_region_destination) { 2061 return split_info.split_point(); 2062 } 2063 region_start_destination = split_info.preceding_destination(); 2064 } else { 2065 region_start_destination = src_region_destination; 2066 } 2067 2068 // Calculate the offset to be skipped 2069 size_t words_to_skip = pointer_delta(dest_addr, region_start_destination); 2070 2071 HeapWord* result; 2072 if (partial_obj_size > words_to_skip) { 2073 result = region_start + words_to_skip; 2074 } else { 2075 words_to_skip -= partial_obj_size; 2076 result = skip_live_words(region_start + partial_obj_size, region_end, words_to_skip); 2077 } 2078 2079 if (split_info.is_split(src_region_idx)) { 2080 assert(result < split_info.split_point(), "postcondition"); 2081 } else { 2082 assert(result < region_end, "postcondition"); 2083 } 2084 2085 return result; 2086 } 2087 2088 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm, 2089 SpaceId src_space_id, 2090 size_t beg_region, 2091 HeapWord* end_addr) 2092 { 2093 ParallelCompactData& sd = summary_data(); 2094 2095 #ifdef ASSERT 2096 MutableSpace* const src_space = _space_info[src_space_id].space(); 2097 HeapWord* const beg_addr = sd.region_to_addr(beg_region); 2098 assert(src_space->contains(beg_addr) || beg_addr == src_space->end(), 2099 "src_space_id does not match beg_addr"); 2100 assert(src_space->contains(end_addr) || end_addr == src_space->end(), 2101 "src_space_id does not match end_addr"); 2102 #endif // #ifdef ASSERT 2103 2104 RegionData* const beg = sd.region(beg_region); 2105 RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr)); 2106 2107 // Regions up to new_top() are enqueued if they become available. 2108 HeapWord* const new_top = _space_info[src_space_id].new_top(); 2109 RegionData* const enqueue_end = 2110 sd.addr_to_region_ptr(sd.region_align_up(new_top)); 2111 2112 for (RegionData* cur = beg; cur < end; ++cur) { 2113 assert(cur->data_size() > 0, "region must have live data"); 2114 cur->decrement_destination_count(); 2115 if (cur < enqueue_end && cur->available() && cur->claim()) { 2116 if (cur->mark_normal()) { 2117 cm->push_region(sd.region(cur)); 2118 } else if (cur->mark_copied()) { 2119 // Try to copy the content of the shadow region back to its corresponding 2120 // heap region if the shadow region is filled. 
Otherwise, the GC thread 2121 // that fills the shadow region will copy the data back (see 2122 // MoveAndUpdateShadowClosure::complete_region). 2123 copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur)); 2124 ParCompactionManager::push_shadow_region_mt_safe(cur->shadow_region()); 2125 cur->set_completed(); 2126 } 2127 } 2128 } 2129 } 2130 2131 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure, 2132 SpaceId& src_space_id, 2133 HeapWord*& src_space_top, 2134 HeapWord* end_addr) 2135 { 2136 ParallelCompactData& sd = PSParallelCompact::summary_data(); 2137 2138 size_t src_region_idx = 0; 2139 2140 // Skip empty regions (if any) up to the top of the space. 2141 HeapWord* const src_aligned_up = sd.region_align_up(end_addr); 2142 RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up); 2143 HeapWord* const top_aligned_up = sd.region_align_up(src_space_top); 2144 const RegionData* const top_region_ptr = sd.addr_to_region_ptr(top_aligned_up); 2145 2146 while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) { 2147 ++src_region_ptr; 2148 } 2149 2150 if (src_region_ptr < top_region_ptr) { 2151 // Found the first non-empty region in the same space. 2152 src_region_idx = sd.region(src_region_ptr); 2153 closure.set_source(sd.region_to_addr(src_region_idx)); 2154 return src_region_idx; 2155 } 2156 2157 // Switch to a new source space and find the first non-empty region. 2158 uint space_id = src_space_id + 1; 2159 assert(space_id < last_space_id, "not enough spaces"); 2160 2161 for (/* empty */; space_id < last_space_id; ++space_id) { 2162 HeapWord* bottom = _space_info[space_id].space()->bottom(); 2163 HeapWord* top = _space_info[space_id].space()->top(); 2164 // Skip empty space 2165 if (bottom == top) { 2166 continue; 2167 } 2168 2169 // Identify the first region that contains live words in this space 2170 size_t cur_region = sd.addr_to_region_idx(bottom); 2171 size_t end_region = sd.addr_to_region_idx(sd.region_align_up(top)); 2172 2173 for (/* empty */ ; cur_region < end_region; ++cur_region) { 2174 RegionData* cur = sd.region(cur_region); 2175 if (cur->live_obj_size() > 0) { 2176 HeapWord* region_start_addr = sd.region_to_addr(cur_region); 2177 2178 src_space_id = SpaceId(space_id); 2179 src_space_top = top; 2180 closure.set_source(region_start_addr); 2181 return cur_region; 2182 } 2183 } 2184 } 2185 2186 ShouldNotReachHere(); 2187 } 2188 2189 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) { 2190 ParallelCompactData& sd = summary_data(); 2191 assert(sd.is_region_aligned(region_start_addr), "precondition"); 2192 2193 // Use per-region partial_obj_size to locate the end of the obj that extends 2194 // into the region starting at region_start_addr. 2195 size_t start_region_idx = sd.addr_to_region_idx(region_start_addr); 2196 size_t end_region_idx = sd.region_count(); 2197 size_t accumulated_size = 0; 2198 for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) { 2199 size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size(); 2200 accumulated_size += cur_partial_obj_size; 2201 if (cur_partial_obj_size != ParallelCompactData::RegionSize) { 2202 break; 2203 } 2204 } 2205 return region_start_addr + accumulated_size; 2206 } 2207 2208 // Use region_idx as the destination region, and evacuate all live objs on its 2209 // source regions to this destination region.
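// Outline: position the closure at the first live word destined for this region; if that word is the interior of an obj (a partial obj), copy its remainder first; then copy whole objs, advancing across source regions and spaces via next_src_region(), until the closure is full and complete_region() publishes the region.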
2210 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx) 2211 { 2212 ParMarkBitMap* const bitmap = mark_bitmap(); 2213 ParallelCompactData& sd = summary_data(); 2214 RegionData* const region_ptr = sd.region(region_idx); 2215 2216 // Get the source region and related info. 2217 size_t src_region_idx = region_ptr->source_region(); 2218 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx)); 2219 HeapWord* src_space_top = _space_info[src_space_id].space()->top(); 2220 HeapWord* dest_addr = sd.region_to_addr(region_idx); 2221 2222 closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx)); 2223 2224 // Adjust src_region_idx to prepare for decrementing destination counts (the 2225 // destination count is not decremented when a region is copied to itself). 2226 if (src_region_idx == region_idx) { 2227 src_region_idx += 1; 2228 } 2229 2230 // source-region: 2231 // 2232 // ********** 2233 // | ~~~ | 2234 // ********** 2235 // ^ 2236 // |-- closure.source() / first_src_addr 2237 // 2238 // 2239 // ~~~ : live words 2240 // 2241 // destination-region: 2242 // 2243 // ********** 2244 // | | 2245 // ********** 2246 // ^ 2247 // |-- region-start 2248 if (bitmap->is_unmarked(closure.source())) { 2249 // An object overflows the previous destination region, so this 2250 // destination region should copy the remainder of the object or as much as 2251 // will fit. 2252 HeapWord* const old_src_addr = closure.source(); 2253 { 2254 HeapWord* region_start = sd.region_align_down(closure.source()); 2255 HeapWord* obj_start = bitmap->find_obj_beg_reverse(region_start, closure.source()); 2256 HeapWord* obj_end; 2257 if (obj_start != closure.source()) { 2258 assert(bitmap->is_marked(obj_start), "inv"); 2259 // Found the actual obj-start; size() can be used to find the obj-end only 2260 // if this obj is completely contained in the current region. 2261 HeapWord* next_region_start = region_start + ParallelCompactData::RegionSize; 2262 HeapWord* partial_obj_start = (next_region_start >= src_space_top) 2263 ? nullptr 2264 : sd.addr_to_region_ptr(next_region_start)->partial_obj_addr(); 2265 // This obj extends to next region iff partial_obj_addr of the *next* 2266 // region is the same as obj-start. 2267 if (partial_obj_start == obj_start) { 2268 // This obj extends to next region. 2269 obj_end = partial_obj_end(next_region_start); 2270 } else { 2271 // Completely contained in this region; safe to use size(). 2272 obj_end = obj_start + cast_to_oop(obj_start)->size(); 2273 } 2274 } else { 2275 // This obj extends into the current region from a preceding one. 2276 obj_end = partial_obj_end(region_start); 2277 } 2278 size_t partial_obj_size = pointer_delta(obj_end, closure.source()); 2279 closure.copy_partial_obj(partial_obj_size); 2280 } 2281 2282 if (closure.is_full()) { 2283 decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source()); 2284 closure.complete_region(dest_addr, region_ptr); 2285 return; 2286 } 2287 2288 // Finished copying without using up the current destination-region 2289 HeapWord* const end_addr = sd.region_align_down(closure.source()); 2290 if (sd.region_align_down(old_src_addr) != end_addr) { 2291 assert(sd.region_align_up(old_src_addr) == end_addr, "only one region"); 2292 // The partial object was copied from more than one source region. 2293 decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr); 2294 2295 // Move to the next source region, possibly switching spaces as well.
All 2296 // args except end_addr may be modified. 2297 src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr); 2298 } 2299 } 2300 2301 // Handle the rest obj-by-obj, where we know obj-start. 2302 do { 2303 HeapWord* cur_addr = closure.source(); 2304 HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1), 2305 src_space_top); 2306 // To handle the case where the final obj in source region extends to next region. 2307 HeapWord* final_obj_start = (end_addr == src_space_top) 2308 ? nullptr 2309 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr(); 2310 // Apply closure on objs inside [cur_addr, end_addr) 2311 do { 2312 cur_addr = bitmap->find_obj_beg(cur_addr, end_addr); 2313 if (cur_addr == end_addr) { 2314 break; 2315 } 2316 size_t obj_size; 2317 if (final_obj_start == cur_addr) { 2318 obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr); 2319 } else { 2320 // This obj doesn't extend into next region; size() is safe to use. 2321 obj_size = cast_to_oop(cur_addr)->size(); 2322 } 2323 closure.do_addr(cur_addr, obj_size); 2324 cur_addr += obj_size; 2325 } while (cur_addr < end_addr && !closure.is_full()); 2326 2327 if (closure.is_full()) { 2328 decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source()); 2329 closure.complete_region(dest_addr, region_ptr); 2330 return; 2331 } 2332 2333 decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr); 2334 2335 // Move to the next source region, possibly switching spaces as well. All 2336 // args except end_addr may be modified. 2337 src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr); 2338 } while (true); 2339 } 2340 2341 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx) 2342 { 2343 MoveAndUpdateClosure cl(mark_bitmap(), region_idx); 2344 fill_region(cm, cl, region_idx); 2345 } 2346 2347 void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx) 2348 { 2349 // Get a shadow region first 2350 ParallelCompactData& sd = summary_data(); 2351 RegionData* const region_ptr = sd.region(region_idx); 2352 size_t shadow_region = ParCompactionManager::pop_shadow_region_mt_safe(region_ptr); 2353 // The InvalidShadow return value indicates the corresponding heap region is available, 2354 // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use 2355 // MoveAndUpdateShadowClosure to fill the acquired shadow region. 
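// Either way, fill_region() does the actual work; only the closure, and hence the physical copy destination, differs.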
2356 if (shadow_region == ParCompactionManager::InvalidShadow) { 2357 MoveAndUpdateClosure cl(mark_bitmap(), region_idx); 2358 region_ptr->shadow_to_normal(); 2359 return fill_region(cm, cl, region_idx); 2360 } else { 2361 MoveAndUpdateShadowClosure cl(mark_bitmap(), region_idx, shadow_region); 2362 return fill_region(cm, cl, region_idx); 2363 } 2364 } 2365 2366 void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr) 2367 { 2368 Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize); 2369 } 2370 2371 bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_t& region_idx) 2372 { 2373 size_t next = cm->next_shadow_region(); 2374 ParallelCompactData& sd = summary_data(); 2375 size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top()); 2376 uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers(); 2377 2378 while (next < old_new_top) { 2379 if (sd.region(next)->mark_shadow()) { 2380 region_idx = next; 2381 return true; 2382 } 2383 next = cm->move_next_shadow_region_by(active_gc_threads); 2384 } 2385 2386 return false; 2387 } 2388 2389 // The shadow region is an optimization to address region dependencies in full GC. The basic 2390 // idea is making more regions available by temporarily storing their live objects in empty 2391 // shadow regions to resolve dependencies between them and the destination regions. Therefore, 2392 // GC threads need not wait for destination regions to be available before processing sources. 2393 // 2394 // A typical workflow would be: 2395 // After draining its own stack and failing to steal from others, a GC worker would pick an 2396 // unavailable region (destination count > 0) and get a shadow region. Then the worker fills 2397 // the shadow region by copying live objects from source regions of the unavailable one. Once 2398 // the unavailable region becomes available, the data in the shadow region will be copied back. 2399 // Shadow regions are empty regions in the to-space and regions between top and end of other spaces. 2400 void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads) 2401 { 2402 const ParallelCompactData& sd = PSParallelCompact::summary_data(); 2403 2404 for (unsigned int id = old_space_id; id < last_space_id; ++id) { 2405 SpaceInfo* const space_info = _space_info + id; 2406 MutableSpace* const space = space_info->space(); 2407 2408 const size_t beg_region = 2409 sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top()))); 2410 const size_t end_region = 2411 sd.addr_to_region_idx(sd.region_align_down(space->end())); 2412 2413 for (size_t cur = beg_region; cur < end_region; ++cur) { 2414 ParCompactionManager::push_shadow_region(cur); 2415 } 2416 } 2417 2418 size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix()); 2419 for (uint i = 0; i < parallel_gc_threads; i++) { 2420 ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i); 2421 cm->set_next_shadow_region(beg_region + i); 2422 } 2423 } 2424 2425 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size) 2426 { 2427 size_t words = MIN2(partial_obj_size, words_remaining()); 2428 2429 // This test is necessary; if omitted, the pointer updates to a partial object 2430 // that crosses the dense prefix boundary could be overwritten.
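// (They can be equal, e.g. when a region serves as its own destination and the partial obj is already in place.)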
2431 if (source() != copy_destination()) { 2432 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());) 2433 Copy::aligned_conjoint_words(source(), copy_destination(), words); 2434 } 2435 update_state(words); 2436 } 2437 2438 void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) { 2439 assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished"); 2440 region_ptr->set_completed(); 2441 } 2442 2443 void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) { 2444 assert(destination() != nullptr, "sanity"); 2445 _source = addr; 2446 2447 // The start_array must be updated even if the object is not moving. 2448 if (_start_array != nullptr) { 2449 _start_array->update_for_block(destination(), destination() + words); 2450 } 2451 2452 // Avoid overflow 2453 words = MIN2(words, words_remaining()); 2454 assert(words > 0, "inv"); 2455 2456 if (copy_destination() != source()) { 2457 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());) 2458 assert(source() != destination(), "inv"); 2459 assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv"); 2460 assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv"); 2461 Copy::aligned_conjoint_words(source(), copy_destination(), words); 2462 cast_to_oop(copy_destination())->init_mark(); 2463 } 2464 2465 update_state(words); 2466 } 2467 2468 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) { 2469 assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow"); 2470 // Record the shadow region index 2471 region_ptr->set_shadow_region(_shadow); 2472 // Mark the shadow region as filled to indicate the data is ready to be 2473 // copied back 2474 region_ptr->mark_filled(); 2475 // Try to copy the content of the shadow region back to its corresponding 2476 // heap region if available; the GC thread that decreases the destination 2477 // count to zero will do the copying otherwise (see 2478 // PSParallelCompact::decrement_destination_counts). 2479 if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) { 2480 region_ptr->set_completed(); 2481 PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr); 2482 ParCompactionManager::push_shadow_region_mt_safe(_shadow); 2483 } 2484 }
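// Note the copy-back handshake between this method and PSParallelCompact::decrement_destination_counts(): the filler marks the shadow region filled, the heap region becomes claimable once its destination count drops to zero, and whichever thread then wins mark_copied() performs copy_back() and recycles the shadow region via push_shadow_region_mt_safe(); the other thread does nothing.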