/*
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psStringDedup.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/access.inline.hpp"
#include "oops/flatArrayKlass.inline.hpp"
#include "oops/instanceClassLoaderKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
"oops/instanceKlass.inline.hpp" 81 #include "oops/instanceMirrorKlass.inline.hpp" 82 #include "oops/methodData.hpp" 83 #include "oops/objArrayKlass.inline.hpp" 84 #include "oops/oop.inline.hpp" 85 #include "runtime/atomic.hpp" 86 #include "runtime/handles.inline.hpp" 87 #include "runtime/java.hpp" 88 #include "runtime/safepoint.hpp" 89 #include "runtime/threads.hpp" 90 #include "runtime/vmThread.hpp" 91 #include "services/memoryService.hpp" 92 #include "utilities/align.hpp" 93 #include "utilities/debug.hpp" 94 #include "utilities/events.hpp" 95 #include "utilities/formatBuffer.hpp" 96 #include "utilities/macros.hpp" 97 #include "utilities/stack.inline.hpp" 98 #if INCLUDE_JVMCI 99 #include "jvmci/jvmci.hpp" 100 #endif 101 102 #include <math.h> 103 104 // All sizes are in HeapWords. 105 const size_t ParallelCompactData::Log2RegionSize = 16; // 64K words 106 const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize; 107 static_assert(ParallelCompactData::RegionSize >= BitsPerWord, "region-start bit word-aligned"); 108 const size_t ParallelCompactData::RegionSizeBytes = 109 RegionSize << LogHeapWordSize; 110 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1; 111 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1; 112 const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask; 113 114 const ParallelCompactData::RegionData::region_sz_t 115 ParallelCompactData::RegionData::dc_shift = 27; 116 117 const ParallelCompactData::RegionData::region_sz_t 118 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift; 119 120 const ParallelCompactData::RegionData::region_sz_t 121 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift; 122 123 const ParallelCompactData::RegionData::region_sz_t 124 ParallelCompactData::RegionData::los_mask = ~dc_mask; 125 126 const ParallelCompactData::RegionData::region_sz_t 127 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift; 128 129 const ParallelCompactData::RegionData::region_sz_t 130 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift; 131 132 bool ParallelCompactData::RegionData::is_clear() { 133 return (_destination == nullptr) && 134 (_source_region == 0) && 135 (_partial_obj_addr == nullptr) && 136 (_partial_obj_size == 0) && 137 (_dc_and_los == 0) && 138 (_shadow_state == 0); 139 } 140 141 #ifdef ASSERT 142 void ParallelCompactData::RegionData::verify_clear() { 143 assert(_destination == nullptr, "inv"); 144 assert(_source_region == 0, "inv"); 145 assert(_partial_obj_addr == nullptr, "inv"); 146 assert(_partial_obj_size == 0, "inv"); 147 assert(_dc_and_los == 0, "inv"); 148 assert(_shadow_state == 0, "inv"); 149 } 150 #endif 151 152 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id]; 153 154 SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer; 155 ReferenceProcessor* PSParallelCompact::_ref_processor = nullptr; 156 157 void SplitInfo::record(size_t split_region_idx, HeapWord* split_point, size_t preceding_live_words) { 158 assert(split_region_idx != 0, "precondition"); 159 160 // Obj denoted by split_point will be deferred to the next space. 
  assert(split_point != nullptr, "precondition");

  const ParallelCompactData& sd = PSParallelCompact::summary_data();

  PSParallelCompact::RegionData* split_region_ptr = sd.region(split_region_idx);
  assert(preceding_live_words < split_region_ptr->data_size(), "inv");

  HeapWord* preceding_destination = split_region_ptr->destination();
  assert(preceding_destination != nullptr, "inv");

  // How many regions does the preceding part occupy
  uint preceding_destination_count;
  if (preceding_live_words == 0) {
    preceding_destination_count = 0;
  } else {
    // -1 so that the ending address doesn't fall on the region-boundary
    if (sd.region_align_down(preceding_destination) ==
        sd.region_align_down(preceding_destination + preceding_live_words - 1)) {
      preceding_destination_count = 1;
    } else {
      preceding_destination_count = 2;
    }
  }

  _split_region_idx = split_region_idx;
  _split_point = split_point;
  _preceding_live_words = preceding_live_words;
  _preceding_destination = preceding_destination;
  _preceding_destination_count = preceding_destination_count;
}

void SplitInfo::clear()
{
  _split_region_idx = 0;
  _split_point = nullptr;
  _preceding_live_words = 0;
  _preceding_destination = nullptr;
  _preceding_destination_count = 0;
  assert(!is_valid(), "sanity");
}

#ifdef ASSERT
void SplitInfo::verify_clear()
{
  assert(_split_region_idx == 0, "not clear");
  assert(_split_point == nullptr, "not clear");
  assert(_preceding_live_words == 0, "not clear");
  assert(_preceding_destination == nullptr, "not clear");
  assert(_preceding_destination_count == 0, "not clear");
}
#endif // #ifdef ASSERT

void PSParallelCompact::print_on(outputStream* st) {
  _mark_bitmap.print_on(st);
}

ParallelCompactData::ParallelCompactData() :
  _heap_start(nullptr),
  DEBUG_ONLY(_heap_end(nullptr) COMMA)
  _region_vspace(nullptr),
  _reserved_byte_size(0),
  _region_data(nullptr),
  _region_count(0) {}

bool ParallelCompactData::initialize(MemRegion reserved_heap)
{
  _heap_start = reserved_heap.start();
  const size_t heap_size = reserved_heap.word_size();
  DEBUG_ONLY(_heap_end = _heap_start + heap_size;)

  assert(region_align_down(_heap_start) == _heap_start,
         "region start not aligned");

  return initialize_region_data(heap_size);
}

PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  const size_t rs_align = MAX2(page_sz, granularity);

  _reserved_byte_size = align_up(raw_bytes, rs_align);

  ReservedSpace rs = MemoryReserver::reserve(_reserved_byte_size,
                                             rs_align,
                                             page_sz,
                                             mtGC);

  if (!rs.is_reserved()) {
    // Failed to reserve memory.
    return nullptr;
  }

  os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
                       rs.size(), page_sz);

  MemTracker::record_virtual_memory_tag(rs, mtGC);

  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);

  if (!vspace->expand_by(_reserved_byte_size)) {
    // Failed to commit memory.

    delete vspace;

    // Release memory reserved in the space.
    MemoryReserver::release(rs);

    return nullptr;
  }

  return vspace;
}

bool ParallelCompactData::initialize_region_data(size_t heap_size)
{
  assert(is_aligned(heap_size, RegionSize), "precondition");

  const size_t count = heap_size >> Log2RegionSize;
  _region_vspace = create_vspace(count, sizeof(RegionData));
  if (_region_vspace != nullptr) {
    _region_data = (RegionData*)_region_vspace->reserved_low_addr();
    _region_count = count;
    return true;
  }
  return false;
}

void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
  assert(beg_region <= _region_count, "beg_region out of range");
  assert(end_region <= _region_count, "end_region out of range");

  const size_t region_cnt = end_region - beg_region;
  memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
}

void
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
{
  assert(is_region_aligned(beg), "not RegionSize aligned");
  assert(is_region_aligned(end), "not RegionSize aligned");

  size_t cur_region = addr_to_region_idx(beg);
  const size_t end_region = addr_to_region_idx(end);
  HeapWord* addr = beg;
  while (cur_region < end_region) {
    _region_data[cur_region].set_destination(addr);
    _region_data[cur_region].set_destination_count(0);
    _region_data[cur_region].set_source_region(cur_region);

    // Update live_obj_size so the region appears completely full.
    size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
    _region_data[cur_region].set_live_obj_size(live_size);

    ++cur_region;
    addr += RegionSize;
  }
}

// The total live words on src_region would overflow the target space, so find
// the overflowing object and record the split point. The invariant is that an
// obj should not cross space boundary.
HeapWord* ParallelCompactData::summarize_split_space(size_t src_region,
                                                     SplitInfo& split_info,
                                                     HeapWord* const destination,
                                                     HeapWord* const target_end,
                                                     HeapWord** target_next) {
  assert(destination <= target_end, "sanity");
  assert(destination + _region_data[src_region].data_size() > target_end,
         "region should not fit into target space");
  assert(is_region_aligned(target_end), "sanity");

  size_t partial_obj_size = _region_data[src_region].partial_obj_size();

  if (destination + partial_obj_size > target_end) {
    assert(partial_obj_size > 0, "inv");
    // The overflowing obj is from a previous region.
    //
    // source-regions:
    //
    // ***************
    //   |  A|AA     |
    // ***************
    //       ^
    //       | split-point
    //
    // dest-region:
    //
    // ********
    // |~~~~A |
    // ********
    //       ^^
    //       || target-space-end
    //       |
    //       | destination
    //
    // AAA would overflow target-space.
    //
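    // Here the split point is the start of the partial object, which begins
    // on an earlier region; record how many live words on that split region
    // precede it, so that the preceding part can still be relocated to its
    // original destination.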
    HeapWord* overflowing_obj = _region_data[src_region].partial_obj_addr();
    size_t split_region = addr_to_region_idx(overflowing_obj);

    // The number of live words before the overflowing object on this split region
    size_t preceding_live_words;
    if (is_region_aligned(overflowing_obj)) {
      preceding_live_words = 0;
    } else {
      // Words accounted by the overflowing object on the split region
      size_t overflowing_size = pointer_delta(region_align_up(overflowing_obj), overflowing_obj);
      preceding_live_words = region(split_region)->data_size() - overflowing_size;
    }

    split_info.record(split_region, overflowing_obj, preceding_live_words);

    // The [overflowing_obj, src_region_start) part has been accounted for, so
    // must move back the new_top, now that this overflowing obj is deferred.
    HeapWord* new_top = destination - pointer_delta(region_to_addr(src_region), overflowing_obj);

    // If the overflowing obj was relocated to its original destination,
    // those destination regions would have their source_region set. Now that
    // this overflowing obj is relocated somewhere else, reset the
    // source_region.
    {
      size_t range_start = addr_to_region_idx(region_align_up(new_top));
      size_t range_end = addr_to_region_idx(region_align_up(destination));
      for (size_t i = range_start; i < range_end; ++i) {
        region(i)->set_source_region(0);
      }
    }

    // Update new top of target space
    *target_next = new_top;

    return overflowing_obj;
  }

  // Obj-iteration to locate the overflowing obj
  HeapWord* region_start = region_to_addr(src_region);
  HeapWord* region_end = region_start + RegionSize;
  HeapWord* cur_addr = region_start + partial_obj_size;
  size_t live_words = partial_obj_size;

  while (true) {
    assert(cur_addr < region_end, "inv");
    cur_addr = PSParallelCompact::mark_bitmap()->find_obj_beg(cur_addr, region_end);
    // There must be an overflowing obj in this region
    assert(cur_addr < region_end, "inv");

    oop obj = cast_to_oop(cur_addr);
    size_t obj_size = obj->size();
    if (destination + live_words + obj_size > target_end) {
      // Found the overflowing obj
      split_info.record(src_region, cur_addr, live_words);
      *target_next = destination + live_words;
      return cur_addr;
    }

    live_words += obj_size;
    cur_addr += obj_size;
  }
}

size_t ParallelCompactData::live_words_in_space(const MutableSpace* space,
                                                HeapWord** full_region_prefix_end) {
  size_t cur_region = addr_to_region_idx(space->bottom());
  const size_t end_region = addr_to_region_idx(region_align_up(space->top()));
  size_t live_words = 0;
  if (full_region_prefix_end == nullptr) {
    for (/* empty */; cur_region < end_region; ++cur_region) {
      live_words += _region_data[cur_region].data_size();
    }
  } else {
    bool first_set = false;
    for (/* empty */; cur_region < end_region; ++cur_region) {
      size_t live_words_in_region = _region_data[cur_region].data_size();
      if (!first_set && live_words_in_region < RegionSize) {
        *full_region_prefix_end = region_to_addr(cur_region);
        first_set = true;
      }
      live_words += live_words_in_region;
    }
    if (!first_set) {
      // All regions are full of live objs.
      assert(is_region_aligned(space->top()), "inv");
      *full_region_prefix_end = space->top();
    }
    assert(*full_region_prefix_end != nullptr, "postcondition");
    assert(is_region_aligned(*full_region_prefix_end), "inv");
    assert(*full_region_prefix_end >= space->bottom(), "in-range");
    assert(*full_region_prefix_end <= space->top(), "in-range");
  }
  return live_words;
}

bool ParallelCompactData::summarize(SplitInfo& split_info,
                                    HeapWord* source_beg, HeapWord* source_end,
                                    HeapWord** source_next,
                                    HeapWord* target_beg, HeapWord* target_end,
                                    HeapWord** target_next)
{
  HeapWord* const source_next_val = source_next == nullptr ? nullptr : *source_next;
  log_develop_trace(gc, compaction)(
      "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT " "
      "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
      p2i(source_beg), p2i(source_end), p2i(source_next_val),
      p2i(target_beg), p2i(target_end), p2i(*target_next));

  size_t cur_region = addr_to_region_idx(source_beg);
  const size_t end_region = addr_to_region_idx(region_align_up(source_end));

  HeapWord* dest_addr = target_beg;
  for (/* empty */; cur_region < end_region; cur_region++) {
    size_t words = _region_data[cur_region].data_size();

    // Skip empty ones
    if (words == 0) {
      continue;
    }

    if (split_info.is_split(cur_region)) {
      assert(words > split_info.preceding_live_words(), "inv");
      words -= split_info.preceding_live_words();
    }

    _region_data[cur_region].set_destination(dest_addr);

    // If cur_region does not fit entirely into the target space, find a point
    // at which the source space can be 'split' so that part is copied to the
    // target space and the rest is copied elsewhere.
    if (dest_addr + words > target_end) {
      assert(source_next != nullptr, "source_next is null when splitting");
      *source_next = summarize_split_space(cur_region, split_info, dest_addr,
                                           target_end, target_next);
      return false;
    }

    uint destination_count = split_info.is_split(cur_region)
                             ? split_info.preceding_destination_count()
                             : 0;

    HeapWord* const last_addr = dest_addr + words - 1;
    const size_t dest_region_1 = addr_to_region_idx(dest_addr);
    const size_t dest_region_2 = addr_to_region_idx(last_addr);

    // Initially assume that the destination regions will be the same and
    // adjust the value below if necessary. Under this assumption, if
    // cur_region == dest_region_2, then cur_region will be compacted
    // completely into itself.
    destination_count += cur_region == dest_region_2 ? 0 : 1;
    if (dest_region_1 != dest_region_2) {
      // Destination regions differ; adjust destination_count.
      destination_count += 1;
      // Data from cur_region will be copied to the start of dest_region_2.
      _region_data[dest_region_2].set_source_region(cur_region);
    } else if (is_region_aligned(dest_addr)) {
      // Data from cur_region will be copied to the start of the destination
      // region.
      _region_data[dest_region_1].set_source_region(cur_region);
    }

    _region_data[cur_region].set_destination_count(destination_count);
    dest_addr += words;
  }

  *target_next = dest_addr;
  return true;
}

#ifdef ASSERT
void ParallelCompactData::verify_clear() {
  for (uint cur_idx = 0; cur_idx < region_count(); ++cur_idx) {
    if (!region(cur_idx)->is_clear()) {
      log_warning(gc)("Uncleared Region: %u", cur_idx);
      region(cur_idx)->verify_clear();
    }
  }
}
#endif // #ifdef ASSERT

STWGCTimer          PSParallelCompact::_gc_timer;
ParallelOldTracer   PSParallelCompact::_gc_tracer;
elapsedTimer        PSParallelCompact::_accumulated_time;
unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
CollectorCounters*  PSParallelCompact::_counters = nullptr;
ParMarkBitMap       PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

class PCAdjustPointerClosure: public BasicOopIterateClosure {
  template <typename T>
  void do_oop_work(T* p) { PSParallelCompact::adjust_pointer(p); }

public:
  virtual void do_oop(oop* p) { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }

  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
};

static PCAdjustPointerClosure pc_adjust_pointer_closure;

bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _span_based_discoverer.set_span(heap->reserved_region());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,   // mt processing degree
                           ParallelGCThreads,   // mt discovery degree
                           false,               // concurrent_discovery
                           &_is_alive_closure); // non-header is alive closure

  _counters = new CollectorCounters("Parallel full collection pauses", 1);

  // Initialize static fields in ParCompactionManager.
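  // (This also hands the mark bitmap to the per-worker compaction managers,
  // which own the marking and region stacks used by the parallel phases.)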
  ParCompactionManager::initialize(mark_bitmap());
}

bool PSParallelCompact::initialize_aux_data() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();
  assert(mr.byte_size() != 0, "heap should be reserved");

  initialize_space_info();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate %zuKB bitmaps for parallel "
              "garbage collection for the requested %zuKB heap.",
              _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  if (!_summary_data.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate %zuKB card tables for parallel "
              "garbage collection for the requested %zuKB heap.",
              _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  return true;
}

void PSParallelCompact::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  _space_info[old_space_id].set_space(heap->old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
}

void
PSParallelCompact::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that will
  // be set at the end of GC. The marking bitmap is cleared to top; nothing
  // should be marked above top. The summary data is cleared to the larger of
  // top & new_top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();
  HeapWord* const max_top = MAX2(top, _space_info[id].new_top());

  _mark_bitmap.clear_range(bot, top);

  const size_t beg_region = _summary_data.addr_to_region_idx(bot);
  const size_t end_region =
    _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
  _summary_data.clear_range(beg_region, end_region);

  // Clear the data used to 'split' regions.
  SplitInfo& split_info = _space_info[id].split_info();
  if (split_info.is_valid()) {
    split_info.clear();
  }
  DEBUG_ONLY(split_info.verify_clear();)
}

void PSParallelCompact::pre_compact()
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc. Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of young
  // collections will have swapped the spaces an unknown number of times.
  GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  heap->increment_total_collections(true);

  CodeCache::on_gc_marking_cycle_start();

  heap->print_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  // Fill in TLABs
  heap->ensure_parsability(true); // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)
}

void PSParallelCompact::post_compact()
{
  GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
  ParCompactionManager::remove_all_shadow_regions();

  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();

  // Need to clear claim bits for the next full-gc (marking and adjust-pointers).
  ClassLoaderDataGraph::clear_claimed_marks();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap, summary data and split info.
    clear_data_covering_space(SpaceId(id));
    {
      MutableSpace* space = _space_info[id].space();
      HeapWord* top = space->top();
      HeapWord* new_top = _space_info[id].new_top();
      if (ZapUnusedHeapArea && new_top < top) {
        space->mangle_region(MemRegion(new_top, top));
      }
      // Update top(). Must be done after clearing the bitmap and summary data.
      space->set_top(new_top);
    }
  }

#ifdef ASSERT
  {
    mark_bitmap()->verify_clear();
    summary_data().verify_clear();
  }
#endif

  ParCompactionManager::flush_all_string_dedup_requests();

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  bool eden_empty = eden_space->is_empty();

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::heap()->update_capacity_and_used_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
                         to_space->is_empty();

  PSCardTable* ct = heap->card_table();
  MemRegion old_mr = heap->old_gen()->committed();
  if (young_gen_empty) {
    ct->clear_MemRegion(old_mr);
  } else {
    ct->dirty_MemRegion(old_mr);
  }

  heap->prune_scavengable_nmethods();

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif

  // Signal that we have completed a visit to all live objects.
  Universe::heap()->record_whole_heap_examined_timestamp();
}

HeapWord* PSParallelCompact::compute_dense_prefix_for_old_space(MutableSpace* old_space,
                                                                HeapWord* full_region_prefix_end) {
  const size_t region_size = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();

  // Iteration starts with the region *after* the full-region-prefix-end.
  const RegionData* const start_region = sd.addr_to_region_ptr(full_region_prefix_end);
  // If final region is not full, iteration stops before that region,
  // because fill_dense_prefix_end assumes that prefix_end <= top.
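  // The loop below grows the dense prefix greedily: a region is included as
  // long as its dead space still fits into the remaining waste budget, which
  // starts at MarkSweepDeadRatio percent of old-gen capacity.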
  const RegionData* const end_region = sd.addr_to_region_ptr(old_space->top());
  assert(start_region <= end_region, "inv");

  size_t max_waste = old_space->capacity_in_words() * (MarkSweepDeadRatio / 100.0);
  const RegionData* cur_region = start_region;
  for (/* empty */; cur_region < end_region; ++cur_region) {
    assert(region_size >= cur_region->data_size(), "inv");
    size_t dead_size = region_size - cur_region->data_size();
    if (max_waste < dead_size) {
      break;
    }
    max_waste -= dead_size;
  }

  HeapWord* const prefix_end = sd.region_to_addr(cur_region);
  assert(sd.is_region_aligned(prefix_end), "postcondition");
  assert(prefix_end >= full_region_prefix_end, "in-range");
  assert(prefix_end <= old_space->top(), "in-range");
  return prefix_end;
}

void PSParallelCompact::fill_dense_prefix_end(SpaceId id) {
  // Comparing two sizes to decide if filling is required:
  //
  // The size of the filler (min-obj-size) is 2 heap words with the default
  // MinObjAlignment, since both markword and klass take 1 heap word.
  // With +UseCompactObjectHeaders, the minimum filler size is only one word,
  // because the Klass* gets encoded in the mark-word.
  //
  // The size of the gap (if any) right before dense-prefix-end is
  // MinObjAlignment.
  //
  // Need to fill in the gap only if it's smaller than min-obj-size, and the
  // filler obj will extend to next region.

  if (MinObjAlignment >= checked_cast<int>(CollectedHeap::min_fill_size())) {
    return;
  }

  assert(!UseCompactObjectHeaders, "Compact headers can allocate small objects");
  assert(CollectedHeap::min_fill_size() == 2, "inv");
  HeapWord* const dense_prefix_end = dense_prefix(id);
  assert(_summary_data.is_region_aligned(dense_prefix_end), "precondition");
  assert(dense_prefix_end <= space(id)->top(), "precondition");
  if (dense_prefix_end == space(id)->top()) {
    // Must not have single-word gap right before prefix-end/top.
    return;
  }
  RegionData* const region_after_dense_prefix = _summary_data.addr_to_region_ptr(dense_prefix_end);

  if (region_after_dense_prefix->partial_obj_size() != 0 ||
      _mark_bitmap.is_marked(dense_prefix_end)) {
    // The region after the dense prefix starts with live bytes.
    return;
  }

  HeapWord* block_start = start_array(id)->block_start_reaching_into_card(dense_prefix_end);
  if (block_start == dense_prefix_end - 1) {
    assert(!_mark_bitmap.is_marked(block_start), "inv");
    // There is exactly one heap word gap right before the dense prefix end, so we need a filler object.
    // The filler object will extend into region_after_dense_prefix.
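    // The single dead word below dense_prefix_end is merged with the first
    // word of the region above into a minimal 2-word filler, which is why the
    // partial-obj bookkeeping of region_after_dense_prefix is updated below.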
    const size_t obj_len = 2; // min-fill-size
    HeapWord* const obj_beg = dense_prefix_end - 1;
    CollectedHeap::fill_with_object(obj_beg, obj_len);
    _mark_bitmap.mark_obj(obj_beg);
    _summary_data.addr_to_region_ptr(obj_beg)->add_live_obj(1);
    region_after_dense_prefix->set_partial_obj_size(1);
    region_after_dense_prefix->set_partial_obj_addr(obj_beg);
    assert(start_array(id) != nullptr, "sanity");
    start_array(id)->update_for_block(obj_beg, obj_beg + obj_len);
  }
}

bool PSParallelCompact::check_maximum_compaction(size_t total_live_words,
                                                 MutableSpace* const old_space,
                                                 HeapWord* full_region_prefix_end) {

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Check System.GC
  bool is_max_on_system_gc = UseMaximumCompactionOnSystemGC
                          && GCCause::is_user_requested_gc(heap->gc_cause());

  // Check if all live objs are too much for old-gen.
  const bool is_old_gen_too_full = (total_live_words >= old_space->capacity_in_words());

  // JVM flags
  const uint total_invocations = heap->total_full_collections();
  assert(total_invocations >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations - _maximum_compaction_gc_num;
  const bool is_interval_ended = gcs_since_max > HeapMaximumCompactionInterval;

  // If all regions in old-gen are full
  const bool is_region_full =
    full_region_prefix_end >= _summary_data.region_align_down(old_space->top());

  if (is_max_on_system_gc || is_old_gen_too_full || is_interval_ended || is_region_full) {
    _maximum_compaction_gc_num = total_invocations;
    return true;
  }

  return false;
}

void PSParallelCompact::summary_phase()
{
  GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);

  MutableSpace* const old_space = _space_info[old_space_id].space();
  {
    size_t total_live_words = 0;
    HeapWord* full_region_prefix_end = nullptr;
    {
      // old-gen
      size_t live_words = _summary_data.live_words_in_space(old_space,
                                                            &full_region_prefix_end);
      total_live_words += live_words;
    }
    // young-gen
    for (uint i = eden_space_id; i < last_space_id; ++i) {
      const MutableSpace* space = _space_info[i].space();
      size_t live_words = _summary_data.live_words_in_space(space);
      total_live_words += live_words;
      _space_info[i].set_new_top(space->bottom() + live_words);
      _space_info[i].set_dense_prefix(space->bottom());
    }

    bool maximum_compaction = check_maximum_compaction(total_live_words,
                                                       old_space,
                                                       full_region_prefix_end);
    {
      GCTraceTime(Info, gc, phases) tm("Summary Phase: expand", &_gc_timer);
      // Try to expand old-gen in order to fit all live objs and waste.
      size_t target_capacity_bytes = total_live_words * HeapWordSize
                                   + old_space->capacity_in_bytes() * (MarkSweepDeadRatio / 100);
      ParallelScavengeHeap::heap()->old_gen()->try_expand_till_size(target_capacity_bytes);
    }

    HeapWord* dense_prefix_end = maximum_compaction
                                 ? full_region_prefix_end
                                 : compute_dense_prefix_for_old_space(old_space,
                                                                      full_region_prefix_end);
    SpaceId id = old_space_id;
    _space_info[id].set_dense_prefix(dense_prefix_end);

    if (dense_prefix_end != old_space->bottom()) {
      fill_dense_prefix_end(id);
      _summary_data.summarize_dense_prefix(old_space->bottom(), dense_prefix_end);
    }

    // Compacting objs in [dense_prefix_end, old_space->top())
    _summary_data.summarize(_space_info[id].split_info(),
                            dense_prefix_end, old_space->top(), nullptr,
                            dense_prefix_end, old_space->end(),
                            _space_info[id].new_top_addr());
  }

  // Summarize the remaining spaces in the young gen. The initial target space
  // is the old gen. If a space does not fit entirely into the target, then the
  // remainder is compacted into the space itself and that space becomes the new
  // target.
  SpaceId dst_space_id = old_space_id;
  HeapWord* dst_space_end = old_space->end();
  HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
  for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    const size_t live = pointer_delta(_space_info[id].new_top(),
                                      space->bottom());
    const size_t available = pointer_delta(dst_space_end, *new_top_addr);

    if (live > 0 && live <= available) {
      // All the live data will fit.
      bool done = _summary_data.summarize(_space_info[id].split_info(),
                                          space->bottom(), space->top(),
                                          nullptr,
                                          *new_top_addr, dst_space_end,
                                          new_top_addr);
      assert(done, "space must fit into old gen");

      // Reset the new_top value for the space.
      _space_info[id].set_new_top(space->bottom());
    } else if (live > 0) {
      // Attempt to fit part of the source space into the target space.
      HeapWord* next_src_addr = nullptr;
      bool done = _summary_data.summarize(_space_info[id].split_info(),
                                          space->bottom(), space->top(),
                                          &next_src_addr,
                                          *new_top_addr, dst_space_end,
                                          new_top_addr);
      assert(!done, "space should not fit into old gen");
      assert(next_src_addr != nullptr, "sanity");

      // The source space becomes the new target, so the remainder is compacted
      // within the space itself.
      dst_space_id = SpaceId(id);
      dst_space_end = space->end();
      new_top_addr = _space_info[id].new_top_addr();
      done = _summary_data.summarize(_space_info[id].split_info(),
                                     next_src_addr, space->top(),
                                     nullptr,
                                     space->bottom(), dst_space_end,
                                     new_top_addr);
      assert(done, "space must fit when compacted into itself");
      assert(*new_top_addr <= space->top(), "usage should not grow");
    }
  }
}

// This method invokes a full collection. The argument controls whether
// soft-refs should be cleared or not.
// Note that this method should only be called from the vm_thread while at a
// safepoint.
bool PSParallelCompact::invoke(bool clear_all_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(),
         "should be in vm thread");

  SvcGCMarker sgcm(SvcGCMarker::FULL);
  IsSTWGCActiveMark mark;

  return PSParallelCompact::invoke_no_policy(clear_all_soft_refs);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
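// The collection runs in phases, in order: marking (including reference
// processing and class unloading), summary (compute per-region destinations),
// forward (install forwarding pointers), adjust (update all references) and
// compact (copy objects to their destinations).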
bool PSParallelCompact::invoke_no_policy(bool clear_all_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != nullptr, "Sanity");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  GCIdMark gc_id_mark;
  _gc_timer.register_gc_start();
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  GCCause::Cause gc_cause = heap->gc_cause();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // Make sure data structures are sane, make the heap parsable, and do other
  // miscellaneous bookkeeping.
  pre_compact();

  const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

  {
    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);

    heap->pre_full_gc_dump(&_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->start_discovery(clear_all_soft_refs);

    marking_phase(&_gc_tracer);

    summary_phase();

#if COMPILER2_OR_JVMCI
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    forward_to_new_addr();

    adjust_pointers();

    compact();

    ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());

    ParCompactionManager::verify_all_region_stack_empty();

    // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
    // done before resizing.
    post_compact();

    size_policy->major_collection_end();

    size_policy->sample_old_gen_used_bytes(MAX2(pre_gc_values.old_gen_used(), old_gen->used_in_bytes()));

    if (UseAdaptiveSizePolicy) {
      heap->resize_after_full_gc();
    }

    heap->resize_all_tlabs();

    // Resize the metaspace capacity after a collection
    MetaspaceGC::compute_new_size();

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(&_gc_timer);

    size_policy->record_gc_pause_end_instant();
  }

  heap->gc_epilogue(true);

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  _gc_timer.register_gc_end();

  _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return true;
}

class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
private:
  uint _worker_id;

public:
  PCAddThreadRootsMarkingTaskClosure(uint worker_id) : _worker_id(worker_id) { }
  void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    ResourceMark rm;

    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(_worker_id);

    MarkingNMethodClosure mark_and_push_in_blobs(&cm->_mark_and_push_closure,
                                                 !NMethodToOopClosure::FixRelocations,
                                                 true /* keepalive nmethods */);

    thread->oops_do(&cm->_mark_and_push_closure, &mark_and_push_in_blobs);

    // Do the real work
    cm->follow_marking_stacks();
  }
};

void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(worker_id);

  do {
    ScannerTask task;
    if (ParCompactionManager::steal(worker_id, task)) {
      cm->follow_contents(task, true);
    }
    cm->follow_marking_stacks();
  } while (!terminator.offer_termination());
}

class MarkFromRootsTask : public WorkerTask {
  StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
  TaskTerminator _terminator;
  uint _active_workers;

public:
  MarkFromRootsTask(uint active_workers) :
    WorkerTask("MarkFromRootsTask"),
    _strong_roots_scope(active_workers),
    _terminator(active_workers, ParCompactionManager::marking_stacks()),
    _active_workers(active_workers) {}

  virtual void work(uint worker_id) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
    cm->create_marking_stats_cache();
    {
      CLDToOopClosure cld_closure(&cm->_mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
      ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);

      // Do the real work
      cm->follow_marking_stacks();
    }

    {
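      // Mark from thread roots (each thread's stack frames and the nmethods
      // they reference); the StrongRootsScope member above lets each thread
      // be claimed and scanned by exactly one worker.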
      PCAddThreadRootsMarkingTaskClosure closure(worker_id);
      Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &closure);
    }

    // Mark from OopStorages
    {
      _oop_storage_set_par_state.oops_do(&cm->_mark_and_push_closure);
      // Do the real work
      cm->follow_marking_stacks();
    }

    if (_active_workers > 1) {
      steal_marking_work(_terminator, worker_id);
    }
  }
};

class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  ParallelCompactRefProcProxyTask(uint max_workers)
    : RefProcProxyTask("ParallelCompactRefProcProxyTask", max_workers),
      _terminator(_max_workers, ParCompactionManager::marking_stacks()) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
    _rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &cm->_mark_and_push_closure, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

static void flush_marking_stats_cache(const uint num_workers) {
  for (uint i = 0; i < num_workers; ++i) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(i);
    cm->flush_and_destroy_marking_stats_cache();
  }
}

void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);

  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
  {
    GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);

    MarkFromRootsTask task(active_gc_threads);
    ParallelScavengeHeap::heap()->workers().run_task(&task);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

    ReferenceProcessorStats stats;
    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());

    ParallelCompactRefProcProxyTask task(ref_processor()->max_num_queues());
    stats = ref_processor()->process_discovered_references(task, &ParallelScavengeHeap::heap()->workers(), pt);

    gc_tracer->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  {
    GCTraceTime(Debug, gc, phases) tm("Flush Marking Stats", &_gc_timer);

    flush_marking_stats_cache(active_gc_threads);
  }

  // This is the point where the entire marking should have completed.
  ParCompactionManager::verify_all_marking_stack_empty();

  {
    GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
    WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
                                is_alive_closure(),
                                &do_nothing_cl,
                                1);
  }

  {
    GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);

    ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                              false /* unregister_nmethods_during_purge */,
                              false /* lock_nmethod_free_separately */);

    bool unloading_occurred;
    {
      CodeCache::UnlinkingScope scope(is_alive_closure());

      // Follow system dictionary roots and unload classes.
      unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);

      // Unload nmethods.
      CodeCache::do_unloading(unloading_occurred);
    }

    {
      GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
      // Release unloaded nmethod's memory.
      ctx.purge_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
      ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
    }
    {
      GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
      ctx.free_nmethods();
    }

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(unloading_occurred);

    // Clean JVMCI metadata handles.
    JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));

    {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
      ClassLoaderDataGraph::purge(true /* at_safepoint */);
      DEBUG_ONLY(MetaspaceUtils::verify();)
    }
  }

  {
    GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
    _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
  }
#if TASKQUEUE_STATS
  ParCompactionManager::print_and_reset_taskqueue_stats();
#endif
}

template<typename Func>
void PSParallelCompact::adjust_in_space_helper(SpaceId id, volatile uint* claim_counter, Func&& on_stripe) {
  MutableSpace* sp = PSParallelCompact::space(id);
  HeapWord* const bottom = sp->bottom();
  HeapWord* const top = sp->top();
  if (bottom == top) {
    return;
  }

  const uint num_regions_per_stripe = 2;
  const size_t region_size = ParallelCompactData::RegionSize;
  const size_t stripe_size = num_regions_per_stripe * region_size;

  while (true) {
    uint counter = Atomic::fetch_then_add(claim_counter, num_regions_per_stripe);
    HeapWord* cur_stripe = bottom + counter * region_size;
    if (cur_stripe >= top) {
      break;
    }
    HeapWord* stripe_end = MIN2(cur_stripe + stripe_size, top);
    on_stripe(cur_stripe, stripe_end);
  }
}

void PSParallelCompact::adjust_in_old_space(volatile uint* claim_counter) {
  // Regions in old-space shouldn't be split.
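  // Old-space objects may extend across stripe boundaries (including the
  // partial object reaching into a stripe's first region), so oop-iteration
  // below is clipped to the current stripe; the part of an object outside the
  // stripe is processed by the worker owning the neighboring stripe.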
  assert(!_space_info[old_space_id].split_info().is_valid(), "inv");

  auto scan_obj_with_limit = [&] (HeapWord* obj_start, HeapWord* left, HeapWord* right) {
    assert(mark_bitmap()->is_marked(obj_start), "inv");
    oop obj = cast_to_oop(obj_start);
    return obj->oop_iterate_size(&pc_adjust_pointer_closure, MemRegion(left, right));
  };

  adjust_in_space_helper(old_space_id, claim_counter, [&] (HeapWord* stripe_start, HeapWord* stripe_end) {
    assert(_summary_data.is_region_aligned(stripe_start), "inv");
    RegionData* cur_region = _summary_data.addr_to_region_ptr(stripe_start);
    HeapWord* obj_start;
    if (cur_region->partial_obj_size() != 0) {
      obj_start = cur_region->partial_obj_addr();
      obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
    } else {
      obj_start = stripe_start;
    }

    while (obj_start < stripe_end) {
      obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
      if (obj_start >= stripe_end) {
        break;
      }
      obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
    }
  });
}

void PSParallelCompact::adjust_in_young_space(SpaceId id, volatile uint* claim_counter) {
  adjust_in_space_helper(id, claim_counter, [](HeapWord* stripe_start, HeapWord* stripe_end) {
    HeapWord* obj_start = stripe_start;
    while (obj_start < stripe_end) {
      obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
      if (obj_start >= stripe_end) {
        break;
      }
      oop obj = cast_to_oop(obj_start);
      obj_start += obj->oop_iterate_size(&pc_adjust_pointer_closure);
    }
  });
}

void PSParallelCompact::adjust_pointers_in_spaces(uint worker_id, volatile uint* claim_counters) {
  auto start_time = Ticks::now();
  adjust_in_old_space(&claim_counters[0]);
  for (uint id = eden_space_id; id < last_space_id; ++id) {
    adjust_in_young_space(SpaceId(id), &claim_counters[id]);
  }
  log_trace(gc, phases)("adjust_pointers_in_spaces worker %u: %.3f ms", worker_id, (Ticks::now() - start_time).seconds() * 1000);
}

class PSAdjustTask final : public WorkerTask {
  SubTasksDone _sub_tasks;
  WeakProcessor::Task _weak_proc_task;
  OopStorageSetStrongParState<false, false> _oop_storage_iter;
  uint _nworkers;
  volatile uint _claim_counters[PSParallelCompact::last_space_id] = {};

  enum PSAdjustSubTask {
    PSAdjustSubTask_code_cache,

    PSAdjustSubTask_num_elements
  };

public:
  PSAdjustTask(uint nworkers) :
    WorkerTask("PSAdjust task"),
    _sub_tasks(PSAdjustSubTask_num_elements),
    _weak_proc_task(nworkers),
    _nworkers(nworkers) {

    ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
    if (nworkers > 1) {
      Threads::change_thread_claim_token();
    }
  }

  ~PSAdjustTask() {
    Threads::assert_all_threads_claimed();
  }

  void work(uint worker_id) {
    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
    cm->preserved_marks()->adjust_during_full_gc();
    {
      // adjust pointers in all spaces
      PSParallelCompact::adjust_pointers_in_spaces(worker_id, _claim_counters);
    }
    {
      ResourceMark rm;
      Threads::possibly_parallel_oops_do(_nworkers > 1, &pc_adjust_pointer_closure, nullptr);
    }
    _oop_storage_iter.oops_do(&pc_adjust_pointer_closure);
    {
cld_closure(&pc_adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust); 1428 ClassLoaderDataGraph::cld_do(&cld_closure); 1429 } 1430 { 1431 AlwaysTrueClosure always_alive; 1432 _weak_proc_task.work(worker_id, &always_alive, &pc_adjust_pointer_closure); 1433 } 1434 if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) { 1435 NMethodToOopClosure adjust_code(&pc_adjust_pointer_closure, NMethodToOopClosure::FixRelocations); 1436 CodeCache::nmethods_do(&adjust_code); 1437 } 1438 _sub_tasks.all_tasks_claimed(); 1439 } 1440 }; 1441 1442 void PSParallelCompact::adjust_pointers() { 1443 // Adjust the pointers to reflect the new locations 1444 GCTraceTime(Info, gc, phases) tm("Adjust Pointers", &_gc_timer); 1445 uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers(); 1446 PSAdjustTask task(nworkers); 1447 ParallelScavengeHeap::heap()->workers().run_task(&task); 1448 } 1449 1450 // Split [start, end) evenly for a number of workers and return the 1451 // range for worker_id. 1452 static void split_regions_for_worker(size_t start, size_t end, 1453 uint worker_id, uint num_workers, 1454 size_t* worker_start, size_t* worker_end) { 1455 assert(start < end, "precondition"); 1456 assert(num_workers > 0, "precondition"); 1457 assert(worker_id < num_workers, "precondition"); 1458 1459 size_t num_regions = end - start; 1460 size_t num_regions_per_worker = num_regions / num_workers; 1461 size_t remainder = num_regions % num_workers; 1462 // The first few workers will get one extra. 1463 *worker_start = start + worker_id * num_regions_per_worker 1464 + MIN2(checked_cast<size_t>(worker_id), remainder); 1465 *worker_end = *worker_start + num_regions_per_worker 1466 + (worker_id < remainder ? 1 : 0); 1467 } 1468 1469 void PSParallelCompact::forward_to_new_addr() { 1470 GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer); 1471 uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers(); 1472 1473 struct ForwardTask final : public WorkerTask { 1474 uint _num_workers; 1475 1476 explicit ForwardTask(uint num_workers) : 1477 WorkerTask("PSForward task"), 1478 _num_workers(num_workers) {} 1479 1480 static void forward_objs_in_range(ParCompactionManager* cm, 1481 HeapWord* start, 1482 HeapWord* end, 1483 HeapWord* destination) { 1484 HeapWord* cur_addr = start; 1485 HeapWord* new_addr = destination; 1486 1487 while (cur_addr < end) { 1488 cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end); 1489 if (cur_addr >= end) { 1490 return; 1491 } 1492 assert(mark_bitmap()->is_marked(cur_addr), "inv"); 1493 oop obj = cast_to_oop(cur_addr); 1494 if (new_addr != cur_addr) { 1495 cm->preserved_marks()->push_if_necessary(obj, obj->mark()); 1496 FullGCForwarding::forward_to(obj, cast_to_oop(new_addr)); 1497 } 1498 size_t obj_size = obj->size(); 1499 new_addr += obj_size; 1500 cur_addr += obj_size; 1501 } 1502 } 1503 1504 void work(uint worker_id) override { 1505 ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id); 1506 for (uint id = old_space_id; id < last_space_id; ++id) { 1507 MutableSpace* sp = PSParallelCompact::space(SpaceId(id)); 1508 HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id)); 1509 HeapWord* top = sp->top(); 1510 1511 if (dense_prefix_addr == top) { 1512 // Empty space 1513 continue; 1514 } 1515 1516 const SplitInfo& split_info = _space_info[SpaceId(id)].split_info(); 1517 size_t dense_prefix_region = _summary_data.addr_to_region_idx(dense_prefix_addr); 1518 size_t top_region = 
        size_t top_region = _summary_data.addr_to_region_idx(_summary_data.region_align_up(top));
        size_t start_region;
        size_t end_region;
        split_regions_for_worker(dense_prefix_region, top_region,
                                 worker_id, _num_workers,
                                 &start_region, &end_region);
        for (size_t cur_region = start_region; cur_region < end_region; ++cur_region) {
          RegionData* region_ptr = _summary_data.region(cur_region);
          size_t partial_obj_size = region_ptr->partial_obj_size();

          if (partial_obj_size == ParallelCompactData::RegionSize) {
            // No obj-start
            continue;
          }

          HeapWord* region_start = _summary_data.region_to_addr(cur_region);
          HeapWord* region_end = region_start + ParallelCompactData::RegionSize;

          if (split_info.is_split(cur_region)) {
            // Part 1: will be relocated to space-1
            HeapWord* preceding_destination = split_info.preceding_destination();
            HeapWord* split_point = split_info.split_point();
            forward_objs_in_range(cm, region_start + partial_obj_size, split_point, preceding_destination + partial_obj_size);

            // Part 2: will be relocated to space-2
            HeapWord* destination = region_ptr->destination();
            forward_objs_in_range(cm, split_point, region_end, destination);
          } else {
            HeapWord* destination = region_ptr->destination();
            forward_objs_in_range(cm, region_start + partial_obj_size, region_end, destination + partial_obj_size);
          }
        }
      }
    }
  } task(nworkers);

  ParallelScavengeHeap::heap()->workers().run_task(&task);
  DEBUG_ONLY(verify_forward();)
}

#ifdef ASSERT
void PSParallelCompact::verify_forward() {
  HeapWord* const old_dense_prefix_addr = dense_prefix(SpaceId(old_space_id));
  RegionData* old_region = _summary_data.region(_summary_data.addr_to_region_idx(old_dense_prefix_addr));
  HeapWord* bump_ptr = old_region->partial_obj_size() != 0
                       ? old_dense_prefix_addr + old_region->partial_obj_size()
                       : old_dense_prefix_addr;
  SpaceId bump_ptr_space = old_space_id;

  for (uint id = old_space_id; id < last_space_id; ++id) {
    MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
    HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
    HeapWord* top = sp->top();
    HeapWord* cur_addr = dense_prefix_addr;

    while (cur_addr < top) {
      cur_addr = mark_bitmap()->find_obj_beg(cur_addr, top);
      if (cur_addr >= top) {
        break;
      }
      assert(mark_bitmap()->is_marked(cur_addr), "inv");
      assert(bump_ptr <= _space_info[bump_ptr_space].new_top(), "inv");
      // Move to the space containing cur_addr
      if (bump_ptr == _space_info[bump_ptr_space].new_top()) {
        bump_ptr = space(space_id(cur_addr))->bottom();
        bump_ptr_space = space_id(bump_ptr);
      }
      oop obj = cast_to_oop(cur_addr);
      if (cur_addr == bump_ptr) {
        assert(!FullGCForwarding::is_forwarded(obj), "inv");
      } else {
        assert(FullGCForwarding::forwardee(obj) == cast_to_oop(bump_ptr), "inv");
      }
      bump_ptr += obj->size();
      cur_addr += obj->size();
    }
  }
}
#endif

// Helper class to print 8 region numbers per line and then print the total at the end.
class FillableRegionLogger : public StackObj {
private:
  Log(gc, compaction) log;
  static const int LineLength = 8;
  size_t _regions[LineLength];
  int _next_index;
  bool _enabled;
  size_t _total_regions;
public:
  FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
  ~FillableRegionLogger() {
    log.trace("%zu initially fillable regions", _total_regions);
  }

  void print_line() {
    if (!_enabled || _next_index == 0) {
      return;
    }
    FormatBuffer<> line("Fillable: ");
    for (int i = 0; i < _next_index; i++) {
      line.append(" %7zu", _regions[i]);
    }
    log.trace("%s", line.buffer());
    _next_index = 0;
  }

  void handle(size_t region) {
    if (!_enabled) {
      return;
    }
    _regions[_next_index++] = region;
    if (_next_index == LineLength) {
      print_line();
    }
    _total_regions++;
  }
};

void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
{
  GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);

  // Find the threads that are active
  uint worker_id = 0;

  // Find all regions that are available (can be filled immediately) and
  // distribute them to the thread stacks. The iteration is done in reverse
  // order (high to low) so the regions will be removed in ascending order.

  const ParallelCompactData& sd = PSParallelCompact::summary_data();

  // id + 1 is used to test termination so unsigned can
  // be used with an old_space_id == 0.
  FillableRegionLogger region_logger;
  for (unsigned int id = last_space_id - 1; id + 1 > old_space_id; --id) {
    SpaceInfo* const space_info = _space_info + id;
    HeapWord* const new_top = space_info->new_top();

    const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
    const size_t end_region =
      sd.addr_to_region_idx(sd.region_align_up(new_top));

    for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
      if (sd.region(cur)->claim_unsafe()) {
        ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
        bool result = sd.region(cur)->mark_normal();
        assert(result, "Must succeed at this point.");
        cm->region_stack()->push(cur);
        region_logger.handle(cur);
        // Assign regions to tasks in round-robin fashion.
        if (++worker_id == parallel_gc_threads) {
          worker_id = 0;
        }
      }
    }
    region_logger.print_line();
  }
}

static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(worker_id);

  // Drain the stacks that have been preloaded with regions
  // that are ready to fill.
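  // The region stacks were populated round-robin by prepare_region_draining_tasks();
  // whatever remains after draining is obtained below by stealing from other workers.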

  cm->drain_region_stacks();

  guarantee(cm->region_stack()->is_empty(), "Not empty");

  size_t region_index = 0;

  while (true) {
    if (ParCompactionManager::steal(worker_id, region_index)) {
      PSParallelCompact::fill_and_update_region(cm, region_index);
      cm->drain_region_stacks();
    } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
      // Fill and update an unavailable region with the help of a shadow region
      PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
      cm->drain_region_stacks();
    } else {
      if (terminator->offer_termination()) {
        break;
      }
      // Go around again.
    }
  }
}

class FillDensePrefixAndCompactionTask: public WorkerTask {
  uint _num_workers;
  TaskTerminator _terminator;

public:
  FillDensePrefixAndCompactionTask(uint active_workers) :
    WorkerTask("FillDensePrefixAndCompactionTask"),
    _num_workers(active_workers),
    _terminator(active_workers, ParCompactionManager::region_task_queues()) {
  }

  virtual void work(uint worker_id) {
    {
      auto start = Ticks::now();
      PSParallelCompact::fill_dead_objs_in_dense_prefix(worker_id, _num_workers);
      log_trace(gc, phases)("Fill dense prefix by worker %u: %.3f ms", worker_id, (Ticks::now() - start).seconds() * 1000);
    }
    compaction_with_stealing_work(&_terminator, worker_id);
  }
};

void PSParallelCompact::fill_range_in_dense_prefix(HeapWord* start, HeapWord* end) {
#ifdef ASSERT
  {
    assert(start < end, "precondition");
    assert(mark_bitmap()->find_obj_beg(start, end) == end, "precondition");
    HeapWord* bottom = _space_info[old_space_id].space()->bottom();
    if (start != bottom) {
      HeapWord* obj_start = mark_bitmap()->find_obj_beg_reverse(bottom, start);
      HeapWord* after_obj = obj_start + cast_to_oop(obj_start)->size();
      assert(after_obj == start, "precondition");
    }
  }
#endif

  CollectedHeap::fill_with_objects(start, pointer_delta(end, start));
  HeapWord* addr = start;
  do {
    size_t size = cast_to_oop(addr)->size();
    start_array(old_space_id)->update_for_block(addr, addr + size);
    addr += size;
  } while (addr < end);
}

void PSParallelCompact::fill_dead_objs_in_dense_prefix(uint worker_id, uint num_workers) {
  ParMarkBitMap* bitmap = mark_bitmap();

  HeapWord* const bottom = _space_info[old_space_id].space()->bottom();
  HeapWord* const prefix_end = dense_prefix(old_space_id);

  if (bottom == prefix_end) {
    return;
  }

  size_t bottom_region = _summary_data.addr_to_region_idx(bottom);
  size_t prefix_end_region = _summary_data.addr_to_region_idx(prefix_end);

  size_t start_region;
  size_t end_region;
  split_regions_for_worker(bottom_region, prefix_end_region,
                           worker_id, num_workers,
                           &start_region, &end_region);

  if (start_region == end_region) {
    return;
  }

  HeapWord* const start_addr = _summary_data.region_to_addr(start_region);
  HeapWord* const end_addr = _summary_data.region_to_addr(end_region);

  // Skip live partial obj (if any) from previous region.
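  // Such an obj is live and extends into this worker's chunk from the previous
  // one; filling of dead space must start after its end.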
  HeapWord* cur_addr;
  RegionData* start_region_ptr = _summary_data.region(start_region);
  if (start_region_ptr->partial_obj_size() != 0) {
    HeapWord* partial_obj_start = start_region_ptr->partial_obj_addr();
    assert(bitmap->is_marked(partial_obj_start), "inv");
    cur_addr = partial_obj_start + cast_to_oop(partial_obj_start)->size();
  } else {
    cur_addr = start_addr;
  }

  // end_addr is inclusive to handle regions starting with dead space.
  while (cur_addr <= end_addr) {
    // Use prefix_end to handle trailing obj in each worker region-chunk.
    HeapWord* live_start = bitmap->find_obj_beg(cur_addr, prefix_end);
    if (cur_addr != live_start) {
      // Only worker 0 handles preceding dead space.
      if (cur_addr != start_addr || worker_id == 0) {
        fill_range_in_dense_prefix(cur_addr, live_start);
      }
    }
    if (live_start >= end_addr) {
      break;
    }
    assert(bitmap->is_marked(live_start), "inv");
    cur_addr = live_start + cast_to_oop(live_start)->size();
  }
}

void PSParallelCompact::compact() {
  GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);

  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  initialize_shadow_regions(active_gc_threads);
  prepare_region_draining_tasks(active_gc_threads);

  {
    GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);

    FillDensePrefixAndCompactionTask task(active_gc_threads);
    ParallelScavengeHeap::heap()->workers().run_task(&task);

#ifdef ASSERT
    verify_filler_in_dense_prefix();

    // Verify that all regions have been processed.
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      verify_complete(SpaceId(id));
    }
#endif
  }
}

#ifdef ASSERT
void PSParallelCompact::verify_filler_in_dense_prefix() {
  HeapWord* bottom = _space_info[old_space_id].space()->bottom();
  HeapWord* dense_prefix_end = dense_prefix(old_space_id);
  HeapWord* cur_addr = bottom;
  while (cur_addr < dense_prefix_end) {
    oop obj = cast_to_oop(cur_addr);
    oopDesc::verify(obj);
    if (!mark_bitmap()->is_marked(cur_addr)) {
      Klass* k = cast_to_oop(cur_addr)->klass();
      assert(k == Universe::fillerArrayKlass() || k == vmClasses::FillerObject_klass(), "inv");
    }
    cur_addr += obj->size();
  }
}

void PSParallelCompact::verify_complete(SpaceId space_id) {
  // All Regions served as compaction targets, from dense_prefix() to
  // new_top(), should be marked as filled and all Regions between new_top()
  // and top() should be available (i.e., should have been emptied).
  ParallelCompactData& sd = summary_data();
  SpaceInfo si = _space_info[space_id];
  HeapWord* new_top_addr = sd.region_align_up(si.new_top());
  HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
  const size_t beg_region = sd.addr_to_region_idx(si.dense_prefix());
  const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
  const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);

  size_t cur_region;
  for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
    const RegionData* const c = sd.region(cur_region);
    assert(c->completed(), "region %zu not filled: destination_count=%u",
           cur_region, c->destination_count());
  }

  for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
    const RegionData* const c = sd.region(cur_region);
    assert(c->available(), "region %zu not empty: destination_count=%u",
           cur_region, c->destination_count());
  }
}
#endif // #ifdef ASSERT

// Return the SpaceId for the space containing addr. If addr is not in the
// heap, last_space_id is returned. In debug mode it expects the address to be
// in the heap and asserts such.
PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
  assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    if (_space_info[id].space()->contains(addr)) {
      return SpaceId(id);
    }
  }

  assert(false, "no space contains the addr");
  return last_space_id;
}

// Skip over count live words starting from beg, and return the address of the
// next live word. Callers must also ensure that there are enough live words in
// the range [beg, end) to skip.
HeapWord* PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
{
  ParMarkBitMap* m = mark_bitmap();
  HeapWord* cur_addr = beg;
  while (true) {
    cur_addr = m->find_obj_beg(cur_addr, end);
    assert(cur_addr < end, "inv");
    size_t obj_size = cast_to_oop(cur_addr)->size();
    // Strictly greater-than
    if (obj_size > count) {
      return cur_addr + count;
    }
    count -= obj_size;
    cur_addr += obj_size;
  }
}

// On starting to fill a destination region (dest-region), we need to know the
// location of the word that will be at the start of the dest-region after
// compaction. A dest-region can have one or more source regions, but only the
// first source-region contains this location. This location is retrieved by
// calling `first_src_addr` on a dest-region.
// Conversely, a source-region has a dest-region which holds the destination of
// the first live word on this source-region, based on which the destination
// for the rest of live words can be derived.
//
// Note:
// There is some complication due to space-boundary-fragmentation (an obj can't
// cross space-boundary) -- a source-region may be split and behave like two
// distinct regions with their own dest-region, as depicted below.
//
// source-region: region-n
//
// **********************
// |     A|A~~~~B|B      |
// **********************
//      n-1      n     n+1
//
// AA, BB denote two live objs. ~~~~ denotes unknown number of live objs.
//
// Assuming the dest-region for region-n is the final region before
// old-space-end and its first-live-word is in the middle of AA, the heap content
// will look like the following after compaction:
//
// **************                   *************
//      A|A~~~~ |                   |BB         |
// **************                   *************
//              ^                   ^
//              | old-space-end     | eden-space-start
//
// Therefore, in this example, region-n will have two dest-regions:
// 1. the final region in old-space
// 2. the first region in eden-space.
// To handle this special case, we introduce the concept of split-region, whose
// contents are relocated to two spaces. `SplitInfo` captures all necessary
// info about the split: the first part, the splitting point, and the second part.
HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
                                            SpaceId src_space_id,
                                            size_t src_region_idx)
{
  const size_t RegionSize = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();
  assert(sd.is_region_aligned(dest_addr), "precondition");

  const RegionData* const src_region_ptr = sd.region(src_region_idx);
  assert(src_region_ptr->data_size() > 0, "src region cannot be empty");

  const size_t partial_obj_size = src_region_ptr->partial_obj_size();
  HeapWord* const src_region_destination = src_region_ptr->destination();

  HeapWord* const region_start = sd.region_to_addr(src_region_idx);
  HeapWord* const region_end = sd.region_to_addr(src_region_idx) + RegionSize;

  // Identify the actual destination for the first live words on this region,
  // taking split-region into account.
  HeapWord* region_start_destination;
  const SplitInfo& split_info = _space_info[src_space_id].split_info();
  if (split_info.is_split(src_region_idx)) {
    // The second part of this split region; use the recorded split point.
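    // A dest_addr equal to the recorded destination means this dest-region
    // starts exactly at the split point, i.e. at the first live word of the
    // second part.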
    if (dest_addr == src_region_destination) {
      return split_info.split_point();
    }
    region_start_destination = split_info.preceding_destination();
  } else {
    region_start_destination = src_region_destination;
  }

  // Calculate the offset to be skipped
  size_t words_to_skip = pointer_delta(dest_addr, region_start_destination);

  HeapWord* result;
  if (partial_obj_size > words_to_skip) {
    result = region_start + words_to_skip;
  } else {
    words_to_skip -= partial_obj_size;
    result = skip_live_words(region_start + partial_obj_size, region_end, words_to_skip);
  }

  if (split_info.is_split(src_region_idx)) {
    assert(result < split_info.split_point(), "postcondition");
  } else {
    assert(result < region_end, "postcondition");
  }

  return result;
}

void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
                                                     SpaceId src_space_id,
                                                     size_t beg_region,
                                                     HeapWord* end_addr)
{
  ParallelCompactData& sd = summary_data();

#ifdef ASSERT
  MutableSpace* const src_space = _space_info[src_space_id].space();
  HeapWord* const beg_addr = sd.region_to_addr(beg_region);
  assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
         "src_space_id does not match beg_addr");
  assert(src_space->contains(end_addr) || end_addr == src_space->end(),
         "src_space_id does not match end_addr");
#endif // #ifdef ASSERT

  RegionData* const beg = sd.region(beg_region);
  RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));

  // Regions up to new_top() are enqueued if they become available.
  HeapWord* const new_top = _space_info[src_space_id].new_top();
  RegionData* const enqueue_end =
    sd.addr_to_region_ptr(sd.region_align_up(new_top));

  for (RegionData* cur = beg; cur < end; ++cur) {
    assert(cur->data_size() > 0, "region must have live data");
    cur->decrement_destination_count();
    if (cur < enqueue_end && cur->available() && cur->claim()) {
      if (cur->mark_normal()) {
        cm->push_region(sd.region(cur));
      } else if (cur->mark_copied()) {
        // Try to copy the content of the shadow region back to its corresponding
        // heap region if the shadow region is filled. Otherwise, the GC thread
        // that fills the shadow region will copy the data back (see
        // MoveAndUpdateShadowClosure::complete_region).
        copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur));
        ParCompactionManager::push_shadow_region_mt_safe(cur->shadow_region());
        cur->set_completed();
      }
    }
  }
}

size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
                                          SpaceId& src_space_id,
                                          HeapWord*& src_space_top,
                                          HeapWord* end_addr)
{
  ParallelCompactData& sd = PSParallelCompact::summary_data();

  size_t src_region_idx = 0;

  // Skip empty regions (if any) up to the top of the space.
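  // Regions whose data_size() is zero hold no live words and therefore cannot
  // serve as source regions.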
  HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
  RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
  HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
  const RegionData* const top_region_ptr = sd.addr_to_region_ptr(top_aligned_up);

  while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
    ++src_region_ptr;
  }

  if (src_region_ptr < top_region_ptr) {
    // Found the first non-empty region in the same space.
    src_region_idx = sd.region(src_region_ptr);
    closure.set_source(sd.region_to_addr(src_region_idx));
    return src_region_idx;
  }

  // Switch to a new source space and find the first non-empty region.
  uint space_id = src_space_id + 1;
  assert(space_id < last_space_id, "not enough spaces");

  for (/* empty */; space_id < last_space_id; ++space_id) {
    HeapWord* bottom = _space_info[space_id].space()->bottom();
    HeapWord* top = _space_info[space_id].space()->top();
    // Skip empty space
    if (bottom == top) {
      continue;
    }

    // Identify the first region that contains live words in this space
    size_t cur_region = sd.addr_to_region_idx(bottom);
    size_t end_region = sd.addr_to_region_idx(sd.region_align_up(top));

    for (/* empty */ ; cur_region < end_region; ++cur_region) {
      RegionData* cur = sd.region(cur_region);
      if (cur->live_obj_size() > 0) {
        HeapWord* region_start_addr = sd.region_to_addr(cur_region);

        src_space_id = SpaceId(space_id);
        src_space_top = top;
        closure.set_source(region_start_addr);
        return cur_region;
      }
    }
  }

  ShouldNotReachHere();
}

HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
  ParallelCompactData& sd = summary_data();
  assert(sd.is_region_aligned(region_start_addr), "precondition");

  // Use per-region partial_obj_size to locate the end of the obj that extends
  // to region_start_addr.
  size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
  size_t end_region_idx = sd.region_count();
  size_t accumulated_size = 0;
  for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
    size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
    accumulated_size += cur_partial_obj_size;
    if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
      break;
    }
  }
  return region_start_addr + accumulated_size;
}

// Use region_idx as the destination region, and evacuate all live objs on its
// source regions to this destination region.
void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
{
  ParMarkBitMap* const bitmap = mark_bitmap();
  ParallelCompactData& sd = summary_data();
  RegionData* const region_ptr = sd.region(region_idx);

  // Get the source region and related info.
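  // source_region() identifies the first source region of this destination
  // region; it contains the word that ends up at dest_addr after compaction
  // (see first_src_addr above).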
  size_t src_region_idx = region_ptr->source_region();
  SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
  HeapWord* src_space_top = _space_info[src_space_id].space()->top();
  HeapWord* dest_addr = sd.region_to_addr(region_idx);

  closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));

  // Adjust src_region_idx to prepare for decrementing destination counts (the
  // destination count is not decremented when a region is copied to itself).
  if (src_region_idx == region_idx) {
    src_region_idx += 1;
  }

  // source-region:
  //
  // **********
  // |   ~~~   |
  // **********
  //      ^
  //      |-- closure.source() / first_src_addr
  //
  //
  // ~~~ : live words
  //
  // destination-region:
  //
  // **********
  // |         |
  // **********
  // ^
  // |-- region-start
  if (bitmap->is_unmarked(closure.source())) {
    // An object overflows the previous destination region, so this
    // destination region should copy the remainder of the object or as much as
    // will fit.
    HeapWord* const old_src_addr = closure.source();
    {
      HeapWord* region_start = sd.region_align_down(closure.source());
      HeapWord* obj_start = bitmap->find_obj_beg_reverse(region_start, closure.source());
      HeapWord* obj_end;
      if (obj_start != closure.source()) {
        assert(bitmap->is_marked(obj_start), "inv");
        // Found the actual obj-start; try to find the obj-end, using size() if
        // this obj is completely contained in the current region.
        HeapWord* next_region_start = region_start + ParallelCompactData::RegionSize;
        HeapWord* partial_obj_start = (next_region_start >= src_space_top)
                                      ? nullptr
                                      : sd.addr_to_region_ptr(next_region_start)->partial_obj_addr();
        // This obj extends to next region iff partial_obj_addr of the *next*
        // region is the same as obj-start.
        if (partial_obj_start == obj_start) {
          // This obj extends to next region.
          obj_end = partial_obj_end(next_region_start);
        } else {
          // Completely contained in this region; safe to use size().
          obj_end = obj_start + cast_to_oop(obj_start)->size();
        }
      } else {
        // This obj extends to current region.
        obj_end = partial_obj_end(region_start);
      }
      size_t partial_obj_size = pointer_delta(obj_end, closure.source());
      closure.copy_partial_obj(partial_obj_size);
    }

    if (closure.is_full()) {
      decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
      closure.complete_region(dest_addr, region_ptr);
      return;
    }

    // Finished copying without using up the current destination-region
    HeapWord* const end_addr = sd.region_align_down(closure.source());
    if (sd.region_align_down(old_src_addr) != end_addr) {
      assert(sd.region_align_up(old_src_addr) == end_addr, "only one region");
      // The partial object was copied from more than one source region.
      decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

      // Move to the next source region, possibly switching spaces as well. All
      // args except end_addr may be modified.
      src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
    }
  }

  // Handle the rest obj-by-obj, where we know obj-start.
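  // Outer loop: advance through source regions (switching spaces when needed).
  // Inner loop: walk the objs of one source region via the mark bitmap until
  // either the region is exhausted or the destination region is full.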
  do {
    HeapWord* cur_addr = closure.source();
    HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
                                    src_space_top);
    // To handle the case where the final obj in source region extends to next region.
    HeapWord* final_obj_start = (end_addr == src_space_top)
                                ? nullptr
                                : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
    // Apply closure on objs inside [cur_addr, end_addr)
    do {
      cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
      if (cur_addr == end_addr) {
        break;
      }
      size_t obj_size;
      if (final_obj_start == cur_addr) {
        obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
      } else {
        // This obj doesn't extend into next region; size() is safe to use.
        obj_size = cast_to_oop(cur_addr)->size();
      }
      closure.do_addr(cur_addr, obj_size);
      cur_addr += obj_size;
    } while (cur_addr < end_addr && !closure.is_full());

    if (closure.is_full()) {
      decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
      closure.complete_region(dest_addr, region_ptr);
      return;
    }

    decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

    // Move to the next source region, possibly switching spaces as well. All
    // args except end_addr may be modified.
    src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
  } while (true);
}

void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
{
  MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
  fill_region(cm, cl, region_idx);
}

void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx)
{
  // Get a shadow region first
  ParallelCompactData& sd = summary_data();
  RegionData* const region_ptr = sd.region(region_idx);
  size_t shadow_region = ParCompactionManager::pop_shadow_region_mt_safe(region_ptr);
  // The InvalidShadow return value indicates the corresponding heap region is available,
  // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
  // MoveAndUpdateShadowClosure to fill the acquired shadow region.
  if (shadow_region == ParCompactionManager::InvalidShadow) {
    MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
    region_ptr->shadow_to_normal();
    return fill_region(cm, cl, region_idx);
  } else {
    MoveAndUpdateShadowClosure cl(mark_bitmap(), region_idx, shadow_region);
    return fill_region(cm, cl, region_idx);
  }
}

void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr)
{
  Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
}

bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_t &region_idx)
{
  size_t next = cm->next_shadow_region();
  ParallelCompactData& sd = summary_data();
  size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  while (next < old_new_top) {
    if (sd.region(next)->mark_shadow()) {
      region_idx = next;
      return true;
    }
    next = cm->move_next_shadow_region_by(active_gc_threads);
  }

  return false;
}

// The shadow region is an optimization to address region dependencies in full GC. The basic
// idea is making more regions available by temporarily storing their live objects in empty
// shadow regions to resolve dependencies between them and the destination regions. Therefore,
// GC threads need not wait for destination regions to be available before processing sources.
//
// A typical workflow would be:
// After draining its own stack and failing to steal from others, a GC worker would pick an
// unavailable region (destination count > 0) and get a shadow region. Then the worker fills
// the shadow region by copying live objects from source regions of the unavailable one. Once
// the unavailable region becomes available, the data in the shadow region will be copied back.
// Shadow regions are empty regions in the to-space and regions between top and end of other spaces.
void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
{
  const ParallelCompactData& sd = PSParallelCompact::summary_data();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    SpaceInfo* const space_info = _space_info + id;
    MutableSpace* const space = space_info->space();

    const size_t beg_region =
      sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
    const size_t end_region =
      sd.addr_to_region_idx(sd.region_align_down(space->end()));

    for (size_t cur = beg_region; cur < end_region; ++cur) {
      ParCompactionManager::push_shadow_region(cur);
    }
  }

  size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
  for (uint i = 0; i < parallel_gc_threads; i++) {
    ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
    cm->set_next_shadow_region(beg_region + i);
  }
}

void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
{
  size_t words = MIN2(partial_obj_size, words_remaining());

  // This test is necessary; if omitted, the pointer updates to a partial object
  // that crosses the dense prefix boundary could be overwritten.
  if (source() != copy_destination()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
  }
  update_state(words);
}

void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
  region_ptr->set_completed();
}

void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  assert(destination() != nullptr, "sanity");
  _source = addr;

  // The start_array must be updated even if the object is not moving.
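  // (_start_array is expected to be null when the destination space has no
  // block-start table, e.g. the young-gen spaces.)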
  if (_start_array != nullptr) {
    _start_array->update_for_block(destination(), destination() + words);
  }

  // Avoid overflow
  words = MIN2(words, words_remaining());
  assert(words > 0, "inv");

  if (copy_destination() != source()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    assert(source() != destination(), "inv");
    assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
    assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
    cast_to_oop(copy_destination())->reinit_mark();
  }

  update_state(words);
}

void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
  // Record the shadow region index
  region_ptr->set_shadow_region(_shadow);
  // Mark the shadow region as filled to indicate the data is ready to be
  // copied back
  region_ptr->mark_filled();
  // Try to copy the content of the shadow region back to its corresponding
  // heap region if available; the GC thread that decreases the destination
  // count to zero will do the copying otherwise (see
  // PSParallelCompact::decrement_destination_counts).
  if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
    region_ptr->set_completed();
    PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
    ParCompactionManager::push_shadow_region_mt_safe(_shadow);
  }
}