1 /*
2 * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/classLoaderDataGraph.hpp"
26 #include "classfile/javaClasses.inline.hpp"
27 #include "classfile/stringTable.hpp"
28 #include "classfile/symbolTable.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "code/codeCache.hpp"
31 #include "code/nmethod.hpp"
32 #include "compiler/oopMap.hpp"
33 #include "cppstdlib/new.hpp"
34 #include "gc/parallel/objectStartArray.inline.hpp"
35 #include "gc/parallel/parallelArguments.hpp"
36 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
37 #include "gc/parallel/parMarkBitMap.inline.hpp"
38 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
39 #include "gc/parallel/psCompactionManager.inline.hpp"
40 #include "gc/parallel/psOldGen.hpp"
41 #include "gc/parallel/psParallelCompact.inline.hpp"
42 #include "gc/parallel/psPromotionManager.inline.hpp"
43 #include "gc/parallel/psRootType.hpp"
44 #include "gc/parallel/psScavenge.hpp"
45 #include "gc/parallel/psStringDedup.hpp"
46 #include "gc/parallel/psYoungGen.hpp"
47 #include "gc/shared/classUnloadingContext.hpp"
48 #include "gc/shared/collectedHeap.inline.hpp"
49 #include "gc/shared/fullGCForwarding.inline.hpp"
50 #include "gc/shared/gcCause.hpp"
51 #include "gc/shared/gcHeapSummary.hpp"
52 #include "gc/shared/gcId.hpp"
53 #include "gc/shared/gcLocker.hpp"
54 #include "gc/shared/gcTimer.hpp"
55 #include "gc/shared/gcTrace.hpp"
56 #include "gc/shared/gcTraceTime.inline.hpp"
57 #include "gc/shared/gcVMOperations.hpp"
58 #include "gc/shared/isGCActiveMark.hpp"
59 #include "gc/shared/oopStorage.inline.hpp"
60 #include "gc/shared/oopStorageSet.inline.hpp"
61 #include "gc/shared/oopStorageSetParState.inline.hpp"
62 #include "gc/shared/parallelCleaning.hpp"
63 #include "gc/shared/preservedMarks.inline.hpp"
64 #include "gc/shared/referencePolicy.hpp"
65 #include "gc/shared/referenceProcessor.hpp"
66 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
67 #include "gc/shared/spaceDecorator.hpp"
68 #include "gc/shared/taskTerminator.hpp"
69 #include "gc/shared/weakProcessor.inline.hpp"
70 #include "gc/shared/workerPolicy.hpp"
71 #include "gc/shared/workerThread.hpp"
72 #include "gc/shared/workerUtils.hpp"
73 #include "logging/log.hpp"
74 #include "memory/iterator.inline.hpp"
75 #include "memory/memoryReserver.hpp"
76 #include "memory/metaspaceUtils.hpp"
77 #include "memory/resourceArea.hpp"
78 #include "memory/universe.hpp"
79 #include "nmt/memTracker.hpp"
80 #include "oops/access.inline.hpp"
81 #include "oops/flatArrayKlass.inline.hpp"
82 #include "oops/instanceClassLoaderKlass.inline.hpp"
83 #include "oops/instanceKlass.inline.hpp"
84 #include "oops/instanceMirrorKlass.inline.hpp"
85 #include "oops/methodData.hpp"
86 #include "oops/objArrayKlass.inline.hpp"
87 #include "oops/oop.inline.hpp"
88 #include "runtime/arguments.hpp"
89 #include "runtime/handles.inline.hpp"
90 #include "runtime/java.hpp"
91 #include "runtime/safepoint.hpp"
92 #include "runtime/threads.hpp"
93 #include "runtime/vmThread.hpp"
94 #include "services/memoryService.hpp"
95 #include "utilities/align.hpp"
96 #include "utilities/debug.hpp"
97 #include "utilities/events.hpp"
98 #include "utilities/formatBuffer.hpp"
99 #include "utilities/macros.hpp"
100 #include "utilities/stack.inline.hpp"
101 #if INCLUDE_JVMCI
102 #include "jvmci/jvmci.hpp"
103 #endif
104
105 #include <math.h>
106
107 // All sizes are in HeapWords.
108 const size_t ParallelCompactData::Log2RegionSize = 16; // 64K words
109 const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize;
110 static_assert(ParallelCompactData::RegionSize >= BitsPerWord, "region-start bit word-aligned");
111 const size_t ParallelCompactData::RegionSizeBytes =
112 RegionSize << LogHeapWordSize;
113 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
114 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
115 const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask;
116
117 const ParallelCompactData::RegionData::region_sz_t
118 ParallelCompactData::RegionData::dc_shift = 27;
119
120 const ParallelCompactData::RegionData::region_sz_t
121 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
122
123 const ParallelCompactData::RegionData::region_sz_t
124 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
125
126 const ParallelCompactData::RegionData::region_sz_t
127 ParallelCompactData::RegionData::los_mask = ~dc_mask;
128
129 const ParallelCompactData::RegionData::region_sz_t
130 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
131
132 const ParallelCompactData::RegionData::region_sz_t
133 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
134
135 bool ParallelCompactData::RegionData::is_clear() {
136 return (_destination == nullptr) &&
137 (_source_region == 0) &&
138 (_partial_obj_addr == nullptr) &&
139 (_partial_obj_size == 0) &&
140 (dc_and_los() == 0) &&
141 (shadow_state() == 0);
142 }
143
144 #ifdef ASSERT
145 void ParallelCompactData::RegionData::verify_clear() {
146 assert(_destination == nullptr, "inv");
147 assert(_source_region == 0, "inv");
148 assert(_partial_obj_addr == nullptr, "inv");
149 assert(_partial_obj_size == 0, "inv");
150 assert(dc_and_los() == 0, "inv");
151 assert(shadow_state() == 0, "inv");
152 }
153 #endif
154
155 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
156
157 SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
158 ReferenceProcessor* PSParallelCompact::_ref_processor = nullptr;
159
160 void SplitInfo::record(size_t split_region_idx, HeapWord* split_point, size_t preceding_live_words) {
161 assert(split_region_idx != 0, "precondition");
162
163 // Obj denoted by split_point will be deferred to the next space.
164 assert(split_point != nullptr, "precondition");
165
166 const ParallelCompactData& sd = PSParallelCompact::summary_data();
167
168 PSParallelCompact::RegionData* split_region_ptr = sd.region(split_region_idx);
169 assert(preceding_live_words < split_region_ptr->data_size(), "inv");
170
171 HeapWord* preceding_destination = split_region_ptr->destination();
172 assert(preceding_destination != nullptr, "inv");
173
174 // How many regions does the preceding part occupy
175 uint preceding_destination_count;
176 if (preceding_live_words == 0) {
177 preceding_destination_count = 0;
178 } else {
179 // -1 so that the ending address doesn't fall on the region-boundary
180 if (sd.region_align_down(preceding_destination) ==
181 sd.region_align_down(preceding_destination + preceding_live_words - 1)) {
182 preceding_destination_count = 1;
183 } else {
184 preceding_destination_count = 2;
185 }
186 }
187
188 _split_region_idx = split_region_idx;
189 _split_point = split_point;
190 _preceding_live_words = preceding_live_words;
191 _preceding_destination = preceding_destination;
192 _preceding_destination_count = preceding_destination_count;
193 }
194
195 void SplitInfo::clear()
196 {
197 _split_region_idx = 0;
198 _split_point = nullptr;
199 _preceding_live_words = 0;
200 _preceding_destination = nullptr;
201 _preceding_destination_count = 0;
202 assert(!is_valid(), "sanity");
203 }
204
205 #ifdef ASSERT
206 void SplitInfo::verify_clear()
207 {
208 assert(_split_region_idx == 0, "not clear");
209 assert(_split_point == nullptr, "not clear");
210 assert(_preceding_live_words == 0, "not clear");
211 assert(_preceding_destination == nullptr, "not clear");
212 assert(_preceding_destination_count == 0, "not clear");
213 }
214 #endif // #ifdef ASSERT
215
216
217 void PSParallelCompact::print_on(outputStream* st) {
218 _mark_bitmap.print_on(st);
219 }
220
221 ParallelCompactData::ParallelCompactData() :
222 _heap_start(nullptr),
223 DEBUG_ONLY(_heap_end(nullptr) COMMA)
224 _region_vspace(nullptr),
225 _reserved_byte_size(0),
226 _region_data(nullptr),
227 _region_count(0) {}
228
229 bool ParallelCompactData::initialize(MemRegion reserved_heap)
230 {
231 _heap_start = reserved_heap.start();
232 const size_t heap_size = reserved_heap.word_size();
233 DEBUG_ONLY(_heap_end = _heap_start + heap_size;)
234
235 assert(region_align_down(_heap_start) == _heap_start,
236 "region start not aligned");
237
238 return initialize_region_data(heap_size);
239 }
240
241 PSVirtualSpace*
242 ParallelCompactData::create_vspace(size_t count, size_t element_size)
243 {
244 const size_t raw_bytes = count * element_size;
245 const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
246 const size_t granularity = os::vm_allocation_granularity();
247 const size_t rs_align = MAX2(page_sz, granularity);
248
249 _reserved_byte_size = align_up(raw_bytes, rs_align);
250
251 ReservedSpace rs = MemoryReserver::reserve(_reserved_byte_size,
252 rs_align,
253 page_sz,
254 mtGC);
255
256 if (!rs.is_reserved()) {
257 // Failed to reserve memory.
258 return nullptr;
259 }
260
261 os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
262 rs.size(), page_sz);
263
264 MemTracker::record_virtual_memory_tag(rs, mtGC);
265
266 PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
267
268 if (!vspace->expand_by(_reserved_byte_size)) {
269 // Failed to commit memory.
270
271 delete vspace;
272
273 // Release memory reserved in the space.
274 MemoryReserver::release(rs);
275
276 return nullptr;
277 }
278
279 return vspace;
280 }
281
282 bool ParallelCompactData::initialize_region_data(size_t heap_size)
283 {
284 assert(is_aligned(heap_size, RegionSize), "precondition");
285
286 const size_t count = heap_size >> Log2RegionSize;
287 _region_vspace = create_vspace(count, sizeof(RegionData));
288 if (_region_vspace != nullptr) {
289 _region_data = (RegionData*)_region_vspace->reserved_low_addr();
290 _region_count = count;
291 return true;
292 }
293 return false;
294 }
295
296 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
297 assert(beg_region <= _region_count, "beg_region out of range");
298 assert(end_region <= _region_count, "end_region out of range");
299
300 const size_t region_cnt = end_region - beg_region;
301 for (size_t i = beg_region; i < end_region; i++) {
302 ::new (&_region_data[i]) RegionData{};
303 }
304 }
305
306 // The total live words on src_region would overflow the target space, so find
307 // the overflowing object and record the split point. The invariant is that an
308 // obj should not cross space boundary.
309 HeapWord* ParallelCompactData::summarize_split_space(size_t src_region,
310 SplitInfo& split_info,
311 HeapWord* const destination,
312 HeapWord* const target_end,
313 HeapWord** target_next) {
314 assert(destination <= target_end, "sanity");
315 assert(destination + _region_data[src_region].data_size() > target_end,
316 "region should not fit into target space");
317 assert(is_region_aligned(target_end), "sanity");
318
319 size_t partial_obj_size = _region_data[src_region].partial_obj_size();
320
321 if (destination + partial_obj_size > target_end) {
322 assert(partial_obj_size > 0, "inv");
323 // The overflowing obj is from a previous region.
324 //
325 // source-regions:
326 //
327 // ***************
328 // | A|AA |
329 // ***************
330 // ^
331 // | split-point
332 //
333 // dest-region:
334 //
335 // ********
336 // |~~~~A |
337 // ********
338 // ^^
339 // || target-space-end
340 // |
341 // | destination
342 //
343 // AAA would overflow target-space.
344 //
345 HeapWord* overflowing_obj = _region_data[src_region].partial_obj_addr();
346 size_t split_region = addr_to_region_idx(overflowing_obj);
347
348 // The number of live words before the overflowing object on this split region
349 size_t preceding_live_words;
350 if (is_region_aligned(overflowing_obj)) {
351 preceding_live_words = 0;
352 } else {
353 // Words accounted by the overflowing object on the split region
354 size_t overflowing_size = pointer_delta(region_align_up(overflowing_obj), overflowing_obj);
355 preceding_live_words = region(split_region)->data_size() - overflowing_size;
356 }
357
358 split_info.record(split_region, overflowing_obj, preceding_live_words);
359
360 // The [overflowing_obj, src_region_start) part has been accounted for, so
361 // must move back the new_top, now that this overflowing obj is deferred.
362 HeapWord* new_top = destination - pointer_delta(region_to_addr(src_region), overflowing_obj);
363
364 // If the overflowing obj was relocated to its original destination,
365 // those destination regions would have their source_region set. Now that
366 // this overflowing obj is relocated somewhere else, reset the
367 // source_region.
368 {
369 size_t range_start = addr_to_region_idx(region_align_up(new_top));
370 size_t range_end = addr_to_region_idx(region_align_up(destination));
371 for (size_t i = range_start; i < range_end; ++i) {
372 region(i)->set_source_region(0);
373 }
374 }
375
376 // Update new top of target space
377 *target_next = new_top;
378
379 return overflowing_obj;
380 }
381
382 // Obj-iteration to locate the overflowing obj
383 HeapWord* region_start = region_to_addr(src_region);
384 HeapWord* region_end = region_start + RegionSize;
385 HeapWord* cur_addr = region_start + partial_obj_size;
386 size_t live_words = partial_obj_size;
387
388 while (true) {
389 assert(cur_addr < region_end, "inv");
390 cur_addr = PSParallelCompact::mark_bitmap()->find_obj_beg(cur_addr, region_end);
391 // There must be an overflowing obj in this region
392 assert(cur_addr < region_end, "inv");
393
394 oop obj = cast_to_oop(cur_addr);
395 size_t obj_size = obj->size();
396 if (destination + live_words + obj_size > target_end) {
397 // Found the overflowing obj
398 split_info.record(src_region, cur_addr, live_words);
399 *target_next = destination + live_words;
400 return cur_addr;
401 }
402
403 live_words += obj_size;
404 cur_addr += obj_size;
405 }
406 }
407
408 size_t ParallelCompactData::live_words_in_space(const MutableSpace* space,
409 HeapWord** full_region_prefix_end) {
410 size_t cur_region = addr_to_region_idx(space->bottom());
411 const size_t end_region = addr_to_region_idx(region_align_up(space->top()));
412 size_t live_words = 0;
413 if (full_region_prefix_end == nullptr) {
414 for (/* empty */; cur_region < end_region; ++cur_region) {
415 live_words += _region_data[cur_region].data_size();
416 }
417 } else {
418 bool first_set = false;
419 for (/* empty */; cur_region < end_region; ++cur_region) {
420 size_t live_words_in_region = _region_data[cur_region].data_size();
421 if (!first_set && live_words_in_region < RegionSize) {
422 *full_region_prefix_end = region_to_addr(cur_region);
423 first_set = true;
424 }
425 live_words += live_words_in_region;
426 }
427 if (!first_set) {
428 // All regions are full of live objs.
429 assert(is_region_aligned(space->top()), "inv");
430 *full_region_prefix_end = space->top();
431 }
432 assert(*full_region_prefix_end != nullptr, "postcondition");
433 assert(is_region_aligned(*full_region_prefix_end), "inv");
434 assert(*full_region_prefix_end >= space->bottom(), "in-range");
435 assert(*full_region_prefix_end <= space->top(), "in-range");
436 }
437 return live_words;
438 }
439
440 bool ParallelCompactData::summarize(SplitInfo& split_info,
441 HeapWord* source_beg, HeapWord* source_end,
442 HeapWord** source_next,
443 HeapWord* target_beg, HeapWord* target_end,
444 HeapWord** target_next)
445 {
446 HeapWord* const source_next_val = source_next == nullptr ? nullptr : *source_next;
447 log_develop_trace(gc, compaction)(
448 "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
449 "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
450 p2i(source_beg), p2i(source_end), p2i(source_next_val),
451 p2i(target_beg), p2i(target_end), p2i(*target_next));
452
453 size_t cur_region = addr_to_region_idx(source_beg);
454 const size_t end_region = addr_to_region_idx(region_align_up(source_end));
455
456 HeapWord *dest_addr = target_beg;
457 for (/* empty */; cur_region < end_region; cur_region++) {
458 size_t words = _region_data[cur_region].data_size();
459
460 // Skip empty ones
461 if (words == 0) {
462 continue;
463 }
464
465 if (split_info.is_split(cur_region)) {
466 assert(words > split_info.preceding_live_words(), "inv");
467 words -= split_info.preceding_live_words();
468 }
469
470 _region_data[cur_region].set_destination(dest_addr);
471
472 // If cur_region does not fit entirely into the target space, find a point
473 // at which the source space can be 'split' so that part is copied to the
474 // target space and the rest is copied elsewhere.
475 if (dest_addr + words > target_end) {
476 assert(source_next != nullptr, "source_next is null when splitting");
477 *source_next = summarize_split_space(cur_region, split_info, dest_addr,
478 target_end, target_next);
479 return false;
480 }
481
482 uint destination_count = split_info.is_split(cur_region)
483 ? split_info.preceding_destination_count()
484 : 0;
485
486 HeapWord* const last_addr = dest_addr + words - 1;
487 const size_t dest_region_1 = addr_to_region_idx(dest_addr);
488 const size_t dest_region_2 = addr_to_region_idx(last_addr);
489
490 // Initially assume that the destination regions will be the same and
491 // adjust the value below if necessary. Under this assumption, if
492 // cur_region == dest_region_2, then cur_region will be compacted
493 // completely into itself.
494 destination_count += cur_region == dest_region_2 ? 0 : 1;
495 if (dest_region_1 != dest_region_2) {
496 // Destination regions differ; adjust destination_count.
497 destination_count += 1;
498 // Data from cur_region will be copied to the start of dest_region_2.
499 _region_data[dest_region_2].set_source_region(cur_region);
500 } else if (is_region_aligned(dest_addr)) {
501 // Data from cur_region will be copied to the start of the destination
502 // region.
503 _region_data[dest_region_1].set_source_region(cur_region);
504 }
505
506 _region_data[cur_region].set_destination_count(destination_count);
507 dest_addr += words;
508 }
509
510 *target_next = dest_addr;
511 return true;
512 }
513
514 #ifdef ASSERT
515 void ParallelCompactData::verify_clear() {
516 for (uint cur_idx = 0; cur_idx < region_count(); ++cur_idx) {
517 if (!region(cur_idx)->is_clear()) {
518 log_warning(gc)("Uncleared Region: %u", cur_idx);
519 region(cur_idx)->verify_clear();
520 }
521 }
522 }
523 #endif // #ifdef ASSERT
524
525 STWGCTimer PSParallelCompact::_gc_timer;
526 ParallelOldTracer PSParallelCompact::_gc_tracer;
527 elapsedTimer PSParallelCompact::_accumulated_time;
528 unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
529 CollectorCounters* PSParallelCompact::_counters = nullptr;
530 ParMarkBitMap PSParallelCompact::_mark_bitmap;
531 ParallelCompactData PSParallelCompact::_summary_data;
532
533 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
534
535 class PCAdjustPointerClosure: public BasicOopIterateClosure {
536 template <typename T>
537 void do_oop_work(T* p) { PSParallelCompact::adjust_pointer(p); }
538
539 public:
540 virtual void do_oop(oop* p) { do_oop_work(p); }
541 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
542
543 virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
544 };
545
546 static PCAdjustPointerClosure pc_adjust_pointer_closure;
547
548 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
549
550 void PSParallelCompact::post_initialize() {
551 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
552 _span_based_discoverer.set_span(heap->reserved_region());
553 _ref_processor =
554 new ReferenceProcessor(&_span_based_discoverer,
555 ParallelGCThreads, // mt processing degree
556 ParallelGCThreads, // mt discovery degree
557 false, // concurrent_discovery
558 &_is_alive_closure); // non-header is alive closure
559
560 _counters = new CollectorCounters("Parallel full collection pauses", 1);
561
562 // Initialize static fields in ParCompactionManager.
563 ParCompactionManager::initialize(mark_bitmap());
564 }
565
566 bool PSParallelCompact::initialize_aux_data() {
567 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
568 MemRegion mr = heap->reserved_region();
569 assert(mr.byte_size() != 0, "heap should be reserved");
570
571 initialize_space_info();
572
573 if (!_mark_bitmap.initialize(mr)) {
574 vm_shutdown_during_initialization(
575 err_msg("Unable to allocate %zuKB bitmaps for parallel "
576 "garbage collection for the requested %zuKB heap.",
577 _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
578 return false;
579 }
580
581 if (!_summary_data.initialize(mr)) {
582 vm_shutdown_during_initialization(
583 err_msg("Unable to allocate %zuKB card tables for parallel "
584 "garbage collection for the requested %zuKB heap.",
585 _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
586 return false;
587 }
588
589 return true;
590 }
591
592 void PSParallelCompact::initialize_space_info()
593 {
594 memset(&_space_info, 0, sizeof(_space_info));
595
596 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
597 PSYoungGen* young_gen = heap->young_gen();
598
599 _space_info[old_space_id].set_space(heap->old_gen()->object_space());
600 _space_info[eden_space_id].set_space(young_gen->eden_space());
601 _space_info[from_space_id].set_space(young_gen->from_space());
602 _space_info[to_space_id].set_space(young_gen->to_space());
603
604 _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
605 }
606
607 void
608 PSParallelCompact::clear_data_covering_space(SpaceId id)
609 {
610 // At this point, top is the value before GC, new_top() is the value that will
611 // be set at the end of GC. The marking bitmap is cleared to top; nothing
612 // should be marked above top. The summary data is cleared to the larger of
613 // top & new_top.
614 MutableSpace* const space = _space_info[id].space();
615 HeapWord* const bot = space->bottom();
616 HeapWord* const top = space->top();
617 HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
618
619 _mark_bitmap.clear_range(bot, top);
620
621 const size_t beg_region = _summary_data.addr_to_region_idx(bot);
622 const size_t end_region =
623 _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
624 _summary_data.clear_range(beg_region, end_region);
625
626 // Clear the data used to 'split' regions.
627 SplitInfo& split_info = _space_info[id].split_info();
628 if (split_info.is_valid()) {
629 split_info.clear();
630 }
631 DEBUG_ONLY(split_info.verify_clear();)
632 }
633
634 void PSParallelCompact::pre_compact()
635 {
636 // Update the from & to space pointers in space_info, since they are swapped
637 // at each young gen gc. Do the update unconditionally (even though a
638 // promotion failure does not swap spaces) because an unknown number of young
639 // collections will have swapped the spaces an unknown number of times.
640 GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
641 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
642 _space_info[from_space_id].set_space(heap->young_gen()->from_space());
643 _space_info[to_space_id].set_space(heap->young_gen()->to_space());
644
645 heap->increment_total_collections(true);
646
647 CodeCache::on_gc_marking_cycle_start();
648
649 heap->print_before_gc();
650 heap->trace_heap_before_gc(&_gc_tracer);
651
652 // Fill in TLABs
653 heap->ensure_parsability(true); // retire TLABs
654
655 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
656 Universe::verify("Before GC");
657 }
658
659 DEBUG_ONLY(mark_bitmap()->verify_clear();)
660 DEBUG_ONLY(summary_data().verify_clear();)
661 }
662
663 void PSParallelCompact::post_compact()
664 {
665 GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
666 ParCompactionManager::remove_all_shadow_regions();
667
668 CodeCache::on_gc_marking_cycle_finish();
669 CodeCache::arm_all_nmethods();
670
671 // Need to clear claim bits for the next full-gc (marking and adjust-pointers).
672 ClassLoaderDataGraph::clear_claimed_marks();
673
674 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
675 // Clear the marking bitmap, summary data and split info.
676 clear_data_covering_space(SpaceId(id));
677 {
678 MutableSpace* space = _space_info[id].space();
679 HeapWord* top = space->top();
680 HeapWord* new_top = _space_info[id].new_top();
681 if (ZapUnusedHeapArea && new_top < top) {
682 space->mangle_region(MemRegion(new_top, top));
683 }
684 // Update top(). Must be done after clearing the bitmap and summary data.
685 space->set_top(new_top);
686 }
687 }
688
689 #ifdef ASSERT
690 {
691 mark_bitmap()->verify_clear();
692 summary_data().verify_clear();
693 }
694 #endif
695
696 ParCompactionManager::flush_all_string_dedup_requests();
697
698 MutableSpace* const eden_space = _space_info[eden_space_id].space();
699 MutableSpace* const from_space = _space_info[from_space_id].space();
700 MutableSpace* const to_space = _space_info[to_space_id].space();
701
702 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
703 bool eden_empty = eden_space->is_empty();
704
705 // Update heap occupancy information which is used as input to the soft ref
706 // clearing policy at the next gc.
707 Universe::heap()->update_capacity_and_used_at_gc();
708
709 bool young_gen_empty = eden_empty && from_space->is_empty() &&
710 to_space->is_empty();
711
712 PSCardTable* ct = heap->card_table();
713 MemRegion old_mr = heap->old_gen()->committed();
714 if (young_gen_empty) {
715 ct->clear_MemRegion(old_mr);
716 } else {
717 ct->dirty_MemRegion(old_mr);
718 }
719
720 heap->prune_scavengable_nmethods();
721
722 #if COMPILER2_OR_JVMCI
723 DerivedPointerTable::update_pointers();
724 #endif
725
726 // Signal that we have completed a visit to all live objects.
727 Universe::heap()->record_whole_heap_examined_timestamp();
728 }
729
730 HeapWord* PSParallelCompact::compute_dense_prefix_for_old_space(MutableSpace* old_space,
731 HeapWord* full_region_prefix_end) {
732 const size_t region_size = ParallelCompactData::RegionSize;
733 const ParallelCompactData& sd = summary_data();
734
735 // Iteration starts with the region *after* the full-region-prefix-end.
736 const RegionData* const start_region = sd.addr_to_region_ptr(full_region_prefix_end);
737 // If final region is not full, iteration stops before that region,
738 // because fill_dense_prefix_end assumes that prefix_end <= top.
739 const RegionData* const end_region = sd.addr_to_region_ptr(old_space->top());
740 assert(start_region <= end_region, "inv");
741
742 size_t max_waste = old_space->capacity_in_words() * (MarkSweepDeadRatio / 100.0);
743 const RegionData* cur_region = start_region;
744 for (/* empty */; cur_region < end_region; ++cur_region) {
745 assert(region_size >= cur_region->data_size(), "inv");
746 size_t dead_size = region_size - cur_region->data_size();
747 if (max_waste < dead_size) {
748 break;
749 }
750 max_waste -= dead_size;
751 }
752
753 HeapWord* const prefix_end = sd.region_to_addr(cur_region);
754 assert(sd.is_region_aligned(prefix_end), "postcondition");
755 assert(prefix_end >= full_region_prefix_end, "in-range");
756 assert(prefix_end <= old_space->top(), "in-range");
757 return prefix_end;
758 }
759
760 void PSParallelCompact::fill_dense_prefix_end(SpaceId id) {
761 // Comparing two sizes to decide if filling is required:
762 //
763 // The size of the filler (min-obj-size) is 2 heap words with the default
764 // MinObjAlignment, since both markword and klass take 1 heap word.
765 // With +UseCompactObjectHeaders, the minimum filler size is only one word,
766 // because the Klass* gets encoded in the mark-word.
767 //
768 // The size of the gap (if any) right before dense-prefix-end is
769 // MinObjAlignment.
770 //
771 // Need to fill in the gap only if it's smaller than min-obj-size, and the
772 // filler obj will extend to next region.
773
774 if (MinObjAlignment >= checked_cast<int>(CollectedHeap::min_fill_size())) {
775 return;
776 }
777
778 assert(!UseCompactObjectHeaders, "Compact headers can allocate small objects");
779 assert(CollectedHeap::min_fill_size() == 2, "inv");
780 HeapWord* const dense_prefix_end = dense_prefix(id);
781 assert(_summary_data.is_region_aligned(dense_prefix_end), "precondition");
782 assert(dense_prefix_end <= space(id)->top(), "precondition");
783 if (dense_prefix_end == space(id)->top()) {
784 // Must not have single-word gap right before prefix-end/top.
785 return;
786 }
787 RegionData* const region_after_dense_prefix = _summary_data.addr_to_region_ptr(dense_prefix_end);
788
789 if (region_after_dense_prefix->partial_obj_size() != 0 ||
790 _mark_bitmap.is_marked(dense_prefix_end)) {
791 // The region after the dense prefix starts with live bytes.
792 return;
793 }
794
795 HeapWord* block_start = start_array(id)->block_start_reaching_into_card(dense_prefix_end);
796 if (block_start == dense_prefix_end - 1) {
797 assert(!_mark_bitmap.is_marked(block_start), "inv");
798 // There is exactly one heap word gap right before the dense prefix end, so we need a filler object.
799 // The filler object will extend into region_after_dense_prefix.
800 const size_t obj_len = 2; // min-fill-size
801 HeapWord* const obj_beg = dense_prefix_end - 1;
802 CollectedHeap::fill_with_object(obj_beg, obj_len);
803 _mark_bitmap.mark_obj(obj_beg);
804 _summary_data.addr_to_region_ptr(obj_beg)->add_live_obj(1);
805 region_after_dense_prefix->set_partial_obj_size(1);
806 region_after_dense_prefix->set_partial_obj_addr(obj_beg);
807 assert(start_array(id) != nullptr, "sanity");
808 start_array(id)->update_for_block(obj_beg, obj_beg + obj_len);
809 }
810 }
811
812 bool PSParallelCompact::check_maximum_compaction(bool should_do_max_compaction,
813 size_t total_live_words,
814 MutableSpace* const old_space,
815 HeapWord* full_region_prefix_end) {
816
817 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
818
819 // Check System.GC
820 bool is_max_on_system_gc = UseMaximumCompactionOnSystemGC
821 && GCCause::is_user_requested_gc(heap->gc_cause());
822
823 // Check if all live objs are too much for old-gen.
824 const bool is_old_gen_too_full = (total_live_words >= old_space->capacity_in_words());
825
826 // If all regions in old-gen are full
827 const bool is_region_full =
828 full_region_prefix_end >= _summary_data.region_align_down(old_space->top());
829
830 return should_do_max_compaction
831 || is_max_on_system_gc
832 || is_old_gen_too_full
833 || is_region_full;
834 }
835
836 void PSParallelCompact::summary_phase(bool should_do_max_compaction)
837 {
838 GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
839
840 MutableSpace* const old_space = _space_info[old_space_id].space();
841 {
842 size_t total_live_words = 0;
843 HeapWord* full_region_prefix_end = nullptr;
844 {
845 // old-gen
846 size_t live_words = _summary_data.live_words_in_space(old_space,
847 &full_region_prefix_end);
848 total_live_words += live_words;
849 }
850 // young-gen
851 for (uint i = eden_space_id; i < last_space_id; ++i) {
852 const MutableSpace* space = _space_info[i].space();
853 size_t live_words = _summary_data.live_words_in_space(space);
854 total_live_words += live_words;
855 _space_info[i].set_new_top(space->bottom() + live_words);
856 _space_info[i].set_dense_prefix(space->bottom());
857 }
858
859 should_do_max_compaction = check_maximum_compaction(should_do_max_compaction,
860 total_live_words,
861 old_space,
862 full_region_prefix_end);
863 {
864 GCTraceTime(Info, gc, phases) tm("Summary Phase: expand", &_gc_timer);
865 // Try to expand old-gen in order to fit all live objs and waste.
866 size_t target_capacity_bytes = total_live_words * HeapWordSize
867 + old_space->capacity_in_bytes() * (MarkSweepDeadRatio / 100);
868 ParallelScavengeHeap::heap()->old_gen()->try_expand_till_size(target_capacity_bytes);
869 }
870
871 HeapWord* dense_prefix_end = should_do_max_compaction
872 ? full_region_prefix_end
873 : compute_dense_prefix_for_old_space(old_space,
874 full_region_prefix_end);
875 SpaceId id = old_space_id;
876 _space_info[id].set_dense_prefix(dense_prefix_end);
877
878 if (dense_prefix_end != old_space->bottom()) {
879 fill_dense_prefix_end(id);
880 }
881
882 // Compacting objs in [dense_prefix_end, old_space->top())
883 _summary_data.summarize(_space_info[id].split_info(),
884 dense_prefix_end, old_space->top(), nullptr,
885 dense_prefix_end, old_space->end(),
886 _space_info[id].new_top_addr());
887 }
888
889 // Summarize the remaining spaces in the young gen. The initial target space
890 // is the old gen. If a space does not fit entirely into the target, then the
891 // remainder is compacted into the space itself and that space becomes the new
892 // target.
893 SpaceId dst_space_id = old_space_id;
894 HeapWord* dst_space_end = old_space->end();
895 HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
896 for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
897 const MutableSpace* space = _space_info[id].space();
898 const size_t live = pointer_delta(_space_info[id].new_top(),
899 space->bottom());
900 const size_t available = pointer_delta(dst_space_end, *new_top_addr);
901
902 if (live > 0 && live <= available) {
903 // All the live data will fit.
904 bool done = _summary_data.summarize(_space_info[id].split_info(),
905 space->bottom(), space->top(),
906 nullptr,
907 *new_top_addr, dst_space_end,
908 new_top_addr);
909 assert(done, "space must fit into old gen");
910
911 // Reset the new_top value for the space.
912 _space_info[id].set_new_top(space->bottom());
913 } else if (live > 0) {
914 // Attempt to fit part of the source space into the target space.
915 HeapWord* next_src_addr = nullptr;
916 bool done = _summary_data.summarize(_space_info[id].split_info(),
917 space->bottom(), space->top(),
918 &next_src_addr,
919 *new_top_addr, dst_space_end,
920 new_top_addr);
921 assert(!done, "space should not fit into old gen");
922 assert(next_src_addr != nullptr, "sanity");
923
924 // The source space becomes the new target, so the remainder is compacted
925 // within the space itself.
926 dst_space_id = SpaceId(id);
927 dst_space_end = space->end();
928 new_top_addr = _space_info[id].new_top_addr();
929 done = _summary_data.summarize(_space_info[id].split_info(),
930 next_src_addr, space->top(),
931 nullptr,
932 space->bottom(), dst_space_end,
933 new_top_addr);
934 assert(done, "space must fit when compacted into itself");
935 assert(*new_top_addr <= space->top(), "usage should not grow");
936 }
937 }
938 }
939
940 void PSParallelCompact::report_object_count_after_gc() {
941 GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
942 // The heap is compacted, all objects are iterable. However there may be
943 // filler objects in the heap which we should ignore.
944 class SkipFillerObjectClosure : public BoolObjectClosure {
945 public:
946 bool do_object_b(oop obj) override { return !CollectedHeap::is_filler_object(obj); }
947 } cl;
948 _gc_tracer.report_object_count_after_gc(&cl, &ParallelScavengeHeap::heap()->workers());
949 }
950
951 bool PSParallelCompact::invoke(bool clear_all_soft_refs, bool should_do_max_compaction) {
952 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
953 assert(Thread::current() == (Thread*)VMThread::vm_thread(),
954 "should be in vm thread");
955 assert(ref_processor() != nullptr, "Sanity");
956
957 SvcGCMarker sgcm(SvcGCMarker::FULL);
958 IsSTWGCActiveMark mark;
959
960 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
961
962 GCIdMark gc_id_mark;
963 _gc_timer.register_gc_start();
964 _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
965
966 GCCause::Cause gc_cause = heap->gc_cause();
967 PSOldGen* old_gen = heap->old_gen();
968 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
969
970 // Make sure data structures are sane, make the heap parsable, and do other
971 // miscellaneous bookkeeping.
972 pre_compact();
973
974 const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
975
976 {
977 const uint active_workers =
978 WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
979 ParallelScavengeHeap::heap()->workers().active_workers(),
980 Threads::number_of_non_daemon_threads());
981 ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);
982
983 GCTraceCPUTime tcpu(&_gc_tracer);
984 GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);
985
986 heap->pre_full_gc_dump(&_gc_timer);
987
988 TraceCollectorStats tcs(counters());
989 TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC");
990
991 if (log_is_enabled(Debug, gc, heap, exit)) {
992 accumulated_time()->start();
993 }
994
995 // Let the size policy know we're starting
996 size_policy->major_collection_begin();
997
998 #if COMPILER2_OR_JVMCI
999 DerivedPointerTable::clear();
1000 #endif
1001
1002 ref_processor()->start_discovery(clear_all_soft_refs);
1003
1004 marking_phase(&_gc_tracer);
1005
1006 summary_phase(should_do_max_compaction);
1007
1008 #if COMPILER2_OR_JVMCI
1009 assert(DerivedPointerTable::is_active(), "Sanity");
1010 DerivedPointerTable::set_active(false);
1011 #endif
1012
1013 forward_to_new_addr();
1014
1015 adjust_pointers();
1016
1017 compact();
1018
1019 ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());
1020
1021 ParCompactionManager::verify_all_region_stack_empty();
1022
1023 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
1024 // done before resizing.
1025 post_compact();
1026
1027 size_policy->major_collection_end();
1028
1029 size_policy->sample_old_gen_used_bytes(MAX2(pre_gc_values.old_gen_used(), old_gen->used_in_bytes()));
1030
1031 if (UseAdaptiveSizePolicy) {
1032 heap->resize_after_full_gc();
1033 }
1034
1035 heap->resize_all_tlabs();
1036
1037 // Resize the metaspace capacity after a collection
1038 MetaspaceGC::compute_new_size();
1039
1040 if (log_is_enabled(Debug, gc, heap, exit)) {
1041 accumulated_time()->stop();
1042 }
1043
1044 heap->print_heap_change(pre_gc_values);
1045
1046 report_object_count_after_gc();
1047
1048 // Track memory usage and detect low memory
1049 MemoryService::track_memory_usage();
1050 heap->update_counters();
1051
1052 heap->post_full_gc_dump(&_gc_timer);
1053
1054 size_policy->record_gc_pause_end_instant();
1055 }
1056
1057 heap->gc_epilogue(true);
1058
1059 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1060 Universe::verify("After GC");
1061 }
1062
1063 heap->print_after_gc();
1064 heap->trace_heap_after_gc(&_gc_tracer);
1065
1066 _gc_timer.register_gc_end();
1067
1068 _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1069 _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1070
1071 return true;
1072 }
1073
1074 class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
1075 ParCompactionManager* _cm;
1076
1077 public:
1078 PCAddThreadRootsMarkingTaskClosure(ParCompactionManager* cm) : _cm(cm) { }
1079 void do_thread(Thread* thread) {
1080 ResourceMark rm;
1081
1082 MarkingNMethodClosure mark_and_push_in_blobs(&_cm->_mark_and_push_closure);
1083
1084 thread->oops_do(&_cm->_mark_and_push_closure, &mark_and_push_in_blobs);
1085
1086 // Do the real work
1087 _cm->follow_marking_stacks();
1088 }
1089 };
1090
1091 void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
1092 assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1093
1094 ParCompactionManager* cm =
1095 ParCompactionManager::gc_thread_compaction_manager(worker_id);
1096
1097 do {
1098 ScannerTask task;
1099 if (ParCompactionManager::steal(worker_id, task)) {
1100 cm->follow_contents(task, true);
1101 }
1102 cm->follow_marking_stacks();
1103 } while (!terminator.offer_termination());
1104 }
1105
1106 class MarkFromRootsTask : public WorkerTask {
1107 NMethodMarkingScope _nmethod_marking_scope;
1108 ThreadsClaimTokenScope _threads_claim_token_scope;
1109 OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
1110 TaskTerminator _terminator;
1111 uint _active_workers;
1112
1113 public:
1114 MarkFromRootsTask(uint active_workers) :
1115 WorkerTask("MarkFromRootsTask"),
1116 _nmethod_marking_scope(),
1117 _threads_claim_token_scope(),
1118 _terminator(active_workers, ParCompactionManager::marking_stacks()),
1119 _active_workers(active_workers) {}
1120
1121 virtual void work(uint worker_id) {
1122 ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1123 cm->create_marking_stats_cache();
1124 {
1125 CLDToOopClosure cld_closure(&cm->_mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
1126 ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
1127
1128 // Do the real work
1129 cm->follow_marking_stacks();
1130 }
1131
1132 {
1133 PCAddThreadRootsMarkingTaskClosure closure(cm);
1134 Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &closure);
1135 }
1136
1137 // Mark from OopStorages
1138 {
1139 _oop_storage_set_par_state.oops_do(&cm->_mark_and_push_closure);
1140 // Do the real work
1141 cm->follow_marking_stacks();
1142 }
1143
1144 if (_active_workers > 1) {
1145 steal_marking_work(_terminator, worker_id);
1146 }
1147 }
1148 };
1149
1150 class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
1151 TaskTerminator _terminator;
1152
1153 public:
1154 ParallelCompactRefProcProxyTask(uint max_workers)
1155 : RefProcProxyTask("ParallelCompactRefProcProxyTask", max_workers),
1156 _terminator(_max_workers, ParCompactionManager::marking_stacks()) {}
1157
1158 void work(uint worker_id) override {
1159 assert(worker_id < _max_workers, "sanity");
1160 ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
1161 BarrierEnqueueDiscoveredFieldClosure enqueue;
1162 ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
1163 _rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &cm->_mark_and_push_closure, &enqueue, &complete_gc);
1164 }
1165
1166 void prepare_run_task_hook() override {
1167 _terminator.reset_for_reuse(_queue_count);
1168 }
1169 };
1170
1171 static void flush_marking_stats_cache(const uint num_workers) {
1172 for (uint i = 0; i < num_workers; ++i) {
1173 ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(i);
1174 cm->flush_and_destroy_marking_stats_cache();
1175 }
1176 }
1177
1178 class PSParallelCleaningTask : public WorkerTask {
1179 bool _unloading_occurred;
1180 CodeCacheUnloadingTask _code_cache_task;
1181 // Prune dead klasses from subklass/sibling/implementor lists.
1182 KlassCleaningTask _klass_cleaning_task;
1183
1184 public:
1185 PSParallelCleaningTask(bool unloading_occurred) :
1186 WorkerTask("PS Parallel Cleaning"),
1187 _unloading_occurred(unloading_occurred),
1188 _code_cache_task(unloading_occurred),
1189 _klass_cleaning_task() {}
1190
1191 void work(uint worker_id) {
1192 #if INCLUDE_JVMCI
1193 if (EnableJVMCI && worker_id == 0) {
1194 // Serial work; only first worker.
1195 // Clean JVMCI metadata handles.
1196 JVMCI::do_unloading(_unloading_occurred);
1197 }
1198 #endif
1199
1200 // Do first pass of code cache cleaning.
1201 _code_cache_task.work(worker_id);
1202
1203 // Clean all klasses that were not unloaded.
1204 // The weak metadata in klass doesn't need to be
1205 // processed if there was no unloading.
1206 if (_unloading_occurred) {
1207 _klass_cleaning_task.work();
1208 }
1209 }
1210 };
1211
1212 void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
1213 // Recursively traverse all live objects and mark them
1214 GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
1215
1216 uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
1217
1218 ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
1219 {
1220 GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
1221
1222 MarkFromRootsTask task(active_gc_threads);
1223 ParallelScavengeHeap::heap()->workers().run_task(&task);
1224 }
1225
1226 // Process reference objects found during marking
1227 {
1228 GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
1229
1230 ReferenceProcessorStats stats;
1231 ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());
1232
1233 ParallelCompactRefProcProxyTask task(ref_processor()->max_num_queues());
1234 stats = ref_processor()->process_discovered_references(task, &ParallelScavengeHeap::heap()->workers(), pt);
1235
1236 gc_tracer->report_gc_reference_stats(stats);
1237 pt.print_all_references();
1238 }
1239
1240 {
1241 GCTraceTime(Debug, gc, phases) tm("Flush Marking Stats", &_gc_timer);
1242
1243 flush_marking_stats_cache(active_gc_threads);
1244 }
1245
1246 // This is the point where the entire marking should have completed.
1247 ParCompactionManager::verify_all_marking_stack_empty();
1248
1249 {
1250 GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
1251 WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
1252 is_alive_closure(),
1253 &do_nothing_cl,
1254 1);
1255 }
1256
1257 {
1258 GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);
1259
1260 ClassUnloadingContext ctx(active_gc_threads /* num_nmethod_unlink_workers */,
1261 false /* unregister_nmethods_during_purge */,
1262 false /* lock_nmethod_free_separately */);
1263
1264 {
1265 CodeCache::UnlinkingScope scope(is_alive_closure());
1266
1267 // Follow system dictionary roots and unload classes.
1268 bool unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);
1269
1270 PSParallelCleaningTask task{unloading_occurred};
1271 ParallelScavengeHeap::heap()->workers().run_task(&task);
1272 }
1273
1274 {
1275 GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
1276 // Release unloaded nmethod's memory.
1277 ctx.purge_nmethods();
1278 }
1279 {
1280 GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
1281 ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
1282 }
1283 {
1284 GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
1285 ctx.free_nmethods();
1286 }
1287 {
1288 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1289 GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
1290 ClassLoaderDataGraph::purge(true /* at_safepoint */);
1291 DEBUG_ONLY(MetaspaceUtils::verify();)
1292 }
1293 }
1294
1295 #if TASKQUEUE_STATS
1296 ParCompactionManager::print_and_reset_taskqueue_stats();
1297 #endif
1298 }
1299
1300 template<typename Func>
1301 void PSParallelCompact::adjust_in_space_helper(SpaceId id, Atomic<uint>* claim_counter, Func&& on_stripe) {
1302 MutableSpace* sp = PSParallelCompact::space(id);
1303 HeapWord* const bottom = sp->bottom();
1304 HeapWord* const top = sp->top();
1305 if (bottom == top) {
1306 return;
1307 }
1308
1309 const uint num_regions_per_stripe = 2;
1310 const size_t region_size = ParallelCompactData::RegionSize;
1311 const size_t stripe_size = num_regions_per_stripe * region_size;
1312
1313 while (true) {
1314 uint counter = claim_counter->fetch_then_add(num_regions_per_stripe);
1315 HeapWord* cur_stripe = bottom + counter * region_size;
1316 if (cur_stripe >= top) {
1317 break;
1318 }
1319 HeapWord* stripe_end = MIN2(cur_stripe + stripe_size, top);
1320 on_stripe(cur_stripe, stripe_end);
1321 }
1322 }
1323
1324 void PSParallelCompact::adjust_in_old_space(Atomic<uint>* claim_counter) {
1325 // Regions in old-space shouldn't be split.
1326 assert(!_space_info[old_space_id].split_info().is_valid(), "inv");
1327
1328 auto scan_obj_with_limit = [&] (HeapWord* obj_start, HeapWord* left, HeapWord* right) {
1329 assert(mark_bitmap()->is_marked(obj_start), "inv");
1330 oop obj = cast_to_oop(obj_start);
1331 return obj->oop_iterate_size(&pc_adjust_pointer_closure, MemRegion(left, right));
1332 };
1333
1334 adjust_in_space_helper(old_space_id, claim_counter, [&] (HeapWord* stripe_start, HeapWord* stripe_end) {
1335 assert(_summary_data.is_region_aligned(stripe_start), "inv");
1336 RegionData* cur_region = _summary_data.addr_to_region_ptr(stripe_start);
1337 HeapWord* obj_start;
1338 if (cur_region->partial_obj_size() != 0) {
1339 obj_start = cur_region->partial_obj_addr();
1340 obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
1341 } else {
1342 obj_start = stripe_start;
1343 }
1344
1345 while (obj_start < stripe_end) {
1346 obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
1347 if (obj_start >= stripe_end) {
1348 break;
1349 }
1350 obj_start += scan_obj_with_limit(obj_start, stripe_start, stripe_end);
1351 }
1352 });
1353 }
1354
1355 void PSParallelCompact::adjust_in_young_space(SpaceId id, Atomic<uint>* claim_counter) {
1356 adjust_in_space_helper(id, claim_counter, [](HeapWord* stripe_start, HeapWord* stripe_end) {
1357 HeapWord* obj_start = stripe_start;
1358 while (obj_start < stripe_end) {
1359 obj_start = mark_bitmap()->find_obj_beg(obj_start, stripe_end);
1360 if (obj_start >= stripe_end) {
1361 break;
1362 }
1363 oop obj = cast_to_oop(obj_start);
1364 obj_start += obj->oop_iterate_size(&pc_adjust_pointer_closure);
1365 }
1366 });
1367 }
1368
1369 void PSParallelCompact::adjust_pointers_in_spaces(uint worker_id, Atomic<uint>* claim_counters) {
1370 auto start_time = Ticks::now();
1371 adjust_in_old_space(&claim_counters[0]);
1372 for (uint id = eden_space_id; id < last_space_id; ++id) {
1373 adjust_in_young_space(SpaceId(id), &claim_counters[id]);
1374 }
1375 log_trace(gc, phases)("adjust_pointers_in_spaces worker %u: %.3f ms", worker_id, (Ticks::now() - start_time).seconds() * 1000);
1376 }
1377
1378 class PSAdjustTask final : public WorkerTask {
1379 ThreadsClaimTokenScope _threads_claim_token_scope;
1380 WeakProcessor::Task _weak_proc_task;
1381 OopStorageSetStrongParState<false, false> _oop_storage_iter;
1382 uint _nworkers;
1383 Atomic<bool> _code_cache_claimed;
1384 Atomic<uint> _claim_counters[PSParallelCompact::last_space_id];
1385
1386 bool try_claim_code_cache_task() {
1387 return _code_cache_claimed.load_relaxed() == false
1388 && _code_cache_claimed.compare_set(false, true);
1389 }
1390
1391 public:
1392 PSAdjustTask(uint nworkers) :
1393 WorkerTask("PSAdjust task"),
1394 _threads_claim_token_scope(),
1395 _weak_proc_task(nworkers),
1396 _oop_storage_iter(),
1397 _nworkers(nworkers),
1398 _code_cache_claimed(false) {
1399
1400 for (unsigned int i = PSParallelCompact::old_space_id; i < PSParallelCompact::last_space_id; ++i) {
1401 ::new (&_claim_counters[i]) Atomic<uint>{};
1402 }
1403 ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
1404 }
1405
1406 void work(uint worker_id) {
1407 {
1408 // Pointers in heap.
1409 ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1410 cm->preserved_marks()->adjust_during_full_gc();
1411
1412 PSParallelCompact::adjust_pointers_in_spaces(worker_id, _claim_counters);
1413 }
1414
1415 {
1416 // All (strong and weak) CLDs.
1417 CLDToOopClosure cld_closure(&pc_adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
1418 ClassLoaderDataGraph::cld_do(&cld_closure);
1419 }
1420
1421 {
1422 // Threads stack frames. No need to visit on-stack nmethods, because all
1423 // nmethods are visited in one go via CodeCache::nmethods_do.
1424 ResourceMark rm;
1425 Threads::possibly_parallel_oops_do(_nworkers > 1, &pc_adjust_pointer_closure, nullptr);
1426 if (try_claim_code_cache_task()) {
1427 NMethodToOopClosure adjust_code(&pc_adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
1428 CodeCache::nmethods_do(&adjust_code);
1429 }
1430 }
1431
1432 {
1433 // VM internal strong and weak roots.
1434 _oop_storage_iter.oops_do(&pc_adjust_pointer_closure);
1435 AlwaysTrueClosure always_alive;
1436 _weak_proc_task.work(worker_id, &always_alive, &pc_adjust_pointer_closure);
1437 }
1438 }
1439 };
1440
1441 void PSParallelCompact::adjust_pointers() {
1442 // Adjust the pointers to reflect the new locations
1443 GCTraceTime(Info, gc, phases) tm("Adjust Pointers", &_gc_timer);
1444 uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1445 PSAdjustTask task(nworkers);
1446 ParallelScavengeHeap::heap()->workers().run_task(&task);
1447 }
1448
1449 // Split [start, end) evenly for a number of workers and return the
1450 // range for worker_id.
1451 static void split_regions_for_worker(size_t start, size_t end,
1452 uint worker_id, uint num_workers,
1453 size_t* worker_start, size_t* worker_end) {
1454 assert(start < end, "precondition");
1455 assert(num_workers > 0, "precondition");
1456 assert(worker_id < num_workers, "precondition");
1457
1458 size_t num_regions = end - start;
1459 size_t num_regions_per_worker = num_regions / num_workers;
1460 size_t remainder = num_regions % num_workers;
1461 // The first few workers will get one extra.
1462 *worker_start = start + worker_id * num_regions_per_worker
1463 + MIN2(checked_cast<size_t>(worker_id), remainder);
1464 *worker_end = *worker_start + num_regions_per_worker
1465 + (worker_id < remainder ? 1 : 0);
1466 }
1467
1468 static bool safe_to_read_header(size_t words) {
1469 precond(words > 0);
1470
1471 // Safe to read if we have enough words for the full header, i.e., both
1472 // markWord and Klass pointer.
1473 const bool safe = words >= (size_t)oopDesc::header_size();
1474
1475 // If using Compact Object Headers, the full header is inside the markWord,
1476 // so will always be safe to read
1477 assert(!UseCompactObjectHeaders || safe, "Compact Object Headers should always be safe");
1478
1479 return safe;
1480 }
1481
1482 void PSParallelCompact::forward_to_new_addr() {
1483 GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
1484 uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
1485
1486 struct ForwardTask final : public WorkerTask {
1487 uint _num_workers;
1488
1489 explicit ForwardTask(uint num_workers) :
1490 WorkerTask("PSForward task"),
1491 _num_workers(num_workers) {}
1492
1493 static bool should_preserve_mark(oop obj, HeapWord* end_addr) {
1494 size_t remaining_words = pointer_delta(end_addr, cast_from_oop<HeapWord*>(obj));
1495
1496 if (Arguments::is_valhalla_enabled() && !safe_to_read_header(remaining_words)) {
1497 // When using Valhalla, it might be necessary to preserve the Valhalla-
1498 // specific bits in the markWord. If the entire object header is
1499 // copied, the correct markWord (with the appropriate Valhalla bits)
1500 // can be safely read from the Klass. However, if the full header is
1501 // not copied, we cannot safely read the Klass to obtain this information.
1502 // In such cases, we always preserve the markWord to ensure that all
1503 // relevant bits, including Valhalla-specific ones, are retained.
1504 return true;
1505 } else {
1506 return obj->mark().must_be_preserved();
1507 }
1508 }
1509
1510 static void forward_objs_in_range(ParCompactionManager* cm,
1511 HeapWord* start,
1512 HeapWord* end,
1513 HeapWord* destination) {
1514 HeapWord* cur_addr = start;
1515 HeapWord* new_addr = destination;
1516
1517 while (cur_addr < end) {
1518 cur_addr = mark_bitmap()->find_obj_beg(cur_addr, end);
1519 if (cur_addr >= end) {
1520 return;
1521 }
1522 assert(mark_bitmap()->is_marked(cur_addr), "inv");
1523 oop obj = cast_to_oop(cur_addr);
1524
1525 if (new_addr != cur_addr) {
1526 if (should_preserve_mark(obj, end)) {
1527 cm->preserved_marks()->push_always(obj, obj->mark());
1528 }
1529
1530 FullGCForwarding::forward_to(obj, cast_to_oop(new_addr));
1531 }
1532 size_t obj_size = obj->size();
1533 new_addr += obj_size;
1534 cur_addr += obj_size;
1535 }
1536 }
1537
1538 void work(uint worker_id) override {
1539 ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1540 for (uint id = old_space_id; id < last_space_id; ++id) {
1541 MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1542 HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1543 HeapWord* top = sp->top();
1544
1545 if (dense_prefix_addr == top) {
1546 // Empty space
1547 continue;
1548 }
1549
1550 const SplitInfo& split_info = _space_info[SpaceId(id)].split_info();
1551 size_t dense_prefix_region = _summary_data.addr_to_region_idx(dense_prefix_addr);
1552 size_t top_region = _summary_data.addr_to_region_idx(_summary_data.region_align_up(top));
1553 size_t start_region;
1554 size_t end_region;
1555 split_regions_for_worker(dense_prefix_region, top_region,
1556 worker_id, _num_workers,
1557 &start_region, &end_region);
1558 for (size_t cur_region = start_region; cur_region < end_region; ++cur_region) {
1559 RegionData* region_ptr = _summary_data.region(cur_region);
1560 size_t partial_obj_size = region_ptr->partial_obj_size();
1561
1562 if (partial_obj_size == ParallelCompactData::RegionSize) {
1563 // No obj-start
1564 continue;
1565 }
1566
1567 HeapWord* region_start = _summary_data.region_to_addr(cur_region);
1568 HeapWord* region_end = region_start + ParallelCompactData::RegionSize;
1569
1570 if (split_info.is_split(cur_region)) {
1571 // Part 1: will be relocated to space-1
1572 HeapWord* preceding_destination = split_info.preceding_destination();
1573 HeapWord* split_point = split_info.split_point();
1574 forward_objs_in_range(cm, region_start + partial_obj_size, split_point, preceding_destination + partial_obj_size);
1575
1576 // Part 2: will be relocated to space-2
1577 HeapWord* destination = region_ptr->destination();
1578 forward_objs_in_range(cm, split_point, region_end, destination);
1579 } else {
1580 HeapWord* destination = region_ptr->destination();
1581 forward_objs_in_range(cm, region_start + partial_obj_size, region_end, destination + partial_obj_size);
1582 }
1583 }
1584 }
1585 }
1586 } task(nworkers);
1587
1588 ParallelScavengeHeap::heap()->workers().run_task(&task);
1589 DEBUG_ONLY(verify_forward();)
1590 }
1591
1592 #ifdef ASSERT
1593 void PSParallelCompact::verify_forward() {
1594 HeapWord* const old_dense_prefix_addr = dense_prefix(SpaceId(old_space_id));
1595 // The destination addr for the first live obj after dense-prefix.
1596 HeapWord* bump_ptr = old_dense_prefix_addr
1597 + _summary_data.addr_to_region_ptr(old_dense_prefix_addr)->partial_obj_size();
1598 SpaceId bump_ptr_space = old_space_id;
1599
1600 for (uint id = old_space_id; id < last_space_id; ++id) {
1601 MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1602 // Only verify objs after dense-prefix, because those before dense-prefix are not moved (forwarded).
1603 HeapWord* cur_addr = dense_prefix(SpaceId(id));
1604 HeapWord* top = sp->top();
1605
1606 while (cur_addr < top) {
1607 cur_addr = mark_bitmap()->find_obj_beg(cur_addr, top);
1608 if (cur_addr >= top) {
1609 break;
1610 }
1611 assert(mark_bitmap()->is_marked(cur_addr), "inv");
1612 assert(bump_ptr <= _space_info[bump_ptr_space].new_top(), "inv");
1613 // Move to the space containing cur_addr
1614 if (bump_ptr == _space_info[bump_ptr_space].new_top()) {
1615 bump_ptr = space(space_id(cur_addr))->bottom();
1616 bump_ptr_space = space_id(bump_ptr);
1617 }
1618 oop obj = cast_to_oop(cur_addr);
1619 if (cur_addr == bump_ptr) {
1620 assert(!FullGCForwarding::is_forwarded(obj), "inv");
1621 } else {
1622 assert(FullGCForwarding::forwardee(obj) == cast_to_oop(bump_ptr), "inv");
1623 }
1624 bump_ptr += obj->size();
1625 cur_addr += obj->size();
1626 }
1627 }
1628 }
1629 #endif
1630
1631 // Helper class to print 8 region numbers per line and then print the total at the end.
1632 class FillableRegionLogger : public StackObj {
1633 private:
1634 Log(gc, compaction) log;
1635 static const int LineLength = 8;
1636 size_t _regions[LineLength];
1637 int _next_index;
1638 bool _enabled;
1639 size_t _total_regions;
1640 public:
1641 FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
1642 ~FillableRegionLogger() {
1643 log.trace("%zu initially fillable regions", _total_regions);
1644 }
1645
1646 void print_line() {
1647 if (!_enabled || _next_index == 0) {
1648 return;
1649 }
1650 FormatBuffer<> line("Fillable: ");
1651 for (int i = 0; i < _next_index; i++) {
1652 line.append(" %7zu", _regions[i]);
1653 }
1654 log.trace("%s", line.buffer());
1655 _next_index = 0;
1656 }
1657
1658 void handle(size_t region) {
1659 if (!_enabled) {
1660 return;
1661 }
1662 _regions[_next_index++] = region;
1663 if (_next_index == LineLength) {
1664 print_line();
1665 }
1666 _total_regions++;
1667 }
1668 };
1669
1670 void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
1671 {
1672 GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
1673
1674 // Find the threads that are active
1675 uint worker_id = 0;
1676
1677 // Find all regions that are available (can be filled immediately) and
1678 // distribute them to the thread stacks. The iteration is done in reverse
1679 // order (high to low) so the regions will be removed in ascending order.
1680
1681 const ParallelCompactData& sd = PSParallelCompact::summary_data();
1682
1683 // id + 1 is used to test termination so unsigned can
1684 // be used with an old_space_id == 0.
1685 FillableRegionLogger region_logger;
1686 for (unsigned int id = last_space_id - 1; id + 1 > old_space_id; --id) {
1687 SpaceInfo* const space_info = _space_info + id;
1688 HeapWord* const new_top = space_info->new_top();
1689
1690 const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
1691 const size_t end_region =
1692 sd.addr_to_region_idx(sd.region_align_up(new_top));
1693
1694 for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
1695 if (sd.region(cur)->claim_unsafe()) {
1696 ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
1697 bool result = sd.region(cur)->mark_normal();
1698 assert(result, "Must succeed at this point.");
1699 cm->region_stack()->push(cur);
1700 region_logger.handle(cur);
1701 // Assign regions to tasks in round-robin fashion.
1702 if (++worker_id == parallel_gc_threads) {
1703 worker_id = 0;
1704 }
1705 }
1706 }
1707 region_logger.print_line();
1708 }
1709 }
1710
1711 static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
1712 assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
1713
1714 ParCompactionManager* cm =
1715 ParCompactionManager::gc_thread_compaction_manager(worker_id);
1716
1717 // Drain the stacks that have been preloaded with regions
1718 // that are ready to fill.
1719
1720 cm->drain_region_stacks();
1721
1722 guarantee(cm->region_stack()->is_empty(), "Not empty");
1723
1724 size_t region_index = 0;
1725
1726 while (true) {
1727 if (ParCompactionManager::steal(worker_id, region_index)) {
1728 PSParallelCompact::fill_and_update_region(cm, region_index);
1729 cm->drain_region_stacks();
1730 } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
1731 // Fill and update an unavailable region with the help of a shadow region
1732 PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
1733 cm->drain_region_stacks();
1734 } else {
1735 if (terminator->offer_termination()) {
1736 break;
1737 }
1738 // Go around again.
1739 }
1740 }
1741 }
1742
1743 class FillDensePrefixAndCompactionTask: public WorkerTask {
1744 TaskTerminator _terminator;
1745
1746 public:
1747 FillDensePrefixAndCompactionTask(uint active_workers) :
1748 WorkerTask("FillDensePrefixAndCompactionTask"),
1749 _terminator(active_workers, ParCompactionManager::region_task_queues()) {
1750 }
1751
1752 virtual void work(uint worker_id) {
1753 if (worker_id == 0) {
1754 auto start = Ticks::now();
1755 PSParallelCompact::fill_dead_objs_in_dense_prefix();
1756 log_trace(gc, phases)("Fill dense prefix by worker 0: %.3f ms", (Ticks::now() - start).seconds() * 1000);
1757 }
1758 compaction_with_stealing_work(&_terminator, worker_id);
1759 }
1760 };
1761
1762 void PSParallelCompact::fill_range_in_dense_prefix(HeapWord* start, HeapWord* end) {
1763 #ifdef ASSERT
1764 {
1765 assert(start < end, "precondition");
1766 assert(mark_bitmap()->find_obj_beg(start, end) == end, "precondition");
1767 HeapWord* bottom = _space_info[old_space_id].space()->bottom();
1768 if (start != bottom) {
1769 // The preceding live obj.
1770 HeapWord* obj_start = mark_bitmap()->find_obj_beg_reverse(bottom, start);
1771 HeapWord* obj_end = obj_start + cast_to_oop(obj_start)->size();
1772 assert(obj_end == start, "precondition");
1773 }
1774 }
1775 #endif
1776
1777 CollectedHeap::fill_with_objects(start, pointer_delta(end, start));
1778 HeapWord* addr = start;
1779 do {
1780 size_t size = cast_to_oop(addr)->size();
1781 start_array(old_space_id)->update_for_block(addr, addr + size);
1782 addr += size;
1783 } while (addr < end);
1784 }
1785
1786 void PSParallelCompact::fill_dead_objs_in_dense_prefix() {
1787 ParMarkBitMap* bitmap = mark_bitmap();
1788
1789 HeapWord* const bottom = _space_info[old_space_id].space()->bottom();
1790 HeapWord* const prefix_end = dense_prefix(old_space_id);
1791
1792 const size_t region_size = ParallelCompactData::RegionSize;
1793
1794 // Fill dead space in [start_addr, end_addr)
1795 HeapWord* const start_addr = bottom;
1796 HeapWord* const end_addr = prefix_end;
1797
1798 for (HeapWord* cur_addr = start_addr; cur_addr < end_addr; /* empty */) {
1799 RegionData* cur_region_ptr = _summary_data.addr_to_region_ptr(cur_addr);
1800 if (cur_region_ptr->data_size() == region_size) {
1801 // Full; no dead space. Next region.
1802 if (_summary_data.is_region_aligned(cur_addr)) {
1803 cur_addr += region_size;
1804 } else {
1805 cur_addr = _summary_data.region_align_up(cur_addr);
1806 }
1807 continue;
1808 }
1809
1810 // Fill dead space inside cur_region.
1811 if (_summary_data.is_region_aligned(cur_addr)) {
1812 cur_addr += cur_region_ptr->partial_obj_size();
1813 }
1814
1815 HeapWord* region_end_addr = _summary_data.region_align_up(cur_addr + 1);
1816 assert(region_end_addr <= end_addr, "inv");
1817 while (cur_addr < region_end_addr) {
1818 // Use end_addr to allow filler-obj to cross region boundary.
1819 HeapWord* live_start = bitmap->find_obj_beg(cur_addr, end_addr);
1820 if (cur_addr != live_start) {
1821 // Found dead space [cur_addr, live_start).
1822 fill_range_in_dense_prefix(cur_addr, live_start);
1823 }
1824 if (live_start >= region_end_addr) {
1825 cur_addr = live_start;
1826 break;
1827 }
1828 assert(bitmap->is_marked(live_start), "inv");
1829 cur_addr = live_start + cast_to_oop(live_start)->size();
1830 }
1831 }
1832 }
1833
1834 void PSParallelCompact::compact() {
1835 GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
1836
1837 uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
1838
1839 initialize_shadow_regions(active_gc_threads);
1840 prepare_region_draining_tasks(active_gc_threads);
1841
1842 {
1843 GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
1844
1845 FillDensePrefixAndCompactionTask task(active_gc_threads);
1846 ParallelScavengeHeap::heap()->workers().run_task(&task);
1847
1848 #ifdef ASSERT
1849 verify_filler_in_dense_prefix();
1850
1851 // Verify that all regions have been processed.
1852 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1853 verify_complete(SpaceId(id));
1854 }
1855 #endif
1856 }
1857 }
1858
1859 #ifdef ASSERT
1860 void PSParallelCompact::verify_filler_in_dense_prefix() {
1861 HeapWord* bottom = _space_info[old_space_id].space()->bottom();
1862 HeapWord* dense_prefix_end = dense_prefix(old_space_id);
1863
1864 const size_t region_size = ParallelCompactData::RegionSize;
1865
1866 for (HeapWord* cur_addr = bottom; cur_addr < dense_prefix_end; /* empty */) {
1867 RegionData* cur_region_ptr = _summary_data.addr_to_region_ptr(cur_addr);
1868 if (cur_region_ptr->data_size() == region_size) {
1869 // Full; no dead space. Next region.
1870 if (_summary_data.is_region_aligned(cur_addr)) {
1871 cur_addr += region_size;
1872 } else {
1873 cur_addr = _summary_data.region_align_up(cur_addr);
1874 }
1875 continue;
1876 }
1877
1878 // This region contains filler objs.
1879 if (_summary_data.is_region_aligned(cur_addr)) {
1880 cur_addr += cur_region_ptr->partial_obj_size();
1881 }
1882
1883 HeapWord* region_end_addr = _summary_data.region_align_up(cur_addr + 1);
1884 assert(region_end_addr <= dense_prefix_end, "inv");
1885
1886 while (cur_addr < region_end_addr) {
1887 oop obj = cast_to_oop(cur_addr);
1888 oopDesc::verify(obj);
1889 if (!mark_bitmap()->is_marked(cur_addr)) {
1890 assert(CollectedHeap::is_filler_object(cast_to_oop(cur_addr)), "inv");
1891 }
1892 cur_addr += obj->size();
1893 }
1894 }
1895 }
1896
1897 void PSParallelCompact::verify_complete(SpaceId space_id) {
1898 // All Regions served as compaction targets, from dense_prefix() to
1899 // new_top(), should be marked as filled and all Regions between new_top()
1900 // and top() should be available (i.e., should have been emptied).
1901 ParallelCompactData& sd = summary_data();
1902 SpaceInfo si = _space_info[space_id];
1903 HeapWord* new_top_addr = sd.region_align_up(si.new_top());
1904 HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
1905 const size_t beg_region = sd.addr_to_region_idx(si.dense_prefix());
1906 const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
1907 const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
1908
1909 size_t cur_region;
1910 for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
1911 const RegionData* const c = sd.region(cur_region);
1912 assert(c->completed(), "region %zu not filled: destination_count=%u",
1913 cur_region, c->destination_count());
1914 }
1915
1916 for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
1917 const RegionData* const c = sd.region(cur_region);
1918 assert(c->available(), "region %zu not empty: destination_count=%u",
1919 cur_region, c->destination_count());
1920 }
1921 }
1922 #endif // #ifdef ASSERT
1923
1924 // Return the SpaceId for the space containing addr. If addr is not in the
1925 // heap, last_space_id is returned. In debug mode it expects the address to be
1926 // in the heap and asserts such.
1927 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
1928 assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
1929
1930 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1931 if (_space_info[id].space()->contains(addr)) {
1932 return SpaceId(id);
1933 }
1934 }
1935
1936 assert(false, "no space contains the addr");
1937 return last_space_id;
1938 }
1939
1940 // Skip over count live words starting from beg, and return the address of the
1941 // next live word. Callers must also ensure that there are enough live words in
1942 // the range [beg, end) to skip.
1943 HeapWord* PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
1944 {
1945 ParMarkBitMap* m = mark_bitmap();
1946 HeapWord* cur_addr = beg;
1947 while (true) {
1948 cur_addr = m->find_obj_beg(cur_addr, end);
1949 assert(cur_addr < end, "inv");
1950 size_t obj_size = cast_to_oop(cur_addr)->size();
1951 // Strictly greater-than
1952 if (obj_size > count) {
1953 return cur_addr + count;
1954 }
1955 count -= obj_size;
1956 cur_addr += obj_size;
1957 }
1958 }
1959
1960 // On starting to fill a destination region (dest-region), we need to know the
1961 // location of the word that will be at the start of the dest-region after
1962 // compaction. A dest-region can have one or more source regions, but only the
1963 // first source-region contains this location. This location is retrieved by
1964 // calling `first_src_addr` on a dest-region.
1965 // Conversely, a source-region has a dest-region which holds the destination of
1966 // the first live word on this source-region, based on which the destination
1967 // for the rest of live words can be derived.
1968 //
1969 // Note:
1970 // There is some complication due to space-boundary-fragmentation (an obj can't
1971 // cross space-boundary) -- a source-region may be split and behave like two
1972 // distinct regions with their own dest-region, as depicted below.
1973 //
1974 // source-region: region-n
1975 //
1976 // **********************
1977 // | A|A~~~~B|B |
1978 // **********************
1979 // n-1 n n+1
1980 //
1981 // AA, BB denote two live objs. ~~~~ denotes unknown number of live objs.
1982 //
1983 // Assuming the dest-region for region-n is the final region before
1984 // old-space-end and its first-live-word is the middle of AA, the heap content
1985 // will look like the following after compaction:
1986 //
1987 // ************** *************
1988 // A|A~~~~ | |BB |
1989 // ************** *************
1990 // ^ ^
1991 // | old-space-end | eden-space-start
1992 //
1993 // Therefore, in this example, region-n will have two dest-regions:
1994 // 1. the final region in old-space
1995 // 2. the first region in eden-space.
1996 // To handle this special case, we introduce the concept of split-region, whose
1997 // contents are relocated to two spaces. `SplitInfo` captures all necessary
1998 // info about the split, the first part, spliting-point, and the second part.
1999 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
2000 SpaceId src_space_id,
2001 size_t src_region_idx)
2002 {
2003 const size_t RegionSize = ParallelCompactData::RegionSize;
2004 const ParallelCompactData& sd = summary_data();
2005 assert(sd.is_region_aligned(dest_addr), "precondition");
2006
2007 const RegionData* const src_region_ptr = sd.region(src_region_idx);
2008 assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
2009
2010 const size_t partial_obj_size = src_region_ptr->partial_obj_size();
2011 HeapWord* const src_region_destination = src_region_ptr->destination();
2012
2013 HeapWord* const region_start = sd.region_to_addr(src_region_idx);
2014 HeapWord* const region_end = sd.region_to_addr(src_region_idx) + RegionSize;
2015
2016 // Identify the actual destination for the first live words on this region,
2017 // taking split-region into account.
2018 HeapWord* region_start_destination;
2019 const SplitInfo& split_info = _space_info[src_space_id].split_info();
2020 if (split_info.is_split(src_region_idx)) {
2021 // The second part of this split region; use the recorded split point.
2022 if (dest_addr == src_region_destination) {
2023 return split_info.split_point();
2024 }
2025 region_start_destination = split_info.preceding_destination();
2026 } else {
2027 region_start_destination = src_region_destination;
2028 }
2029
2030 // Calculate the offset to be skipped
2031 size_t words_to_skip = pointer_delta(dest_addr, region_start_destination);
2032
2033 HeapWord* result;
2034 if (partial_obj_size > words_to_skip) {
2035 result = region_start + words_to_skip;
2036 } else {
2037 words_to_skip -= partial_obj_size;
2038 result = skip_live_words(region_start + partial_obj_size, region_end, words_to_skip);
2039 }
2040
2041 if (split_info.is_split(src_region_idx)) {
2042 assert(result < split_info.split_point(), "postcondition");
2043 } else {
2044 assert(result < region_end, "postcondition");
2045 }
2046
2047 return result;
2048 }
2049
2050 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2051 SpaceId src_space_id,
2052 size_t beg_region,
2053 HeapWord* end_addr)
2054 {
2055 ParallelCompactData& sd = summary_data();
2056
2057 #ifdef ASSERT
2058 MutableSpace* const src_space = _space_info[src_space_id].space();
2059 HeapWord* const beg_addr = sd.region_to_addr(beg_region);
2060 assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
2061 "src_space_id does not match beg_addr");
2062 assert(src_space->contains(end_addr) || end_addr == src_space->end(),
2063 "src_space_id does not match end_addr");
2064 #endif // #ifdef ASSERT
2065
2066 RegionData* const beg = sd.region(beg_region);
2067 RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
2068
2069 // Regions up to new_top() are enqueued if they become available.
2070 HeapWord* const new_top = _space_info[src_space_id].new_top();
2071 RegionData* const enqueue_end =
2072 sd.addr_to_region_ptr(sd.region_align_up(new_top));
2073
2074 for (RegionData* cur = beg; cur < end; ++cur) {
2075 assert(cur->data_size() > 0, "region must have live data");
2076 cur->decrement_destination_count();
2077 if (cur < enqueue_end && cur->available() && cur->claim()) {
2078 if (cur->mark_normal()) {
2079 cm->push_region(sd.region(cur));
2080 } else if (cur->mark_copied()) {
2081 // Try to copy the content of the shadow region back to its corresponding
2082 // heap region if the shadow region is filled. Otherwise, the GC thread
2083 // fills the shadow region will copy the data back (see
2084 // MoveAndUpdateShadowClosure::complete_region).
2085 copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur));
2086 ParCompactionManager::push_shadow_region_mt_safe(cur->shadow_region());
2087 cur->set_completed();
2088 }
2089 }
2090 }
2091 }
2092
2093 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
2094 SpaceId& src_space_id,
2095 HeapWord*& src_space_top,
2096 HeapWord* end_addr)
2097 {
2098 ParallelCompactData& sd = PSParallelCompact::summary_data();
2099
2100 size_t src_region_idx = 0;
2101
2102 // Skip empty regions (if any) up to the top of the space.
2103 HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
2104 RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
2105 HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
2106 const RegionData* const top_region_ptr = sd.addr_to_region_ptr(top_aligned_up);
2107
2108 while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
2109 ++src_region_ptr;
2110 }
2111
2112 if (src_region_ptr < top_region_ptr) {
2113 // Found the first non-empty region in the same space.
2114 src_region_idx = sd.region(src_region_ptr);
2115 closure.set_source(sd.region_to_addr(src_region_idx));
2116 return src_region_idx;
2117 }
2118
2119 // Switch to a new source space and find the first non-empty region.
2120 uint space_id = src_space_id + 1;
2121 assert(space_id < last_space_id, "not enough spaces");
2122
2123 for (/* empty */; space_id < last_space_id; ++space_id) {
2124 HeapWord* bottom = _space_info[space_id].space()->bottom();
2125 HeapWord* top = _space_info[space_id].space()->top();
2126 // Skip empty space
2127 if (bottom == top) {
2128 continue;
2129 }
2130
2131 // Identify the first region that contains live words in this space
2132 size_t cur_region = sd.addr_to_region_idx(bottom);
2133 size_t end_region = sd.addr_to_region_idx(sd.region_align_up(top));
2134
2135 for (/* empty */ ; cur_region < end_region; ++cur_region) {
2136 RegionData* cur = sd.region(cur_region);
2137 if (cur->live_obj_size() > 0) {
2138 HeapWord* region_start_addr = sd.region_to_addr(cur_region);
2139
2140 src_space_id = SpaceId(space_id);
2141 src_space_top = top;
2142 closure.set_source(region_start_addr);
2143 return cur_region;
2144 }
2145 }
2146 }
2147
2148 ShouldNotReachHere();
2149 }
2150
2151 HeapWord* PSParallelCompact::partial_obj_end(HeapWord* region_start_addr) {
2152 ParallelCompactData& sd = summary_data();
2153 assert(sd.is_region_aligned(region_start_addr), "precondition");
2154
2155 // Use per-region partial_obj_size to locate the end of the obj, that extends
2156 // to region_start_addr.
2157 size_t start_region_idx = sd.addr_to_region_idx(region_start_addr);
2158 size_t end_region_idx = sd.region_count();
2159 size_t accumulated_size = 0;
2160 for (size_t region_idx = start_region_idx; region_idx < end_region_idx; ++region_idx) {
2161 size_t cur_partial_obj_size = sd.region(region_idx)->partial_obj_size();
2162 accumulated_size += cur_partial_obj_size;
2163 if (cur_partial_obj_size != ParallelCompactData::RegionSize) {
2164 break;
2165 }
2166 }
2167 return region_start_addr + accumulated_size;
2168 }
2169
2170 static markWord safe_mark_word_prototype(HeapWord* cur_addr, HeapWord* end_addr) {
2171 // If the original markWord contains bits that cannot be reconstructed because
2172 // the header cannot be safely read, a placeholder is used. In this case,
2173 // the correct markWord is preserved before compaction and restored after
2174 // compaction completes.
2175 size_t remaining_words = pointer_delta(end_addr, cur_addr);
2176
2177 if (UseCompactObjectHeaders || (Arguments::is_valhalla_enabled() && safe_to_read_header(remaining_words))) {
2178 return cast_to_oop(cur_addr)->klass()->prototype_header();
2179 } else {
2180 return markWord::prototype();
2181 }
2182 }
2183
2184 // Use region_idx as the destination region, and evacuate all live objs on its
2185 // source regions to this destination region.
2186 void PSParallelCompact::fill_region(ParCompactionManager* cm, MoveAndUpdateClosure& closure, size_t region_idx)
2187 {
2188 ParMarkBitMap* const bitmap = mark_bitmap();
2189 ParallelCompactData& sd = summary_data();
2190 RegionData* const region_ptr = sd.region(region_idx);
2191
2192 // Get the source region and related info.
2193 size_t src_region_idx = region_ptr->source_region();
2194 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2195 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2196 HeapWord* dest_addr = sd.region_to_addr(region_idx);
2197
2198 closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2199
2200 // Adjust src_region_idx to prepare for decrementing destination counts (the
2201 // destination count is not decremented when a region is copied to itself).
2202 if (src_region_idx == region_idx) {
2203 src_region_idx += 1;
2204 }
2205
2206 // source-region:
2207 //
2208 // **********
2209 // | ~~~ |
2210 // **********
2211 // ^
2212 // |-- closure.source() / first_src_addr
2213 //
2214 //
2215 // ~~~ : live words
2216 //
2217 // destination-region:
2218 //
2219 // **********
2220 // | |
2221 // **********
2222 // ^
2223 // |-- region-start
2224 if (bitmap->is_unmarked(closure.source())) {
2225 // An object overflows the previous destination region, so this
2226 // destination region should copy the remainder of the object or as much as
2227 // will fit.
2228 HeapWord* const old_src_addr = closure.source();
2229 {
2230 HeapWord* region_start = sd.region_align_down(closure.source());
2231 HeapWord* obj_start = bitmap->find_obj_beg_reverse(region_start, closure.source());
2232 HeapWord* obj_end;
2233 if (obj_start != closure.source()) {
2234 assert(bitmap->is_marked(obj_start), "inv");
2235 // Found the actual obj-start, try to find the obj-end using either
2236 // size() if this obj is completely contained in the current region.
2237 HeapWord* next_region_start = region_start + ParallelCompactData::RegionSize;
2238 HeapWord* partial_obj_start = (next_region_start >= src_space_top)
2239 ? nullptr
2240 : sd.addr_to_region_ptr(next_region_start)->partial_obj_addr();
2241 // This obj extends to next region iff partial_obj_addr of the *next*
2242 // region is the same as obj-start.
2243 if (partial_obj_start == obj_start) {
2244 // This obj extends to next region.
2245 obj_end = partial_obj_end(next_region_start);
2246 } else {
2247 // Completely contained in this region; safe to use size().
2248 obj_end = obj_start + cast_to_oop(obj_start)->size();
2249 }
2250 } else {
2251 // This obj extends to current region.
2252 obj_end = partial_obj_end(region_start);
2253 }
2254 size_t partial_obj_size = pointer_delta(obj_end, closure.source());
2255 closure.copy_partial_obj(partial_obj_size);
2256 }
2257
2258 if (closure.is_full()) {
2259 decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2260 closure.complete_region(dest_addr, region_ptr);
2261 return;
2262 }
2263
2264 // Finished copying without using up the current destination-region
2265 HeapWord* const end_addr = sd.region_align_down(closure.source());
2266 if (sd.region_align_down(old_src_addr) != end_addr) {
2267 assert(sd.region_align_up(old_src_addr) == end_addr, "only one region");
2268 // The partial object was copied from more than one source region.
2269 decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2270
2271 // Move to the next source region, possibly switching spaces as well. All
2272 // args except end_addr may be modified.
2273 src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2274 }
2275 }
2276
2277 // Handle the rest obj-by-obj, where we know obj-start.
2278 do {
2279 HeapWord* cur_addr = closure.source();
2280 HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2281 src_space_top);
2282 // To handle the case where the final obj in source region extends to next region.
2283 HeapWord* final_obj_start = (end_addr == src_space_top)
2284 ? nullptr
2285 : sd.addr_to_region_ptr(end_addr)->partial_obj_addr();
2286 // Apply closure on objs inside [cur_addr, end_addr)
2287 do {
2288 cur_addr = bitmap->find_obj_beg(cur_addr, end_addr);
2289 if (cur_addr == end_addr) {
2290 break;
2291 }
2292 size_t obj_size;
2293 if (final_obj_start == cur_addr) {
2294 obj_size = pointer_delta(partial_obj_end(end_addr), cur_addr);
2295 } else {
2296 // This obj doesn't extend into next region; size() is safe to use.
2297 obj_size = cast_to_oop(cur_addr)->size();
2298 }
2299
2300 markWord mark = safe_mark_word_prototype(cur_addr, end_addr);
2301
2302 // Perform the move and update of the object
2303 closure.do_addr(cur_addr, obj_size, mark);
2304
2305 cur_addr += obj_size;
2306 } while (cur_addr < end_addr && !closure.is_full());
2307
2308 if (closure.is_full()) {
2309 decrement_destination_counts(cm, src_space_id, src_region_idx, closure.source());
2310 closure.complete_region(dest_addr, region_ptr);
2311 return;
2312 }
2313
2314 decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2315
2316 // Move to the next source region, possibly switching spaces as well. All
2317 // args except end_addr may be modified.
2318 src_region_idx = next_src_region(closure, src_space_id, src_space_top, end_addr);
2319 } while (true);
2320 }
2321
2322 void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
2323 {
2324 MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2325 fill_region(cm, cl, region_idx);
2326 }
2327
2328 void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx)
2329 {
2330 // Get a shadow region first
2331 ParallelCompactData& sd = summary_data();
2332 RegionData* const region_ptr = sd.region(region_idx);
2333 size_t shadow_region = ParCompactionManager::pop_shadow_region_mt_safe(region_ptr);
2334 // The InvalidShadow return value indicates the corresponding heap region is available,
2335 // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
2336 // MoveAndUpdateShadowClosure to fill the acquired shadow region.
2337 if (shadow_region == ParCompactionManager::InvalidShadow) {
2338 MoveAndUpdateClosure cl(mark_bitmap(), region_idx);
2339 region_ptr->shadow_to_normal();
2340 return fill_region(cm, cl, region_idx);
2341 } else {
2342 MoveAndUpdateShadowClosure cl(mark_bitmap(), region_idx, shadow_region);
2343 return fill_region(cm, cl, region_idx);
2344 }
2345 }
2346
2347 void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr)
2348 {
2349 Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
2350 }
2351
2352 bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_t ®ion_idx)
2353 {
2354 size_t next = cm->next_shadow_region();
2355 ParallelCompactData& sd = summary_data();
2356 size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
2357 uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
2358
2359 while (next < old_new_top) {
2360 if (sd.region(next)->mark_shadow()) {
2361 region_idx = next;
2362 return true;
2363 }
2364 next = cm->move_next_shadow_region_by(active_gc_threads);
2365 }
2366
2367 return false;
2368 }
2369
2370 // The shadow region is an optimization to address region dependencies in full GC. The basic
2371 // idea is making more regions available by temporally storing their live objects in empty
2372 // shadow regions to resolve dependencies between them and the destination regions. Therefore,
2373 // GC threads need not wait destination regions to be available before processing sources.
2374 //
2375 // A typical workflow would be:
2376 // After draining its own stack and failing to steal from others, a GC worker would pick an
2377 // unavailable region (destination count > 0) and get a shadow region. Then the worker fills
2378 // the shadow region by copying live objects from source regions of the unavailable one. Once
2379 // the unavailable region becomes available, the data in the shadow region will be copied back.
2380 // Shadow regions are empty regions in the to-space and regions between top and end of other spaces.
2381 void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
2382 {
2383 const ParallelCompactData& sd = PSParallelCompact::summary_data();
2384
2385 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2386 SpaceInfo* const space_info = _space_info + id;
2387 MutableSpace* const space = space_info->space();
2388
2389 const size_t beg_region =
2390 sd.addr_to_region_idx(sd.region_align_up(MAX2(space_info->new_top(), space->top())));
2391 const size_t end_region =
2392 sd.addr_to_region_idx(sd.region_align_down(space->end()));
2393
2394 for (size_t cur = beg_region; cur < end_region; ++cur) {
2395 ParCompactionManager::push_shadow_region(cur);
2396 }
2397 }
2398
2399 size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
2400 for (uint i = 0; i < parallel_gc_threads; i++) {
2401 ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
2402 cm->set_next_shadow_region(beg_region + i);
2403 }
2404 }
2405
2406 void MoveAndUpdateClosure::copy_partial_obj(size_t partial_obj_size)
2407 {
2408 size_t words = MIN2(partial_obj_size, words_remaining());
2409
2410 // This test is necessary; if omitted, the pointer updates to a partial object
2411 // that crosses the dense prefix boundary could be overwritten.
2412 if (source() != copy_destination()) {
2413 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2414 Copy::aligned_conjoint_words(source(), copy_destination(), words);
2415 }
2416 update_state(words);
2417 }
2418
2419 void MoveAndUpdateClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2420 assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::NormalRegion, "Region should be finished");
2421 region_ptr->set_completed();
2422 }
2423
2424 void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words, markWord mark) {
2425 assert(destination() != nullptr, "sanity");
2426 _source = addr;
2427
2428 // The start_array must be updated even if the object is not moving.
2429 if (_start_array != nullptr) {
2430 _start_array->update_for_block(destination(), destination() + words);
2431 }
2432
2433 // Avoid overflow
2434 words = MIN2(words, words_remaining());
2435 assert(words > 0, "inv");
2436
2437 if (copy_destination() != source()) {
2438 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2439 assert(source() != destination(), "inv");
2440 assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
2441 assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
2442 Copy::aligned_conjoint_words(source(), copy_destination(), words);
2443 cast_to_oop(copy_destination())->set_mark(mark);
2444 }
2445
2446 update_state(words);
2447 }
2448
2449 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2450 assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2451 // Record the shadow region index
2452 region_ptr->set_shadow_region(_shadow);
2453 // Mark the shadow region as filled to indicate the data is ready to be
2454 // copied back
2455 region_ptr->mark_filled();
2456 // Try to copy the content of the shadow region back to its corresponding
2457 // heap region if available; the GC thread that decreases the destination
2458 // count to zero will do the copying otherwise (see
2459 // PSParallelCompact::decrement_destination_counts).
2460 if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2461 region_ptr->set_completed();
2462 PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2463 ParCompactionManager::push_shadow_region_mt_safe(_shadow);
2464 }
2465 }