513 void ParallelCompactData::verify_clear() {
514 for (uint cur_idx = 0; cur_idx < region_count(); ++cur_idx) {
515 if (!region(cur_idx)->is_clear()) {
516 log_warning(gc)("Uncleared Region: %u", cur_idx);
517 region(cur_idx)->verify_clear();
518 }
519 }
520 }
521 #endif // #ifdef ASSERT
522
// Definitions of PSParallelCompact's static data members.
STWGCTimer PSParallelCompact::_gc_timer;               // times the stop-the-world full-GC pause
ParallelOldTracer PSParallelCompact::_gc_tracer;       // GC event/tracing support (passed to marking_phase)
elapsedTimer PSParallelCompact::_accumulated_time;     // accumulated collection time
unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0; // bookkeeping for maximal compaction (usage not visible here)
CollectorCounters* PSParallelCompact::_counters = nullptr;      // perf counters; allocated in post_initialize()
ParMarkBitMap PSParallelCompact::_mark_bitmap;         // liveness bitmap; backs IsAliveClosure::do_object_b
ParallelCompactData PSParallelCompact::_summary_data;  // per-region summary data (see verify_clear above)

// Closure instance consulted by reference processing to test liveness.
PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
532
533 class PCAdjustPointerClosure: public BasicOopIterateClosure {
534 template <typename T>
535 void do_oop_work(T* p) { PSParallelCompact::adjust_pointer(p); }
536
537 public:
538 virtual void do_oop(oop* p) { do_oop_work(p); }
539 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
540
541 virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
542 };
543
// File-scoped shared instance of the adjusting closure; the closure holds no
// per-use state, so a single static object suffices (callers not visible here).
static PCAdjustPointerClosure pc_adjust_pointer_closure;
545
546 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
547
548 void PSParallelCompact::post_initialize() {
549 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
550 _span_based_discoverer.set_span(heap->reserved_region());
551 _ref_processor =
552 new ReferenceProcessor(&_span_based_discoverer,
553 ParallelGCThreads, // mt processing degree
554 ParallelGCThreads, // mt discovery degree
555 false, // concurrent_discovery
556 &_is_alive_closure); // non-header is alive closure
557
558 _counters = new CollectorCounters("Parallel full collection pauses", 1);
559
560 // Initialize static fields in ParCompactionManager.
561 ParCompactionManager::initialize(mark_bitmap());
562 }
563
564 bool PSParallelCompact::initialize_aux_data() {
991 }
992
993 // Let the size policy know we're starting
994 size_policy->major_collection_begin();
995
996 #if COMPILER2_OR_JVMCI
997 DerivedPointerTable::clear();
998 #endif
999
1000 ref_processor()->start_discovery(clear_all_soft_refs);
1001
1002 marking_phase(&_gc_tracer);
1003
1004 summary_phase(should_do_max_compaction);
1005
1006 #if COMPILER2_OR_JVMCI
1007 assert(DerivedPointerTable::is_active(), "Sanity");
1008 DerivedPointerTable::set_active(false);
1009 #endif
1010
1011 forward_to_new_addr();
1012
1013 adjust_pointers();
1014
1015 compact();
1016
1017 ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());
1018
1019 ParCompactionManager::verify_all_region_stack_empty();
1020
1021 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
1022 // done before resizing.
1023 post_compact();
1024
1025 size_policy->major_collection_end();
1026
1027 size_policy->sample_old_gen_used_bytes(MAX2(pre_gc_values.old_gen_used(), old_gen->used_in_bytes()));
1028
1029 if (UseAdaptiveSizePolicy) {
1030 heap->resize_after_full_gc();
1031 }
1032
1033 heap->resize_all_tlabs();
1034
1035 // Resize the metaspace capacity after a collection
1036 MetaspaceGC::compute_new_size();
2367
// Process a block of `words` heap words starting at `addr`: update the block
// start array for the destination, and copy the payload if the object moves.
void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  assert(destination() != nullptr, "sanity");
  _source = addr;

  // The start_array must be updated even if the object is not moving.
  // NOTE(review): the update deliberately uses the unclamped `words` —
  // confirm against callers that this is intended when words > words_remaining().
  if (_start_array != nullptr) {
    _start_array->update_for_block(destination(), destination() + words);
  }

  // Avoid overflow
  // Clamp to what this closure can still accept; presumably the tail is
  // handled by a later invocation — TODO confirm against callers.
  words = MIN2(words, words_remaining());
  assert(words > 0, "inv");

  if (copy_destination() != source()) {
    // The object really moves: the forwarding phase must have recorded
    // exactly this destination for it.
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    assert(source() != destination(), "inv");
    assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
    assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
    // Give the copy a fresh mark word (the copied header may still carry
    // forwarding state).
    cast_to_oop(copy_destination())->init_mark();
  }

  update_state(words);
}
2392
2393 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2394 assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2395 // Record the shadow region index
2396 region_ptr->set_shadow_region(_shadow);
2397 // Mark the shadow region as filled to indicate the data is ready to be
2398 // copied back
2399 region_ptr->mark_filled();
2400 // Try to copy the content of the shadow region back to its corresponding
2401 // heap region if available; the GC thread that decreases the destination
2402 // count to zero will do the copying otherwise (see
2403 // PSParallelCompact::decrement_destination_counts).
2404 if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2405 region_ptr->set_completed();
2406 PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2407 ParCompactionManager::push_shadow_region_mt_safe(_shadow);
|
513 void ParallelCompactData::verify_clear() {
514 for (uint cur_idx = 0; cur_idx < region_count(); ++cur_idx) {
515 if (!region(cur_idx)->is_clear()) {
516 log_warning(gc)("Uncleared Region: %u", cur_idx);
517 region(cur_idx)->verify_clear();
518 }
519 }
520 }
521 #endif // #ifdef ASSERT
522
// Definitions of PSParallelCompact's static data members.
STWGCTimer PSParallelCompact::_gc_timer;               // times the stop-the-world full-GC pause
ParallelOldTracer PSParallelCompact::_gc_tracer;       // GC event/tracing support (passed to marking_phase)
elapsedTimer PSParallelCompact::_accumulated_time;     // accumulated collection time
unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0; // bookkeeping for maximal compaction (usage not visible here)
CollectorCounters* PSParallelCompact::_counters = nullptr;      // perf counters; allocated in post_initialize()
ParMarkBitMap PSParallelCompact::_mark_bitmap;         // liveness bitmap; backs IsAliveClosure::do_object_b
ParallelCompactData PSParallelCompact::_summary_data;  // per-region summary data (see verify_clear above)

// Closure instance consulted by reference processing to test liveness.
PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
532
533 class PCAdjustPointerClosureNew: public BasicOopIterateClosure {
534 template <typename T>
535 void do_oop_work(T* p) { PSParallelCompact::adjust_pointer(p); }
536
537 public:
538 virtual void do_oop(oop* p) { do_oop_work(p); }
539 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
540
541 virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
542 };
543
// File-scoped shared instance of the adjusting closure; the closure holds no
// per-use state, so a single static object suffices (callers not visible here).
static PCAdjustPointerClosureNew pc_adjust_pointer_closure;
545
546 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
547
548 void PSParallelCompact::post_initialize() {
549 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
550 _span_based_discoverer.set_span(heap->reserved_region());
551 _ref_processor =
552 new ReferenceProcessor(&_span_based_discoverer,
553 ParallelGCThreads, // mt processing degree
554 ParallelGCThreads, // mt discovery degree
555 false, // concurrent_discovery
556 &_is_alive_closure); // non-header is alive closure
557
558 _counters = new CollectorCounters("Parallel full collection pauses", 1);
559
560 // Initialize static fields in ParCompactionManager.
561 ParCompactionManager::initialize(mark_bitmap());
562 }
563
564 bool PSParallelCompact::initialize_aux_data() {
991 }
992
993 // Let the size policy know we're starting
994 size_policy->major_collection_begin();
995
996 #if COMPILER2_OR_JVMCI
997 DerivedPointerTable::clear();
998 #endif
999
1000 ref_processor()->start_discovery(clear_all_soft_refs);
1001
1002 marking_phase(&_gc_tracer);
1003
1004 summary_phase(should_do_max_compaction);
1005
1006 #if COMPILER2_OR_JVMCI
1007 assert(DerivedPointerTable::is_active(), "Sanity");
1008 DerivedPointerTable::set_active(false);
1009 #endif
1010
1011 FullGCForwarding::begin();
1012
1013 forward_to_new_addr();
1014
1015 adjust_pointers();
1016
1017 compact();
1018
1019 FullGCForwarding::end();
1020
1021 ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());
1022
1023 ParCompactionManager::verify_all_region_stack_empty();
1024
1025 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
1026 // done before resizing.
1027 post_compact();
1028
1029 size_policy->major_collection_end();
1030
1031 size_policy->sample_old_gen_used_bytes(MAX2(pre_gc_values.old_gen_used(), old_gen->used_in_bytes()));
1032
1033 if (UseAdaptiveSizePolicy) {
1034 heap->resize_after_full_gc();
1035 }
1036
1037 heap->resize_all_tlabs();
1038
1039 // Resize the metaspace capacity after a collection
1040 MetaspaceGC::compute_new_size();
2371
// Process a block of `words` heap words starting at `addr`: update the block
// start array for the destination, and copy the payload if the object moves.
void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  assert(destination() != nullptr, "sanity");
  _source = addr;

  // The start_array must be updated even if the object is not moving.
  // NOTE(review): the update deliberately uses the unclamped `words` —
  // confirm against callers that this is intended when words > words_remaining().
  if (_start_array != nullptr) {
    _start_array->update_for_block(destination(), destination() + words);
  }

  // Avoid overflow
  // Clamp to what this closure can still accept; presumably the tail is
  // handled by a later invocation — TODO confirm against callers.
  words = MIN2(words, words_remaining());
  assert(words > 0, "inv");

  if (copy_destination() != source()) {
    // The object really moves: the forwarding phase must have recorded
    // exactly this destination for it.
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    assert(source() != destination(), "inv");
    assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv");
    assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
    Copy::aligned_conjoint_words(source(), copy_destination(), words);
    // Restore a proper mark word in the copy (the copied header may still
    // carry forwarding state from FullGCForwarding).
    cast_to_oop(copy_destination())->reinit_mark();
  }

  update_state(words);
}
2396
2397 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2398 assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2399 // Record the shadow region index
2400 region_ptr->set_shadow_region(_shadow);
2401 // Mark the shadow region as filled to indicate the data is ready to be
2402 // copied back
2403 region_ptr->mark_filled();
2404 // Try to copy the content of the shadow region back to its corresponding
2405 // heap region if available; the GC thread that decreases the destination
2406 // count to zero will do the copying otherwise (see
2407 // PSParallelCompact::decrement_destination_counts).
2408 if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2409 region_ptr->set_completed();
2410 PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
2411 ParCompactionManager::push_shadow_region_mt_safe(_shadow);
|