
src/hotspot/share/gc/parallel/psParallelCompact.cpp


(old version)

  42 #include "gc/parallel/psRootType.hpp"
  43 #include "gc/parallel/psScavenge.hpp"
  44 #include "gc/parallel/psStringDedup.hpp"
  45 #include "gc/parallel/psYoungGen.hpp"
  46 #include "gc/shared/classUnloadingContext.hpp"
  47 #include "gc/shared/gcCause.hpp"
  48 #include "gc/shared/gcHeapSummary.hpp"
  49 #include "gc/shared/gcId.hpp"
  50 #include "gc/shared/gcLocker.hpp"
  51 #include "gc/shared/gcTimer.hpp"
  52 #include "gc/shared/gcTrace.hpp"
  53 #include "gc/shared/gcTraceTime.inline.hpp"
  54 #include "gc/shared/isGCActiveMark.hpp"
  55 #include "gc/shared/oopStorage.inline.hpp"
  56 #include "gc/shared/oopStorageSet.inline.hpp"
  57 #include "gc/shared/oopStorageSetParState.inline.hpp"
  58 #include "gc/shared/preservedMarks.inline.hpp"
  59 #include "gc/shared/referencePolicy.hpp"
  60 #include "gc/shared/referenceProcessor.hpp"
  61 #include "gc/shared/referenceProcessorPhaseTimes.hpp"

  62 #include "gc/shared/strongRootsScope.hpp"
  63 #include "gc/shared/taskTerminator.hpp"
  64 #include "gc/shared/weakProcessor.inline.hpp"
  65 #include "gc/shared/workerPolicy.hpp"
  66 #include "gc/shared/workerThread.hpp"
  67 #include "gc/shared/workerUtils.hpp"
  68 #include "logging/log.hpp"
  69 #include "memory/iterator.inline.hpp"
  70 #include "memory/metaspaceUtils.hpp"
  71 #include "memory/resourceArea.hpp"
  72 #include "memory/universe.hpp"
  73 #include "nmt/memTracker.hpp"
  74 #include "oops/access.inline.hpp"
  75 #include "oops/instanceClassLoaderKlass.inline.hpp"
  76 #include "oops/instanceKlass.inline.hpp"
  77 #include "oops/instanceMirrorKlass.inline.hpp"
  78 #include "oops/methodData.hpp"
  79 #include "oops/objArrayKlass.inline.hpp"
  80 #include "oops/oop.inline.hpp"
  81 #include "runtime/atomic.hpp"

 767   HeapWord* const prefix_end = sd.region_to_addr(cur_region);
 768   assert(sd.is_region_aligned(prefix_end), "postcondition");
 769   assert(prefix_end >= full_region_prefix_end, "in-range");
 770   assert(prefix_end <= old_space->top(), "in-range");
 771   return prefix_end;
 772 }
 773 
 774 void PSParallelCompact::fill_dense_prefix_end(SpaceId id) {
 775   // Comparing two sizes to decide if filling is required:
 776   //
 777   // The size of the filler (min-obj-size) is 2 heap words with the default
 778   // MinObjAlignment, since both markword and klass take 1 heap word.
 779   //
 780   // The size of the gap (if any) right before dense-prefix-end is
 781   // MinObjAlignment.
 782   //
 783   // Need to fill in the gap only if it's smaller than min-obj-size, and the
 784   // filler obj will extend to next region.
 785 
 786   // Note: If min-fill-size decreases to 1, this whole method becomes redundant.

 787   assert(CollectedHeap::min_fill_size() >= 2, "inv");
 788 #ifndef _LP64
  789   // On 32-bit systems, each heap word is 4 bytes, so MinObjAlignment == 2.
 790   // The gap is always equal to min-fill-size, so nothing to do.
 791   return;
 792 #endif
 793   if (MinObjAlignment > 1) {
 794     return;
 795   }
 796   assert(CollectedHeap::min_fill_size() == 2, "inv");
 797   HeapWord* const dense_prefix_end = dense_prefix(id);
 798   assert(_summary_data.is_region_aligned(dense_prefix_end), "precondition");
 799   assert(dense_prefix_end <= space(id)->top(), "precondition");
 800   if (dense_prefix_end == space(id)->top()) {
 801     // Must not have single-word gap right before prefix-end/top.
 802     return;
 803   }
 804   RegionData* const region_after_dense_prefix = _summary_data.addr_to_region_ptr(dense_prefix_end);
 805 
 806   if (region_after_dense_prefix->partial_obj_size() != 0 ||

1033     DerivedPointerTable::clear();
1034 #endif
1035 
1036     ref_processor()->start_discovery(maximum_heap_compaction);
1037 
1038     ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
1039                               false /* unregister_nmethods_during_purge */,
1040                               false /* lock_nmethod_free_separately */);
1041 
1042     marking_phase(&_gc_tracer);
1043 
1044     bool max_on_system_gc = UseMaximumCompactionOnSystemGC
1045       && GCCause::is_user_requested_gc(gc_cause);
1046     summary_phase(maximum_heap_compaction || max_on_system_gc);
1047 
1048 #if COMPILER2_OR_JVMCI
1049     assert(DerivedPointerTable::is_active(), "Sanity");
1050     DerivedPointerTable::set_active(false);
1051 #endif
1052 


1053     forward_to_new_addr();
1054 
1055     adjust_pointers();
1056 
1057     compact();
1058 


1059     ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());
1060 
1061     ParCompactionManager::verify_all_region_stack_empty();
1062 
1063     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
1064     // done before resizing.
1065     post_compact();
1066 
1067     // Let the size policy know we're done
1068     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1069 
1070     if (UseAdaptiveSizePolicy) {
1071       log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
1072       log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
1073                           old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
1074 
1075       // Don't check if the size_policy is ready here.  Let
1076       // the size_policy check that internally.
1077       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1078           AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {

1578             // No obj-start
1579             continue;
1580           }
1581 
1582           HeapWord* region_start = _summary_data.region_to_addr(cur_region);
1583           HeapWord* region_end = region_start + ParallelCompactData::RegionSize;
1584 
1585           HeapWord* cur_addr = region_start + live_words;
1586 
1587           HeapWord* destination = region_ptr->destination();
1588           while (cur_addr < region_end) {
1589             cur_addr = mark_bitmap()->find_obj_beg(cur_addr, region_end);
1590             if (cur_addr >= region_end) {
1591               break;
1592             }
1593             assert(mark_bitmap()->is_marked(cur_addr), "inv");
1594             HeapWord* new_addr = destination + live_words;
1595             oop obj = cast_to_oop(cur_addr);
1596             if (new_addr != cur_addr) {
1597               cm->preserved_marks()->push_if_necessary(obj, obj->mark());
1598               obj->forward_to(cast_to_oop(new_addr));
1599             }
1600             size_t obj_size = obj->size();
1601             live_words += obj_size;
1602             cur_addr += obj_size;
1603           }
1604         }
1605       }
1606     }
1607   } task(nworkers);
1608 
1609   ParallelScavengeHeap::heap()->workers().run_task(&task);
1610   debug_only(verify_forward();)
1611 }
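
The forwarding loop above computes each live object's new location as its region's destination plus the live words already accounted for in that region, and (in this version) records it straight in the object header via obj->forward_to(). A minimal standalone sketch of that address computation, with invented types and numbers rather than real HotSpot data:

    // Illustrative model only: new_addr = region destination + live words that
    // precede the object within the region. Types, names and numbers below are
    // made up for the sketch; this is not HotSpot code.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct LiveObject {
      std::size_t addr;   // current word offset of the object
      std::size_t words;  // object size in heap words
    };

    int main() {
      const std::size_t destination = 256;  // assumed compaction destination (words)
      // Live objects as a mark bitmap scan would report them, in address order.
      std::vector<LiveObject> live = { {1024, 3}, {1030, 2}, {1040, 5} };

      std::size_t live_words = 0;           // live words seen so far in the region
      for (const LiveObject& obj : live) {
        std::size_t new_addr = destination + live_words;  // same rule as the loop above
        if (new_addr != obj.addr) {
          std::printf("forward %zu -> %zu\n", obj.addr, new_addr);
        }
        live_words += obj.words;
      }
      return 0;
    }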
1612 
1613 #ifdef ASSERT
1614 void PSParallelCompact::verify_forward() {
1615   HeapWord* old_dense_prefix_addr = dense_prefix(SpaceId(old_space_id));
1616   RegionData* old_region = _summary_data.region(_summary_data.addr_to_region_idx(old_dense_prefix_addr));
1617   HeapWord* bump_ptr = old_region->partial_obj_size() != 0
1618                        ? old_dense_prefix_addr + old_region->partial_obj_size()

1621 
1622   for (uint id = old_space_id; id < last_space_id; ++id) {
1623     MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1624     HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1625     HeapWord* top = sp->top();
1626     HeapWord* cur_addr = dense_prefix_addr;
1627 
1628     while (cur_addr < top) {
1629       cur_addr = mark_bitmap()->find_obj_beg(cur_addr, top);
1630       if (cur_addr >= top) {
1631         break;
1632       }
1633       assert(mark_bitmap()->is_marked(cur_addr), "inv");
1634       // Move to the space containing cur_addr
1635       if (bump_ptr == _space_info[bump_ptr_space].new_top()) {
1636         bump_ptr = space(space_id(cur_addr))->bottom();
1637         bump_ptr_space = space_id(bump_ptr);
1638       }
1639       oop obj = cast_to_oop(cur_addr);
1640       if (cur_addr != bump_ptr) {
1641         assert(obj->forwardee() == cast_to_oop(bump_ptr), "inv");
1642       }
1643       bump_ptr += obj->size();
1644       cur_addr += obj->size();
1645     }
1646   }
1647 }
1648 #endif
1649 
1650 // Helper class to print 8 region numbers per line and then print the total at the end.
1651 class FillableRegionLogger : public StackObj {
1652 private:
1653   Log(gc, compaction) log;
1654   static const int LineLength = 8;
1655   size_t _regions[LineLength];
1656   int _next_index;
1657   bool _enabled;
1658   size_t _total_regions;
1659 public:
1660   FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
1661   ~FillableRegionLogger() {

2385   region_ptr->set_completed();
2386 }
2387 
2388 void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
2389   assert(destination() != nullptr, "sanity");
2390   _source = addr;
2391 
2392   // The start_array must be updated even if the object is not moving.
2393   if (_start_array != nullptr) {
2394     _start_array->update_for_block(destination(), destination() + words);
2395   }
2396 
2397   // Avoid overflow
2398   words = MIN2(words, words_remaining());
2399   assert(words > 0, "inv");
2400 
2401   if (copy_destination() != source()) {
2402     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2403     assert(source() != destination(), "inv");
2404     assert(cast_to_oop(source())->is_forwarded(), "inv");
2405     assert(cast_to_oop(source())->forwardee() == cast_to_oop(destination()), "inv");
2406     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2407     cast_to_oop(copy_destination())->init_mark();
2408   }
2409 
2410   update_state(words);
2411 }
2412 
2413 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2414   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2415   // Record the shadow region index
2416   region_ptr->set_shadow_region(_shadow);
2417   // Mark the shadow region as filled to indicate the data is ready to be
2418   // copied back
2419   region_ptr->mark_filled();
2420   // Try to copy the content of the shadow region back to its corresponding
2421   // heap region if available; the GC thread that decreases the destination
2422   // count to zero will do the copying otherwise (see
2423   // PSParallelCompact::decrement_destination_counts).
2424   if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2425     region_ptr->set_completed();

(new version)

  42 #include "gc/parallel/psRootType.hpp"
  43 #include "gc/parallel/psScavenge.hpp"
  44 #include "gc/parallel/psStringDedup.hpp"
  45 #include "gc/parallel/psYoungGen.hpp"
  46 #include "gc/shared/classUnloadingContext.hpp"
  47 #include "gc/shared/gcCause.hpp"
  48 #include "gc/shared/gcHeapSummary.hpp"
  49 #include "gc/shared/gcId.hpp"
  50 #include "gc/shared/gcLocker.hpp"
  51 #include "gc/shared/gcTimer.hpp"
  52 #include "gc/shared/gcTrace.hpp"
  53 #include "gc/shared/gcTraceTime.inline.hpp"
  54 #include "gc/shared/isGCActiveMark.hpp"
  55 #include "gc/shared/oopStorage.inline.hpp"
  56 #include "gc/shared/oopStorageSet.inline.hpp"
  57 #include "gc/shared/oopStorageSetParState.inline.hpp"
  58 #include "gc/shared/preservedMarks.inline.hpp"
  59 #include "gc/shared/referencePolicy.hpp"
  60 #include "gc/shared/referenceProcessor.hpp"
  61 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  62 #include "gc/shared/slidingForwarding.inline.hpp"
  63 #include "gc/shared/strongRootsScope.hpp"
  64 #include "gc/shared/taskTerminator.hpp"
  65 #include "gc/shared/weakProcessor.inline.hpp"
  66 #include "gc/shared/workerPolicy.hpp"
  67 #include "gc/shared/workerThread.hpp"
  68 #include "gc/shared/workerUtils.hpp"
  69 #include "logging/log.hpp"
  70 #include "memory/iterator.inline.hpp"
  71 #include "memory/metaspaceUtils.hpp"
  72 #include "memory/resourceArea.hpp"
  73 #include "memory/universe.hpp"
  74 #include "nmt/memTracker.hpp"
  75 #include "oops/access.inline.hpp"
  76 #include "oops/instanceClassLoaderKlass.inline.hpp"
  77 #include "oops/instanceKlass.inline.hpp"
  78 #include "oops/instanceMirrorKlass.inline.hpp"
  79 #include "oops/methodData.hpp"
  80 #include "oops/objArrayKlass.inline.hpp"
  81 #include "oops/oop.inline.hpp"
  82 #include "runtime/atomic.hpp"

 768   HeapWord* const prefix_end = sd.region_to_addr(cur_region);
 769   assert(sd.is_region_aligned(prefix_end), "postcondition");
 770   assert(prefix_end >= full_region_prefix_end, "in-range");
 771   assert(prefix_end <= old_space->top(), "in-range");
 772   return prefix_end;
 773 }
 774 
 775 void PSParallelCompact::fill_dense_prefix_end(SpaceId id) {
 776   // Comparing two sizes to decide if filling is required:
 777   //
 778   // The size of the filler (min-obj-size) is 2 heap words with the default
 779   // MinObjAlignment, since both markword and klass take 1 heap word.
 780   //
 781   // The size of the gap (if any) right before dense-prefix-end is
 782   // MinObjAlignment.
 783   //
 784   // Need to fill in the gap only if it's smaller than min-obj-size, and the
 785   // filler obj will extend to next region.
 786 
 787   // Note: If min-fill-size decreases to 1, this whole method becomes redundant.
 788   if (UseCompactObjectHeaders) {
 789     // The gap is always equal to min-fill-size, so nothing to do.
 790     return;
 791   }
 792   assert(CollectedHeap::min_fill_size() >= 2, "inv");
 793 #ifndef _LP64
  794   // On 32-bit systems, each heap word is 4 bytes, so MinObjAlignment == 2.
 795   // The gap is always equal to min-fill-size, so nothing to do.
 796   return;
 797 #endif
 798   if (MinObjAlignment > 1) {
 799     return;
 800   }
 801   assert(CollectedHeap::min_fill_size() == 2, "inv");
 802   HeapWord* const dense_prefix_end = dense_prefix(id);
 803   assert(_summary_data.is_region_aligned(dense_prefix_end), "precondition");
 804   assert(dense_prefix_end <= space(id)->top(), "precondition");
 805   if (dense_prefix_end == space(id)->top()) {
 806     // Must not have single-word gap right before prefix-end/top.
 807     return;
 808   }
 809   RegionData* const region_after_dense_prefix = _summary_data.addr_to_region_ptr(dense_prefix_end);
 810 
 811   if (region_after_dense_prefix->partial_obj_size() != 0 ||
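
The block comment at the top of fill_dense_prefix_end compares two sizes: the smallest filler object (min-fill-size, 2 heap words with the default layout, since markword and klass take one word each) and the gap, if any, right before the region-aligned dense-prefix end (MinObjAlignment words). A standalone sketch of that comparison, with the constants hard-coded as assumptions mirroring the comment rather than read from a running VM:

    // Illustrative arithmetic only; the constants are assumptions taken from
    // the comment above (64-bit, default MinObjAlignment), not live VM values.
    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t min_obj_alignment = 1;  // words; 64-bit default
      const std::size_t min_fill_size     = 2;  // words; markword + klass
      const std::size_t gap               = min_obj_alignment;  // gap (if any) before dense-prefix-end

      if (gap < min_fill_size) {
        // A 1-word gap cannot hold a 2-word filler, so the filler object has to
        // extend into the next region; this is the case the method handles.
        std::printf("fill needed: gap=%zu < min-fill-size=%zu\n", gap, min_fill_size);
      } else {
        // On 32-bit (MinObjAlignment == 2) the gap equals min-fill-size, so the
        // method returns early and nothing needs to be filled.
        std::printf("nothing to do: gap=%zu >= min-fill-size=%zu\n", gap, min_fill_size);
      }
      return 0;
    }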

1038     DerivedPointerTable::clear();
1039 #endif
1040 
1041     ref_processor()->start_discovery(maximum_heap_compaction);
1042 
1043     ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
1044                               false /* unregister_nmethods_during_purge */,
1045                               false /* lock_nmethod_free_separately */);
1046 
1047     marking_phase(&_gc_tracer);
1048 
1049     bool max_on_system_gc = UseMaximumCompactionOnSystemGC
1050       && GCCause::is_user_requested_gc(gc_cause);
1051     summary_phase(maximum_heap_compaction || max_on_system_gc);
1052 
1053 #if COMPILER2_OR_JVMCI
1054     assert(DerivedPointerTable::is_active(), "Sanity");
1055     DerivedPointerTable::set_active(false);
1056 #endif
1057 
1058     SlidingForwarding::begin();
1059 
1060     forward_to_new_addr();
1061 
1062     adjust_pointers();
1063 
1064     compact();
1065 
1066     SlidingForwarding::end();
1067 
1068     ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());
1069 
1070     ParCompactionManager::verify_all_region_stack_empty();
1071 
1072     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
1073     // done before resizing.
1074     post_compact();
1075 
1076     // Let the size policy know we're done
1077     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1078 
1079     if (UseAdaptiveSizePolicy) {
1080       log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
1081       log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
1082                           old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
1083 
1084       // Don't check if the size_policy is ready here.  Let
1085       // the size_policy check that internally.
1086       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1087           AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {

1587             // No obj-start
1588             continue;
1589           }
1590 
1591           HeapWord* region_start = _summary_data.region_to_addr(cur_region);
1592           HeapWord* region_end = region_start + ParallelCompactData::RegionSize;
1593 
1594           HeapWord* cur_addr = region_start + live_words;
1595 
1596           HeapWord* destination = region_ptr->destination();
1597           while (cur_addr < region_end) {
1598             cur_addr = mark_bitmap()->find_obj_beg(cur_addr, region_end);
1599             if (cur_addr >= region_end) {
1600               break;
1601             }
1602             assert(mark_bitmap()->is_marked(cur_addr), "inv");
1603             HeapWord* new_addr = destination + live_words;
1604             oop obj = cast_to_oop(cur_addr);
1605             if (new_addr != cur_addr) {
1606               cm->preserved_marks()->push_if_necessary(obj, obj->mark());
1607               SlidingForwarding::forward_to(obj, cast_to_oop(new_addr));
1608             }
1609             size_t obj_size = obj->size();
1610             live_words += obj_size;
1611             cur_addr += obj_size;
1612           }
1613         }
1614       }
1615     }
1616   } task(nworkers);
1617 
1618   ParallelScavengeHeap::heap()->workers().run_task(&task);
1619   debug_only(verify_forward();)
1620 }
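
In this version the forwarding pointer no longer goes through obj->forward_to(): it is recorded with SlidingForwarding::forward_to() and read back with SlidingForwarding::forwardee() (see do_addr and verify_forward), with the forwarding window bracketed by SlidingForwarding::begin() and SlidingForwarding::end() in the collection sequence above. The sketch below is only an illustrative stand-in for that call pattern, a static begin/record/query/end interface backed by a plain map; it says nothing about how the real SlidingForwarding class encodes forwardees:

    // Toy stand-in for a begin()/forward_to()/forwardee()/end() interface.
    // Purely illustrative: the real SlidingForwarding is not a hash map; this
    // only models the call pattern visible in the diff.
    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    struct ToyOop { std::uintptr_t addr; };

    class ToyForwarding {
      static std::unordered_map<std::uintptr_t, std::uintptr_t>* _table;
    public:
      static void begin() { _table = new std::unordered_map<std::uintptr_t, std::uintptr_t>(); }
      static void end()   { delete _table; _table = nullptr; }
      static void forward_to(ToyOop from, ToyOop to) { (*_table)[from.addr] = to.addr; }
      static ToyOop forwardee(ToyOop from)           { return ToyOop{ _table->at(from.addr) }; }
    };

    std::unordered_map<std::uintptr_t, std::uintptr_t>* ToyForwarding::_table = nullptr;

    int main() {
      ToyForwarding::begin();                                   // cf. SlidingForwarding::begin()
      ToyForwarding::forward_to(ToyOop{0x1000}, ToyOop{0x800}); // record obj -> new_addr
      assert(ToyForwarding::forwardee(ToyOop{0x1000}).addr == 0x800);
      ToyForwarding::end();                                     // cf. SlidingForwarding::end()
      return 0;
    }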
1621 
1622 #ifdef ASSERT
1623 void PSParallelCompact::verify_forward() {
1624   HeapWord* old_dense_prefix_addr = dense_prefix(SpaceId(old_space_id));
1625   RegionData* old_region = _summary_data.region(_summary_data.addr_to_region_idx(old_dense_prefix_addr));
1626   HeapWord* bump_ptr = old_region->partial_obj_size() != 0
1627                        ? old_dense_prefix_addr + old_region->partial_obj_size()

1630 
1631   for (uint id = old_space_id; id < last_space_id; ++id) {
1632     MutableSpace* sp = PSParallelCompact::space(SpaceId(id));
1633     HeapWord* dense_prefix_addr = dense_prefix(SpaceId(id));
1634     HeapWord* top = sp->top();
1635     HeapWord* cur_addr = dense_prefix_addr;
1636 
1637     while (cur_addr < top) {
1638       cur_addr = mark_bitmap()->find_obj_beg(cur_addr, top);
1639       if (cur_addr >= top) {
1640         break;
1641       }
1642       assert(mark_bitmap()->is_marked(cur_addr), "inv");
1643       // Move to the space containing cur_addr
1644       if (bump_ptr == _space_info[bump_ptr_space].new_top()) {
1645         bump_ptr = space(space_id(cur_addr))->bottom();
1646         bump_ptr_space = space_id(bump_ptr);
1647       }
1648       oop obj = cast_to_oop(cur_addr);
1649       if (cur_addr != bump_ptr) {
1650         assert(SlidingForwarding::forwardee(obj) == cast_to_oop(bump_ptr), "inv");
1651       }
1652       bump_ptr += obj->size();
1653       cur_addr += obj->size();
1654     }
1655   }
1656 }
1657 #endif
1658 
1659 // Helper class to print 8 region numbers per line and then print the total at the end.
1660 class FillableRegionLogger : public StackObj {
1661 private:
1662   Log(gc, compaction) log;
1663   static const int LineLength = 8;
1664   size_t _regions[LineLength];
1665   int _next_index;
1666   bool _enabled;
1667   size_t _total_regions;
1668 public:
1669   FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
1670   ~FillableRegionLogger() {

2394   region_ptr->set_completed();
2395 }
2396 
2397 void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
2398   assert(destination() != nullptr, "sanity");
2399   _source = addr;
2400 
2401   // The start_array must be updated even if the object is not moving.
2402   if (_start_array != nullptr) {
2403     _start_array->update_for_block(destination(), destination() + words);
2404   }
2405 
2406   // Avoid overflow
2407   words = MIN2(words, words_remaining());
2408   assert(words > 0, "inv");
2409 
2410   if (copy_destination() != source()) {
2411     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
2412     assert(source() != destination(), "inv");
2413     assert(cast_to_oop(source())->is_forwarded(), "inv");
2414     assert(SlidingForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv");
2415     Copy::aligned_conjoint_words(source(), copy_destination(), words);
2416     cast_to_oop(copy_destination())->init_mark();
2417   }
2418 
2419   update_state(words);
2420 }
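
The "Avoid overflow" clamp above bounds the copy at words_remaining(), i.e. the room left in this closure's destination, so a block larger than what remains is only partially accounted for by this call. A trivial sketch of that clamp, with invented numbers:

    // Sketch of the clamp in do_addr: never account for more words than remain
    // in the destination; the numbers are invented for illustration.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      std::size_t words_remaining = 3;  // room left at the destination (words)
      std::size_t words           = 5;  // size of the block handed to do_addr
      std::size_t clamped         = std::min(words, words_remaining);
      std::printf("process %zu of %zu words in this call\n", clamped, words);
      return 0;
    }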
2421 
2422 void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallelCompact::RegionData* region_ptr) {
2423   assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
2424   // Record the shadow region index
2425   region_ptr->set_shadow_region(_shadow);
2426   // Mark the shadow region as filled to indicate the data is ready to be
2427   // copied back
2428   region_ptr->mark_filled();
2429   // Try to copy the content of the shadow region back to its corresponding
2430   // heap region if available; the GC thread that decreases the destination
2431   // count to zero will do the copying otherwise (see
2432   // PSParallelCompact::decrement_destination_counts).
2433   if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
2434     region_ptr->set_completed();
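
The comment above describes a handoff: once the shadow region is marked filled, either this path copies it back to the corresponding heap region (when the destination is available and this thread wins the claim, or it was already claimed), or the GC thread that later drops the destination count to zero performs the copy instead. A minimal sketch of the "exactly one of two racing parties copies" idea, using a bare atomic flag instead of the real region state machine (mark_filled/claim/mark_copied):

    // Simplified model of the copy-back handoff: two parties may both find the
    // data ready, but an atomic flag lets exactly one of them do the copy.
    // The real code uses the region's state machine, not a bare atomic bool.
    #include <atomic>
    #include <cstdio>
    #include <thread>

    std::atomic<bool> copied{false};

    void try_copy_back(const char* who) {
      // exchange() returns the previous value, so only the first caller sees false.
      if (!copied.exchange(true, std::memory_order_acq_rel)) {
        std::printf("%s copies the shadow region back\n", who);
      }
    }

    int main() {
      std::thread a([] { try_copy_back("filling thread"); });
      std::thread b([] { try_copy_back("thread that zeroed the destination count"); });
      a.join();
      b.join();
      return 0;
    }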