
src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp

 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _has_old_regions(false),
  _garbage(0),
  _used(0),
  _region_count(0),
  // Zero the per-generation counters as well: clear() resets them every cycle,
  // but the set should be in a valid state before the first cycle runs.
  _young_region_count(0),
  _young_bytes_to_evacuate(0),
  _young_bytes_to_promote(0),
  _old_region_count(0),
  _old_bytes_to_evacuate(0),
  _old_garbage(0),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and NULLs, freeing
  // high-performance code from checking for NULL first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on the OS virtual
  // memory subsystem for mapping not-yet-written-to pages to a single physical
  // backing page, but this is not guaranteed, and would confuse NMT and other
  // memory accounting tools.
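  //
  // For illustration only (the real check lives in the inline header): biasing
  // the map by heap_base makes the hot-path membership test a single load, with
  // no NULL check and no base subtraction, roughly:
  //
  //   bool is_in(const void* p) const {
  //     return _biased_cset_map[(uintx)p >> _region_size_bytes_shift] == 1;
  //   }
  //
  // A NULL pointer indexes into the committed zero page, which always reads 0.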

  MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);

  size_t page_size = (size_t)os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by heap cset map.
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }
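
  // Worked example (hypothetical addresses, assuming 4 KB pages): with
  // _cset_map == 0x7f0000100800 and _map_size == 2048, the commit range is
  // align_down(0x7f0000100800, 4096) == 0x7f0000100000 up to
  // align_up(0x7f0000101000, 4096) == 0x7f0000101000, i.e. a single 4 KB page.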

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  assert(!r->is_humongous(), "Only add regular regions to the collection set");

  _cset_map[r->index()] = 1;

  if (r->affiliation() == YOUNG_GENERATION) {
    _young_region_count++;
    _young_bytes_to_evacuate += r->get_live_data_bytes();
    if (r->age() >= InitialTenuringThreshold) {
      _young_bytes_to_promote += r->get_live_data_bytes();
    }
  } else if (r->affiliation() == OLD_GENERATION) {
    _old_region_count++;
    _old_bytes_to_evacuate += r->get_live_data_bytes();
    _old_garbage += r->garbage();
  }

  _region_count++;
  _has_old_regions |= r->is_old();
  _garbage += r->garbage();
  _used += r->used();

  // Update the region status too. The state transition is validated internally.
  r->make_cset();
}
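
// Illustrative accounting (hypothetical numbers): adding a young region with
// 400 KB of live data and age >= InitialTenuringThreshold bumps both
// _young_bytes_to_evacuate and _young_bytes_to_promote by 400 KB. The same
// region adds its garbage() only to the global _garbage tally, since
// _old_garbage tracks old regions exclusively.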

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index++) {
    assert(!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _old_garbage = 0;
  _used = 0;

  _region_count = 0;
  _current_index = 0;

  _young_region_count = 0;
  _young_bytes_to_evacuate = 0;
  _young_bytes_to_promote = 0;

  _old_region_count = 0;
  _old_bytes_to_evacuate = 0;

  _has_old_regions = false;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when the collection set contains only
  // a few regions. In this case, it is cheaper to check is_in before hitting
  // the (potentially contended) atomic index.

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
        index = cur - 1; // adjust for loop post-increment
        old = cur;
      }
    }
  }
  return NULL;
}
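
// Typical use (illustrative, not part of this file): parallel evacuation
// workers drain the collection set by repeatedly claiming regions, e.g.
//
//   ShenandoahHeapRegion* r;
//   while ((r = cset->claim_next()) != NULL) {
//     // ... evacuate live objects from r ...
//   }
//
// The relaxed CAS suffices because it only hands out ownership of an index;
// the regions themselves were published at the safepoint that built the
// collection set, before any worker started claiming.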