
src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp

 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _has_old_regions(false),
  _garbage(0),
  _used(0),
  _region_count(0),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to allow in-cset checks on both heap oops and NULLs, freeing
  // high-performance code from checking for NULL first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on the OS virtual
  // memory subsystem for mapping not-yet-written-to pages to a single physical
  // backing page, but this is not guaranteed, and would confuse NMT and other
  // memory accounting tools.

  MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);

  size_t page_size = (size_t)os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by the heap cset map.
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}
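
The bias is what makes the in-cset check branch-free: the map pointer is
pre-offset by heap_base, so a lookup can index it with a shifted address
directly, and a NULL argument simply lands on the committed zero page. A
minimal sketch of such a check, assuming the fields initialized above (the
real accessor lives in the companion header, not in this file):

// Sketch only: branch-free in-cset test over the biased map.
// Valid for any heap oop and for NULL, because the zero page is committed.
inline bool is_in_biased(const void* p, const char* biased_cset_map,
                         size_t region_size_bytes_shift) {
  uintx index = ((uintx) p) >> region_size_bytes_shift;
  // No need to subtract heap_base: the map pointer is already biased by it.
  return biased_cset_map[index] == 1;
}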

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  assert(!r->is_humongous(), "Only add regular regions to the collection set");

  _cset_map[r->index()] = 1;

  if (r->affiliation() == YOUNG_GENERATION) {
    _young_region_count++;
    _young_bytes_to_evacuate += r->get_live_data_bytes();
  } else if (r->affiliation() == OLD_GENERATION) {
    _old_region_count++;
    _old_bytes_to_evacuate += r->get_live_data_bytes();
  }

  _region_count++;
  _has_old_regions |= r->is_old();
  _garbage += r->garbage();
  _used += r->used();

  // Update the region status too. The state transition is checked internally.
  r->make_cset();
}
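
The per-generation counters make evacuation sizing visible to the heuristics
before the pause ends. A hypothetical caller, sketched here only to show the
intended flow (candidate_count(), candidate_at() and garbage_threshold() are
illustrative names, not from this file):

// Sketch only: a heuristic populating the collection set at a safepoint.
void choose_collection_set(ShenandoahCollectionSet* cset) {
  for (size_t i = 0; i < candidate_count(); i++) {
    ShenandoahHeapRegion* r = candidate_at(i);
    if (r->garbage() > garbage_threshold()) {
      // Tallies young/old bytes to evacuate as a side effect.
      cset->add_region(r);
    }
  }
}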

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index++) {
    assert(!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _used = 0;

  _region_count = 0;
  _current_index = 0;

  _young_region_count = 0;
  _old_region_count = 0;
  _young_bytes_to_evacuate = 0;
  _old_bytes_to_evacuate = 0;

  _has_old_regions = false;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when the collection set contains only
  // a few regions. In that case, it is cheaper to check is_in before hitting
  // the (potentially contended) atomic index.

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
        index = cur - 1; // adjust for loop post-increment
        old = cur;
      }
    }
  }
  return NULL;
}
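
Because the claim is a relaxed CAS over a monotonically advancing index,
claim_next() is safe to call from many GC workers concurrently: each call
either claims a distinct region or observes another worker's progress and
resumes scanning past it. A typical draining loop, sketched with an
illustrative evacuate_region() standing in for the per-region work:

// Sketch only: parallel workers draining the collection set.
void drain_collection_set(ShenandoahCollectionSet* cset) {
  ShenandoahHeapRegion* r;
  while ((r = cset->claim_next()) != NULL) {
    // Each region is returned to exactly one worker.
    evacuate_region(r);
  }
}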