/*
 * Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"

ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _has_old_regions(false),
  _garbage(0),
  _used(0),
  _live(0),
  _region_count(0),
  _old_garbage(0),
  _preselected_regions(nullptr),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and nulls, freeing
  // high-performance code from checking for null first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on the OS virtual
  // memory subsystem for mapping not-yet-written-to pages to a single physical
  // backing page, but this is not guaranteed, and would confuse NMT and other
  // memory accounting tools.
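  //
  // For illustration only (a sketch, not the real accessor; the actual inline
  // checks live in shenandoahCollectionSet.inline.hpp): because the map is
  // biased by (heap_base >> _region_size_bytes_shift), an in-cset test for an
  // arbitrary pointer p, including nullptr, reduces to a single byte load with
  // no bounds or null check:
  //
  //   bool in_cset = _biased_cset_map[(uintx)p >> _region_size_bytes_shift] == 1;
  //
  // nullptr maps to index 0, which lands on the committed, zeroed page below
  // and therefore reads as "not in collection set".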

  MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);

  size_t page_size = os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by the heap cset map.
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  assert(!r->is_humongous(), "Only add regular regions to the collection set");

  _cset_map[r->index()] = 1;
  size_t live    = r->get_live_data_bytes();
  size_t garbage = r->garbage();
  size_t free    = r->free();
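  // Tally per-generation evacuation work: live bytes drive the evacuation
  // budgets, and live bytes in tenure-aged young regions are additionally
  // counted as promotion work in generational mode.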
  if (r->is_young()) {
    _young_bytes_to_evacuate += live;
    _young_available_bytes_collected += free;
    if (ShenandoahHeap::heap()->mode()->is_generational() && r->age() >= ShenandoahHeap::heap()->age_census()->tenuring_threshold()) {
      _young_bytes_to_promote += live;
    }
  } else if (r->is_old()) {
    _old_bytes_to_evacuate += live;
    _old_garbage += garbage;
  }

  _region_count++;
  _has_old_regions |= r->is_old();
  _garbage += garbage;
  _used += r->used();
  _live += live;
  // Update the region status too. The state transition is checked internally.
  r->make_cset();
}

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    assert (!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _old_garbage = 0;
  _used = 0;
  _live = 0;

  _region_count = 0;
  _current_index = 0;

  _young_bytes_to_evacuate = 0;
  _young_bytes_to_promote = 0;
  _old_bytes_to_evacuate = 0;

  _young_available_bytes_collected = 0;

  _has_old_regions = false;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when the collection set contains only
  // a few regions. In this case, it is more constructive to check for is_in
  // before hitting the (potentially contended) atomic index.

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
        index = cur - 1; // adjust for loop post-increment
        old = cur;
      }
    }
  }
  return nullptr;
}

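// Sequential walk over the collection set, used only by the VM thread at a
// safepoint (see the asserts below), so no atomic claiming is needed here.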
ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");

  size_t max = _heap->num_regions();
  for (size_t index = _current_index; index < max; index++) {
    if (is_in(index)) {
      _current_index = index + 1;
      return _heap->get_region(index);
    }
  }

  return nullptr;
}

void ShenandoahCollectionSet::print_on(outputStream* out) const {
  out->print_cr("Collection Set: Regions: "
                SIZE_FORMAT ", Garbage: " SIZE_FORMAT "%s, Live: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s", count(),
                byte_size_in_proper_unit(garbage()), proper_unit_for_byte_size(garbage()),
                byte_size_in_proper_unit(live()),    proper_unit_for_byte_size(live()),
                byte_size_in_proper_unit(used()),    proper_unit_for_byte_size(used()));

  debug_only(size_t regions = 0;)
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    if (is_in(index)) {
      _heap->get_region(index)->print_on(out);
      debug_only(regions ++;)
    }
  }
  assert(regions == count(), "Must match");
}